index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/StoreUtils.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.titus.api.jobmanager.store.JobStoreException;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Emitter;
import rx.Observable;
/**
* TODO to share common store related code, we need to have a common base class for store exceptions
*/
/**
 * Shared helpers for running Cassandra statements as Rx streams. Kept package-private;
 * TODO to share common store related code, we need a common base class for store exceptions.
 */
final class StoreUtils {

    private static final Logger logger = LoggerFactory.getLogger(StoreUtils.class);

    // Static-only utility; never instantiated.
    private StoreUtils() {
    }

    /**
     * Runs a single statement asynchronously and emits its {@link ResultSet} as a
     * one-element observable. Driver failures are mapped to
     * {@link JobStoreException#cassandraDriverError(Throwable)}.
     */
    static Observable<ResultSet> execute(Session session, Statement statement) {
        return Observable.create(emitter -> {
            ResultSetFuture future = session.executeAsync(statement);
            FutureCallback<ResultSet> bridge = new FutureCallback<ResultSet>() {
                @Override
                public void onSuccess(@Nullable ResultSet result) {
                    emitter.onNext(result);
                    emitter.onCompleted();
                }

                @Override
                public void onFailure(@Nonnull Throwable e) {
                    emitter.onError(JobStoreException.cassandraDriverError(e));
                }
            };
            // Callback runs on the driver thread; downstream must not block it.
            Futures.addCallback(future, bridge, MoreExecutors.directExecutor());
        }, Emitter.BackpressureMode.NONE);
    }

    /**
     * Fetches all rows produced by {@code fetchStatement}, deserializes column 0 of each row
     * as JSON into {@code type}, and validates it with {@code entitySanitizer}.
     * When {@code failOnError} is false, records that fail deserialization or validation are
     * logged and skipped; when true, the first bad record fails the stream.
     */
    static <T> Observable<T> retrieve(Session session,
                                      ObjectMapper mapper,
                                      Statement fetchStatement,
                                      Class<T> type,
                                      EntitySanitizer entitySanitizer,
                                      boolean failOnError) {
        // Disable driver-side paging so resultSet.all() returns everything in one pass.
        fetchStatement.setFetchSize(Integer.MAX_VALUE);
        return execute(session, fetchStatement).flatMapIterable(resultSet -> {
            List<T> accepted = new ArrayList<>();
            for (Row row : resultSet.all()) {
                String text = row.getString(0);
                try {
                    T value = ObjectMappers.readValue(mapper, text, type);
                    Set<ValidationError> violations = entitySanitizer.validate(value);
                    if (!violations.isEmpty()) {
                        if (failOnError) {
                            throw JobStoreException.badData(value, violations);
                        }
                        logger.warn("Ignoring bad record of type {} due to validation constraint violations: record={}, violations={}", type, value, violations);
                    } else {
                        accepted.add(value);
                    }
                } catch (Exception e) {
                    // Note: a badData exception thrown above lands here too; it is re-thrown
                    // when failOnError is set, otherwise the record is dropped.
                    if (failOnError) {
                        throw e;
                    }
                    logger.warn("Ignoring bad record of type {}: content={}, jacksonError={}", type.getName(), text, e.getMessage());
                }
            }
            return accepted;
        });
    }

    /**
     * Serializes {@code entity} to JSON and stores it under {@code id} using the supplied
     * prepared INSERT statement.
     */
    static <T> Completable store(Session session,
                                 ObjectMapper mapper,
                                 PreparedStatement insertEntityStatement,
                                 String id,
                                 T entity) {
        Observable<Statement> bound = Observable.fromCallable((Callable<Statement>) () -> {
            String json = ObjectMappers.writeValueAsString(mapper, entity);
            return insertEntityStatement.bind(id, json);
        });
        return bound.flatMap(statement -> execute(session, statement)).toCompletable();
    }

    /** Deletes the record identified by {@code id} using the supplied prepared DELETE statement. */
    static Completable remove(Session session, PreparedStatement deleteStatment, String id) {
        Statement bound = deleteStatment.bind(id);
        return execute(session, bound).toCompletable();
    }
}
| 1,400 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassStoreHelper.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.function.Supplier;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.titus.api.jobmanager.store.JobStoreException;
import rx.Emitter;
import rx.Observable;
import rx.Scheduler;
public class CassStoreHelper {

    private final Session session;
    private final Scheduler scheduler;

    /**
     * Results from queries will have pages fetched on demand, which can block code iterating on
     * {@link ResultSet result sets}. For that reason, it is recommended a Scheduler suitable for (slow) blocking
     * operations is used to process results.
     *
     * @param session a C* session where queries will be executed
     * @param scheduler where results (callbacks) will be processed so C* driver threads are not blocked
     */
    public CassStoreHelper(Session session, Scheduler scheduler) {
        this.session = session;
        this.scheduler = scheduler;
    }

    /** Executes the statement asynchronously, delivering the result set on {@link #scheduler}. */
    public Observable<ResultSet> execute(Statement statement) {
        return buildResultSetObservable(() -> session.executeAsync(statement));
    }

    /**
     * Bridges a {@link ResultSetFuture} into a one-element observable. Driver errors are
     * mapped to {@link JobStoreException#cassandraDriverError(Throwable)}; emissions are
     * moved off the driver's callback thread via {@code observeOn(scheduler)}.
     */
    private Observable<ResultSet> buildResultSetObservable(Supplier<ResultSetFuture> resultSetFutureSupplier) {
        Observable<ResultSet> resultSets = Observable.create(emitter -> {
            FutureCallback<ResultSet> bridge = new FutureCallback<ResultSet>() {
                @Override
                public void onSuccess(@Nullable ResultSet result) {
                    emitter.onNext(result);
                    emitter.onCompleted();
                }

                @Override
                public void onFailure(@Nonnull Throwable e) {
                    emitter.onError(JobStoreException.cassandraDriverError(e));
                }
            };
            ResultSetFuture future = resultSetFutureSupplier.get();
            Futures.addCallback(future, bridge, MoreExecutors.directExecutor());
        }, Emitter.BackpressureMode.NONE);
        return resultSets.observeOn(scheduler);
    }
}
| 1,401 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassandraJobStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.QueryTrace;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.Version;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.migration.SystemDefaultMigrationPolicy;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.api.jobmanager.store.JobStoreException;
import com.netflix.titus.api.jobmanager.store.JobStoreFitAction;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.framework.fit.FitFramework;
import com.netflix.titus.common.framework.fit.FitInjection;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.guice.annotation.ProxyConfiguration;
import com.netflix.titus.common.util.tuple.Either;
import com.netflix.titus.common.util.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Emitter;
import rx.Observable;
import rx.exceptions.Exceptions;
import static com.netflix.titus.common.util.guice.ProxyType.Logging;
import static com.netflix.titus.common.util.guice.ProxyType.Spectator;
import static com.netflix.titus.ext.cassandra.store.StoreTransactionLoggers.transactionLogger;
@Singleton
@ProxyConfiguration(types = {Logging, Spectator})
public class CassandraJobStore implements JobStore {
private static final Logger logger = LoggerFactory.getLogger(CassandraJobStore.class);
// Defaults for spreading active job ids across buckets (partitions); overridable via the test constructor.
private static final int INITIAL_BUCKET_COUNT = 100;
private static final int MAX_BUCKET_SIZE = 2_000;
private static final String METRIC_NAME_ROOT = "titusMaster.jobManager.cassandra";
// SELECT Queries
private static final String RETRIEVE_ACTIVE_JOB_ID_BUCKETS_STRING = "SELECT distinct bucket FROM active_job_ids";
private static final String RETRIEVE_ACTIVE_JOB_IDS_STRING = "SELECT job_id FROM active_job_ids WHERE bucket = ?;";
private static final String RETRIEVE_ACTIVE_JOB_STRING = "SELECT value FROM active_jobs WHERE job_id = ?;";
private static final String RETRIEVE_ARCHIVED_JOB_STRING = "SELECT value FROM archived_jobs WHERE job_id = ?;";
private static final String RETRIEVE_ACTIVE_TASK_IDS_FOR_JOB_STRING = "SELECT task_id FROM active_task_ids WHERE job_id = ?;";
private static final String RETRIEVE_ARCHIVED_TASK_IDS_FOR_JOB_STRING = "SELECT task_id FROM archived_task_ids WHERE job_id = ?;";
private static final String RETRIEVE_ACTIVE_TASK_STRING = "SELECT value FROM active_tasks WHERE task_id = ?;";
private static final String RETRIEVE_ARCHIVED_TASK_STRING = "SELECT value FROM archived_tasks WHERE task_id = ?;";
private static final String RETRIEVE_ARCHIVED_TASKS_COUNT_STRING = "SELECT count(*) FROM archived_task_ids WHERE job_id = ?;";
// Prepared in the constructor; all statements run at LOCAL_QUORUM.
private final PreparedStatement retrieveActiveJobIdBucketsStatement;
private final PreparedStatement retrieveActiveJobIdsStatement;
private final PreparedStatement retrieveActiveJobStatement;
private final PreparedStatement retrieveArchivedJobStatement;
private final PreparedStatement retrieveActiveTaskIdsForJobStatement;
private final PreparedStatement retrieveArchivedTaskIdsForJobStatement;
private final PreparedStatement retrieveActiveTaskStatement;
private final PreparedStatement retrieveArchivedTaskStatement;
private final PreparedStatement retrieveArchivedTasksCountStatement;
// INSERT Queries
private static final String INSERT_ACTIVE_JOB_ID_STRING = "INSERT INTO active_job_ids (bucket, job_id) VALUES (?, ?);";
private static final String INSERT_ACTIVE_JOB_STRING = "INSERT INTO active_jobs (job_id, value) VALUES (?, ?);";
private static final String INSERT_ARCHIVED_JOB_STRING = "INSERT INTO archived_jobs (job_id, value) VALUES (?, ?);";
private static final String INSERT_ACTIVE_TASK_ID_STRING = "INSERT INTO active_task_ids (job_id, task_id) VALUES (?, ?);";
private static final String INSERT_ACTIVE_TASK_STRING = "INSERT INTO active_tasks (task_id, value) VALUES (?, ?);";
private static final String INSERT_ARCHIVED_TASK_ID_STRING = "INSERT INTO archived_task_ids (job_id, task_id) VALUES (?, ?);";
private static final String INSERT_ARCHIVED_TASK_STRING = "INSERT INTO archived_tasks (task_id, value) VALUES (?, ?);";
private final PreparedStatement insertActiveJobStatement;
private final PreparedStatement insertActiveJobIdStatement;
private final PreparedStatement insertArchivedJobStatement;
private final PreparedStatement insertActiveTaskStatement;
private final PreparedStatement insertActiveTaskIdStatement;
private final PreparedStatement insertArchivedTaskIdStatement;
private final PreparedStatement insertArchivedTaskStatement;
// DELETE Queries
private static final String DELETE_ACTIVE_JOB_ID_STRING = "DELETE FROM active_job_ids WHERE bucket = ? and job_id = ?";
private static final String DELETE_ACTIVE_JOB_STRING = "DELETE FROM active_jobs WHERE job_id = ?;";
private static final String DELETE_ACTIVE_TASK_ID_STRING = "DELETE FROM active_task_ids WHERE job_id = ? and task_id = ?";
private static final String DELETE_ACTIVE_TASK_STRING = "DELETE FROM active_tasks WHERE task_id = ?;";
private static final String DELETE_ARCHIVED_TASK_ID_STRING = "DELETE FROM archived_task_ids WHERE job_id = ? and task_id = ?;";
private static final String DELETE_ARCHIVED_TASK_STRING = "DELETE FROM archived_tasks WHERE task_id = ?;";
private final PreparedStatement deleteActiveJobIdStatement;
private final PreparedStatement deleteActiveJobStatement;
private final PreparedStatement deleteActiveTaskIdStatement;
private final PreparedStatement deleteActiveTaskStatement;
private final PreparedStatement deletedArchivedTaskIdStatement;
private final PreparedStatement deletedArchivedTaskStatement;
private final TitusRuntime titusRuntime;
private final Session session;
private final ObjectMapper mapper;
// In-memory index of active job ids per bucket, populated by init() and kept in sync by store/delete.
private final BalancedBucketManager<String> activeJobIdsBucketManager;
private final CassandraStoreConfiguration configuration;
// FIT (failure injection testing) hooks; empty when the FIT framework is inactive.
private final Optional<FitInjection> fitDriverInjection;
private final Optional<FitInjection> fitBadDataInjection;
// Production entry point; delegates to the test constructor with the default mapper and bucket sizing.
@Inject
public CassandraJobStore(CassandraStoreConfiguration configuration,
Session session,
TitusRuntime titusRuntime) {
this(configuration, session, titusRuntime, ObjectMappers.storeMapper(), INITIAL_BUCKET_COUNT, MAX_BUCKET_SIZE);
}
/**
 * Package-private constructor used by tests to override the mapper and bucket sizing.
 * Registers FIT injection points (when the FIT framework is active) and prepares all
 * CQL statements up front, each at LOCAL_QUORUM consistency.
 */
CassandraJobStore(CassandraStoreConfiguration configuration,
Session session,
TitusRuntime titusRuntime,
ObjectMapper mapper,
int initialBucketCount,
int maxBucketSize) {
this.configuration = configuration;
this.session = session;
this.titusRuntime = titusRuntime;
// Wire up failure-injection hooks only when FIT is active; otherwise keep them empty
// so the hot paths can skip injection checks via Optional.isPresent().
FitFramework fit = titusRuntime.getFitFramework();
if (fit.isActive()) {
FitInjection fitDriverInjection = fit.newFitInjectionBuilder("cassandraDriver")
.withDescription("Fail Cassandra driver requests")
.withExceptionType(DriverException.class)
.build();
FitInjection fitBadDataInjection = fit.newFitInjectionBuilder("dataCorruption")
.withDescription("Corrupt data loaded from the database")
.build();
fit.getRootComponent().getChild(V3JobOperations.COMPONENT)
.addInjection(fitDriverInjection)
.addInjection(fitBadDataInjection);
this.fitDriverInjection = Optional.of(fitDriverInjection);
this.fitBadDataInjection = Optional.of(fitBadDataInjection);
} else {
this.fitDriverInjection = Optional.empty();
this.fitBadDataInjection = Optional.empty();
}
this.mapper = mapper;
this.activeJobIdsBucketManager = new BalancedBucketManager<>(initialBucketCount, maxBucketSize, METRIC_NAME_ROOT, titusRuntime.getRegistry());
// Prepare every statement eagerly so runtime calls only bind parameters.
retrieveActiveJobIdBucketsStatement = session.prepare(RETRIEVE_ACTIVE_JOB_ID_BUCKETS_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveActiveJobIdsStatement = session.prepare(RETRIEVE_ACTIVE_JOB_IDS_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveActiveJobStatement = session.prepare(RETRIEVE_ACTIVE_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveArchivedJobStatement = session.prepare(RETRIEVE_ARCHIVED_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveActiveTaskIdsForJobStatement = session.prepare(RETRIEVE_ACTIVE_TASK_IDS_FOR_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveArchivedTaskIdsForJobStatement = session.prepare(RETRIEVE_ARCHIVED_TASK_IDS_FOR_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveActiveTaskStatement = session.prepare(RETRIEVE_ACTIVE_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveArchivedTaskStatement = session.prepare(RETRIEVE_ARCHIVED_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
retrieveArchivedTasksCountStatement = session.prepare(RETRIEVE_ARCHIVED_TASKS_COUNT_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertActiveJobStatement = session.prepare(INSERT_ACTIVE_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertActiveJobIdStatement = session.prepare(INSERT_ACTIVE_JOB_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertArchivedJobStatement = session.prepare(INSERT_ARCHIVED_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertActiveTaskStatement = session.prepare(INSERT_ACTIVE_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertActiveTaskIdStatement = session.prepare(INSERT_ACTIVE_TASK_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertArchivedTaskIdStatement = session.prepare(INSERT_ARCHIVED_TASK_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
insertArchivedTaskStatement = session.prepare(INSERT_ARCHIVED_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deleteActiveJobIdStatement = session.prepare(DELETE_ACTIVE_JOB_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deleteActiveJobStatement = session.prepare(DELETE_ACTIVE_JOB_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deleteActiveTaskIdStatement = session.prepare(DELETE_ACTIVE_TASK_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deleteActiveTaskStatement = session.prepare(DELETE_ACTIVE_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deletedArchivedTaskIdStatement = session.prepare(DELETE_ARCHIVED_TASK_ID_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
deletedArchivedTaskStatement = session.prepare(DELETE_ARCHIVED_TASK_STRING).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
}
// Loads all active job ids (bucket by bucket) into the in-memory bucket manager so later
// retrieveJobs()/storeJob() calls know which job ids exist and which bucket to use next.
// Bucket loads run concurrently, bounded by getConcurrencyLimit().
@Override
public Completable init() {
return Observable.fromCallable(() -> retrieveActiveJobIdBucketsStatement.bind().setFetchSize(Integer.MAX_VALUE))
.flatMap(statement -> execute(statement).flatMap(resultSet -> {
List<Completable> completables = new ArrayList<>();
for (Row row : resultSet.all()) {
int bucket = row.getInt(0);
Statement retrieveJobIdsStatement = retrieveActiveJobIdsStatement.bind(bucket).setFetchSize(Integer.MAX_VALUE);
Completable completable = execute(retrieveJobIdsStatement)
.flatMap(jobIdsResultSet -> {
List<String> jobIds = new ArrayList<>();
for (Row jobIdRow : jobIdsResultSet.all()) {
String jobId = jobIdRow.getString(0);
// FIT hooks can drop a job id (LostJobIds) or fabricate an extra one (PhantomJobIds)
// to simulate data corruption; with FIT inactive the id is taken as-is.
if (fitBadDataInjection.isPresent()) {
String effectiveJobId = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.LostJobIds.name(), jobId);
if (effectiveJobId != null) {
jobIds.add(effectiveJobId);
}
String phantomId = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.PhantomJobIds.name(), jobId);
if (phantomId != null && !phantomId.equals(jobId)) {
jobIds.add(phantomId);
}
} else {
jobIds.add(jobId);
}
}
activeJobIdsBucketManager.addItems(bucket, jobIds);
return Observable.empty();
}).toCompletable();
completables.add(completable);
}
return Completable.merge(Observable.from(completables), getConcurrencyLimit()).toObservable();
})).toCompletable();
}
// Loads all active jobs (ids come from the in-memory bucket manager) concurrently, bounded by
// getConcurrencyLimit(). Records that cannot be deserialized are counted as errors rather than
// failing the stream; the pair carries (good jobs, error count). Legacy records with a missing
// disruption budget or (for service jobs) a missing migration policy are patched in memory.
// NOTE(review): `result` is declared as a raw Observable and the goodJobs cast is unchecked;
// parameterizing the whole chain would be a worthwhile follow-up.
@Override
public Observable<Pair<List<Job<?>>, Integer>> retrieveJobs() {
Observable result = Observable.fromCallable(() -> {
List<String> jobIds = activeJobIdsBucketManager.getItems();
return jobIds.stream().map(retrieveActiveJobStatement::bind).map(this::execute).collect(Collectors.toList());
}).flatMap(observables -> Observable.merge(observables, getConcurrencyLimit()).flatMapIterable(resultSet -> {
List<Row> allRows = resultSet.all();
if (allRows.isEmpty()) {
logger.debug("Job id with no record");
return Collections.emptyList();
}
return allRows.stream()
.map(row -> row.getString(0))
.map(value -> {
// FIT hook may corrupt the raw JSON before deserialization.
String effectiveValue;
if (fitBadDataInjection.isPresent()) {
effectiveValue = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.CorruptedRawJobRecords.name(), value);
} else {
effectiveValue = value;
}
Job<?> job;
try {
job = deserializeJob(effectiveValue);
} catch (Exception e) {
logger.error("Cannot map serialized job data to Job class: {}", effectiveValue, e);
return Either.ofError(e);
}
// Patch legacy records missing a disruption budget.
if (job.getJobDescriptor().getDisruptionBudget() == null) {
titusRuntime.getCodeInvariants().inconsistent("jobWithNoDisruptionBudget: jobId=%s", job.getId());
job = JobFunctions.changeDisruptionBudget(job, DisruptionBudget.none());
}
// TODO Remove this code when there are no more jobs with missing migration data (caused by a bug in ServiceJobExt builder).
if (job.getJobDescriptor().getExtensions() instanceof ServiceJobExt) {
Job<ServiceJobExt> serviceJob = (Job<ServiceJobExt>) job;
ServiceJobExt ext = serviceJob.getJobDescriptor().getExtensions();
if (ext.getMigrationPolicy() == null) {
titusRuntime.getCodePointTracker().markReachable("Corrupted task migration record in Cassandra: " + job.getId());
ServiceJobExt fixedExt = ext.toBuilder().withMigrationPolicy(SystemDefaultMigrationPolicy.newBuilder().build()).build();
logger.warn("Service job with no migration policy defined. Setting system default: {}", job.getId());
job = serviceJob.toBuilder().withJobDescriptor(
serviceJob.getJobDescriptor().toBuilder().withExtensions(fixedExt).build()
).build();
}
}
if (!fitBadDataInjection.isPresent()) {
return Either.ofValue(job);
}
// FIT hook may also corrupt the deserialized job object.
Job<?> effectiveJob = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.CorruptedJobRecords.name(), job);
return Either.ofValue(effectiveJob);
})
.collect(Collectors.toList());
})).toList().map(everything -> {
List<Job> goodJobs = (List<Job>) everything.stream().filter(Either::hasValue).map(Either::getValue).collect(Collectors.toList());
int errors = everything.size() - goodJobs.size();
return Pair.of(goodJobs, errors);
});
return result;
}
/**
 * Loads a single active job by id. Fails with {@link JobStoreException#jobDoesNotExist(String)}
 * when the id is not known to be active or has no backing record.
 */
@Override
public Observable<Job<?>> retrieveJob(String jobId) {
    Observable<Statement> boundStatement = Observable.fromCallable((Callable<Statement>) () -> {
        checkIfJobIsActive(jobId);
        return retrieveActiveJobStatement.bind(jobId);
    });
    return boundStatement.flatMap(statement -> execute(statement).map(resultSet -> {
        Row firstRow = resultSet.one();
        if (firstRow == null) {
            throw JobStoreException.jobDoesNotExist(jobId);
        }
        return deserializeJob(firstRow.getString(0));
    }));
}
// Stores a new job: reserves a bucket slot in the in-memory manager first, then writes the
// job record and its bucket/id mapping atomically in one batch. If the write fails, the
// reserved slot is rolled back in doOnError so the in-memory index stays consistent.
@Override
public Completable storeJob(Job job) {
return Observable
.fromCallable((Callable<Statement>) () -> {
String jobId = job.getId();
checkIfJobAlreadyExists(jobId);
String jobJsonString = writeJobToString(job);
// Reserve the slot before the write; undone in doOnError below on failure.
int bucket = activeJobIdsBucketManager.getNextBucket();
activeJobIdsBucketManager.addItem(bucket, jobId);
Statement jobStatement = insertActiveJobStatement.bind(jobId, jobJsonString);
Statement jobIdStatement = insertActiveJobIdStatement.bind(bucket, jobId);
BatchStatement batchStatement = new BatchStatement();
batchStatement.add(jobStatement);
batchStatement.add(jobIdStatement);
transactionLogger().logBeforeCreate(insertActiveJobStatement, "storeJob", job);
return batchStatement;
})
.flatMap(statement -> execute(statement)
.doOnNext(rs -> transactionLogger().logAfterCreate(insertActiveJobStatement, "storeJob", job))
.doOnError(throwable -> activeJobIdsBucketManager.deleteItem(job.getId()))
)
.toCompletable();
}
/** Serializes {@code job} to its JSON store representation using the configured mapper. */
private String writeJobToString(Job job) {
    String serialized = ObjectMappers.writeValueAsString(mapper, job);
    return serialized;
}
/**
 * Overwrites the active job record for {@code job.getId()}. The job must already be active;
 * transaction logging brackets the write (before on bind, after on successful execution).
 */
@Override
public Completable updateJob(Job job) {
    Observable<Statement> prepared = Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = job.getId();
        checkIfJobIsActive(jobId);
        String serializedJob = writeJobToString(job);
        transactionLogger().logBeforeUpdate(insertActiveJobStatement, "updateJob", job);
        return insertActiveJobStatement.bind(jobId, serializedJob);
    });
    return prepared
            .flatMap(statement -> execute(statement)
                    .doOnNext(rs -> transactionLogger().logAfterUpdate(insertActiveJobStatement, "updateJob", job)))
            .toCompletable();
}
// Deletes an active job in stages: first archives/deletes every one of its tasks (fixing up
// non-Finished task statuses), then archives the job record itself in one batch, and finally
// removes the job id from the in-memory bucket index. Task records that fail to load are
// logged and skipped rather than blocking the delete.
@Override
public Completable deleteJob(Job job) {
return Observable.fromCallable(() -> {
String jobId = job.getId();
checkIfJobIsActive(jobId);
return jobId;
}).flatMap(jobId -> retrieveTasksForJob(jobId).flatMap(tasksAndErrors -> {
List<Task> tasks = tasksAndErrors.getLeft();
int errors = tasksAndErrors.getRight();
if (errors > 0) {
logger.warn("Some tasks records could not be loaded during the job delete operation. Ignoring them: {}", errors);
}
// Non-Finished tasks get their status fixed before archival.
List<Task> fixedTasks = checkTaskConsistency(tasks);
List<Completable> completables = fixedTasks.stream().map(this::deleteTask).collect(Collectors.toList());
return Completable.merge(Observable.from(completables), getConcurrencyLimit()).toObservable();
})).toList().flatMap(ignored -> {
BatchStatement statement = getArchiveJobBatchStatement(job);
transactionLogger().logBeforeDelete(deleteActiveJobStatement, "deleteJob", job);
return execute(statement).doOnNext(rs -> transactionLogger().logAfterDelete(deleteActiveJobStatement, "deleteJob", job));
}).flatMap(ignored -> {
// Only drop the in-memory index entry once the C* delete succeeded.
activeJobIdsBucketManager.deleteItem(job.getId());
return Observable.empty();
}).toCompletable();
}
/**
 * Verifies that every task about to be archived is in the Finished state. Tasks that are not
 * are reported as a code-invariant violation and have their status fixed up before archival.
 *
 * @param tasks tasks being archived as part of a job delete
 * @return the same tasks, with any non-Finished task replaced by a status-fixed copy
 */
private List<Task> checkTaskConsistency(List<Task> tasks) {
    List<Task> checkedTasks = new ArrayList<>();
    for (Task task : tasks) {
        if (task.getStatus().getState() == TaskState.Finished) {
            checkedTasks.add(task);
        } else {
            // Fix: use a String.format-style "%s" placeholder, matching the other
            // inconsistent(...) call in this class ("jobWithNoDisruptionBudget: jobId=%s");
            // the previous "{}" placeholder was never substituted into the message.
            titusRuntime.getCodeInvariants().inconsistent("Archiving task that is not in Finished state: task=%s", task);
            Task fixed = JobFunctions.fixArchivedTaskStatus(task, titusRuntime.getClock());
            checkedTasks.add(fixed);
        }
    }
    return checkedTasks;
}
// Loads all active tasks of an active job: resolves the task ids for the job (applying FIT
// lost/phantom-id corruption when active), fetches each task record concurrently (bounded by
// getConcurrencyLimit()), and deserializes them. Bad records count as errors instead of
// failing the stream; the pair carries (good tasks, error count).
@Override
public Observable<Pair<List<Task>, Integer>> retrieveTasksForJob(String jobId) {
return Observable.fromCallable(() -> {
checkIfJobIsActive(jobId);
return retrieveActiveTaskIdsForJobStatement.bind(jobId).setFetchSize(Integer.MAX_VALUE);
}).flatMap(retrieveActiveTaskIdsForJob -> execute(retrieveActiveTaskIdsForJob).flatMap(taskIdsResultSet -> {
List<String> taskIds = taskIdsResultSet.all().stream()
.map(row -> row.getString(0))
.flatMap(taskId -> {
// FIT hooks can drop a task id (LostTaskIds) or fabricate an extra one (PhantomTaskIds).
if (fitBadDataInjection.isPresent()) {
List<String> effectiveTaskIds = new ArrayList<>();
String effectiveTaskId = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.LostTaskIds.name(), taskId);
if (effectiveTaskId != null) {
effectiveTaskIds.add(effectiveTaskId);
}
String phantomId = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.PhantomTaskIds.name(), taskId);
if (phantomId != null && !phantomId.equals(taskId)) {
effectiveTaskIds.add(phantomId);
}
return effectiveTaskIds.stream();
}
return Stream.of(taskId);
})
.collect(Collectors.toList());
List<Observable<ResultSet>> observables = taskIds.stream().map(retrieveActiveTaskStatement::bind).map(this::execute).collect(Collectors.toList());
return Observable.merge(observables, getConcurrencyLimit()).flatMapIterable(tasksResultSet -> {
List<Either<Task, Throwable>> tasks = new ArrayList<>();
for (Row row : tasksResultSet.all()) {
String value = row.getString(0);
// FIT hook may corrupt the raw JSON before deserialization.
String effectiveValue;
if (fitBadDataInjection.isPresent()) {
effectiveValue = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.CorruptedRawTaskRecords.name(), value);
} else {
effectiveValue = value;
}
Task task;
try {
task = deserializeTask(effectiveValue);
if (!fitBadDataInjection.isPresent()) {
tasks.add(Either.ofValue(task));
} else {
// FIT hooks may further corrupt the deserialized task object.
Task effectiveTask = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.CorruptedTaskRecords.name(), task);
effectiveTask = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.DuplicatedEni.name(), effectiveTask);
effectiveTask = fitBadDataInjection.get().afterImmediate(JobStoreFitAction.ErrorKind.CorruptedTaskPlacementData.name(), effectiveTask);
tasks.add(Either.ofValue(effectiveTask));
}
transactionLogger().logAfterRead(retrieveActiveTaskStatement, "retrieveTasksForJob", task);
} catch (Exception e) {
logger.error("Cannot map serialized task data to Task class: {}", effectiveValue, e);
tasks.add(Either.ofError(e));
}
}
return tasks;
});
})).toList().map(taskErrorPairs -> {
List<Task> tasks = taskErrorPairs.stream().filter(Either::hasValue).map(Either::getValue).collect(Collectors.toList());
int errors = (int) taskErrorPairs.stream().filter(Either::hasError).count();
return Pair.of(tasks, errors);
});
}
/**
 * Loads a single active task by id, emitting {@link JobStoreException#taskDoesNotExist(String)}
 * when no record is found. Transaction logging brackets the read.
 */
@Override
public Observable<Task> retrieveTask(String taskId) {
    Observable<Statement> boundStatement = Observable.fromCallable((Callable<Statement>) () -> {
        transactionLogger().logBeforeRead(retrieveActiveTaskStatement, "retrieveTask", taskId);
        return retrieveActiveTaskStatement.bind(taskId);
    });
    return boundStatement.flatMap(statement -> execute(statement).flatMap(resultSet -> {
        Row firstRow = resultSet.one();
        if (firstRow == null) {
            return Observable.error(JobStoreException.taskDoesNotExist(taskId));
        }
        Task task = deserializeTask(firstRow.getString(0));
        transactionLogger().logAfterRead(retrieveActiveTaskStatement, "retrieveTask", task);
        return Observable.just(task);
    }));
}
/**
 * Stores a new task for an active job: writes the task record and the job/task-id mapping
 * atomically in one batch. Transaction logging brackets the write.
 */
@Override
public Completable storeTask(Task task) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = task.getJobId();
        String taskId = task.getId();
        checkIfJobIsActive(jobId);
        String serializedTask = ObjectMappers.writeValueAsString(mapper, task);
        BatchStatement batch = new BatchStatement();
        batch.add(insertActiveTaskStatement.bind(taskId, serializedTask));
        batch.add(insertActiveTaskIdStatement.bind(jobId, taskId));
        transactionLogger().logBeforeCreate(insertActiveTaskStatement, "storeTask", task);
        return batch;
    }).flatMap(statement ->
            execute(statement).doOnNext(rs -> transactionLogger().logAfterCreate(insertActiveTaskStatement, "storeTask", task))
    ).toCompletable();
}
/**
 * Overwrites the active task record for {@code task.getId()}. The owning job must be active.
 *
 * Fix: the "after update" transaction log was previously emitted inside {@code flatMap},
 * i.e. before the statement actually executed (and even when execution failed). It now runs
 * in {@code doOnNext} after a successful write, consistent with {@code updateJob} and
 * {@code storeTask}.
 */
@Override
public Completable updateTask(Task task) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = task.getJobId();
        String taskId = task.getId();
        checkIfJobIsActive(jobId);
        String taskJsonString = ObjectMappers.writeValueAsString(mapper, task);
        transactionLogger().logBeforeUpdate(insertActiveTaskStatement, "updateTask", task);
        return insertActiveTaskStatement.bind(taskId, taskJsonString);
    }).flatMap(statement ->
            execute(statement).doOnNext(rs -> transactionLogger().logAfterUpdate(insertActiveTaskStatement, "updateTask", task))
    ).toCompletable();
}
/**
 * Atomically archives {@code oldTask} (moving it from the active to the archived tables)
 * and inserts {@code newTask} as the active replacement, all in a single batch.
 * Fails if the owning job is not currently active.
 */
@Override
public Completable replaceTask(Task oldTask, Task newTask) {
return Observable.fromCallable((Callable<Statement>) () -> {
String jobId = newTask.getJobId();
checkIfJobIsActive(jobId);
String taskId = newTask.getId();
String taskJsonString = ObjectMappers.writeValueAsString(mapper, newTask);
// Start from the archive batch for the old task, then append inserts for the new one
// so the swap is applied as one atomic unit.
BatchStatement batchStatement = getArchiveTaskBatchStatement(oldTask);
Statement insertTaskStatement = insertActiveTaskStatement.bind(taskId, taskJsonString);
Statement insertTaskIdStatement = insertActiveTaskIdStatement.bind(jobId, taskId);
batchStatement.add(insertTaskStatement);
batchStatement.add(insertTaskIdStatement);
return batchStatement;
}).flatMap(this::execute).toCompletable();
}
/**
 * Moving task between jobs requires the following Cassandra updates:
 * <ul>
 * <li>Update the active_jobs table with the new jobFrom record</li>
 * <li>Update the active_jobs table with the new jobTo record</li>
 * <li>Update task record in the active_tasks table (to include the new job id)</li>
 * <li>Remove a record from the active_task_ids table for the jobFrom/taskId pair</li>
 * <li>Add a new record in the active_task_ids for the jobTo/taskId pair</li>
 * </ul>
 */
@Override
public Completable moveTask(Job jobFrom, Job jobTo, Task taskAfter) {
return Observable.fromCallable((Callable<Statement>) () -> {
// Both source and destination jobs must be active for the move to be valid.
checkIfJobIsActive(jobFrom.getId());
checkIfJobIsActive(jobTo.getId());
String taskJsonString = ObjectMappers.writeValueAsString(mapper, taskAfter);
transactionLogger().logBeforeUpdate(insertActiveTaskStatement, "moveTask", taskJsonString);
// All five updates listed in the javadoc above are applied as one atomic batch.
BatchStatement batchStatement = new BatchStatement();
batchStatement.add(insertActiveJobStatement.bind(jobFrom.getId(), ObjectMappers.writeValueAsString(mapper, jobFrom)));
batchStatement.add(insertActiveJobStatement.bind(jobTo.getId(), ObjectMappers.writeValueAsString(mapper, jobTo)));
batchStatement.add(insertActiveTaskStatement.bind(taskAfter.getId(), taskJsonString));
batchStatement.add(deleteActiveTaskIdStatement.bind(jobFrom.getId(), taskAfter.getId()));
batchStatement.add(insertActiveTaskIdStatement.bind(jobTo.getId(), taskAfter.getId()));
return batchStatement;
}).flatMap(this::execute).toCompletable().doOnCompleted(() -> transactionLogger().logAfterUpdate(insertActiveTaskStatement, "moveTask", taskAfter));
}
/**
 * "Deletes" an active task by archiving it: the active rows are removed and archived
 * rows inserted in a single batch (see {@code getArchiveTaskBatchStatement}).
 * Fails if the owning job is not currently active.
 */
@Override
public Completable deleteTask(Task task) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = task.getJobId();
        checkIfJobIsActive(jobId);
        BatchStatement archiveTaskBatchStatement = getArchiveTaskBatchStatement(task);
        transactionLogger().logBeforeDelete(archiveTaskBatchStatement, "deleteTask", task);
        return archiveTaskBatchStatement;
    }).flatMap(statement ->
            // Bug fix: the "after" entry was previously logged inside flatMap BEFORE the
            // batch executed. Log it once the delete completes, matching storeTask.
            execute(statement).doOnNext(rs -> transactionLogger().logAfterDelete(statement, "deleteTask", task))
    ).toCompletable();
}
/**
 * This method reads data from the archive table, and if not found checks the active table for its existence.
 * The latter is needed as sometimes a job may not be correctly archived, and we do not have a reconciliation process
 * that would fix it.
 */
@Override
public Observable<Job<?>> retrieveArchivedJob(String jobId) {
// Fallback lookup in the active table only accepts jobs already in the Finished state,
// i.e. jobs that should have been archived but were not.
Observable<Job> action = retrieveEntityById(jobId, Job.class, retrieveArchivedJobStatement)
.switchIfEmpty(retrieveEntityById(jobId, Job.class, retrieveActiveJobStatement)
.filter(job -> job.getStatus().getState() == JobState.Finished)
)
.switchIfEmpty(Observable.error(JobStoreException.jobDoesNotExist(jobId)));
// Raw-type cast bridges Observable<Job> to Observable<Job<?>>; safe here since only the
// generic parameter differs.
return (Observable) action;
}
/**
 * This method reads data from the archive table, and if not found checks the active table for its existence.
 * The latter is needed as sometimes a job may not be correctly archived, and we do not have a reconciliation process
 * that would fix it.
 */
@Override
public Observable<Task> retrieveArchivedTasksForJob(String jobId) {
// Fallback to the active tables only surfaces tasks already in the Finished state,
// i.e. tasks that should have been archived but were not.
return retrieveTasksForJob(jobId, retrieveArchivedTaskIdsForJobStatement, retrieveArchivedTaskStatement)
.switchIfEmpty(retrieveTasksForJob(jobId, retrieveActiveTaskIdsForJobStatement, retrieveActiveTaskStatement)
.filter(task -> task.getStatus().getState() == TaskState.Finished)
);
}
/**
 * Fetches all tasks of a job in two steps: first the task ids from the given index table,
 * then the task records themselves, fetched concurrently (bounded by the configured
 * concurrency limit). Emits nothing when the job has no task ids.
 *
 * @param taskIdStatement query returning the task ids of the job (active or archived variant)
 * @param taskStatement   query returning a single task record by id (active or archived variant)
 */
private Observable<Task> retrieveTasksForJob(String jobId, PreparedStatement taskIdStatement, PreparedStatement taskStatement) {
// MAX_VALUE fetch size pulls the whole id partition in one page.
return Observable.fromCallable(() -> taskIdStatement.bind(jobId).setFetchSize(Integer.MAX_VALUE))
.flatMap(retrieveActiveTaskIdsForJob ->
execute(retrieveActiveTaskIdsForJob).flatMap(taskIdsResultSet -> {
List<String> taskIds = taskIdsResultSet.all().stream().map(row -> row.getString(0)).collect(Collectors.toList());
if (taskIds.isEmpty()) {
return Observable.empty();
}
// One query per task id; merged with bounded concurrency to cap in-flight queries.
List<Observable<ResultSet>> observables = taskIds.stream()
.map(taskStatement::bind)
.map(this::execute)
.collect(Collectors.toList());
return Observable.merge(observables, getConcurrencyLimit()).flatMapIterable(tasksResultSet -> tasksResultSet.all().stream()
.map(row -> row.getString(0))
.map(this::deserializeTask)
.collect(Collectors.toList()));
}));
}
/**
 * This method reads data from the archive table, and if not found checks the active table for its existence.
 * The latter is needed as sometimes a task may not be correctly archived, and we do not have a reconciliation process
 * that would fix it.
 * <p>
 * This method should be only used to get state of a task belonging to a finished job. It would return a persisted
 * state for a task belonging to an active job as well, as a side effect for the workaround implemented here.
 * A caller should not piggyback on this behavior, as it may change at any point in time.
 */
@Override
public Observable<Task> retrieveArchivedTask(String taskId) {
// Archive first; fall back to the active table for Finished-but-unarchived tasks;
// error only when neither table has a matching row.
return retrieveEntityById(taskId, Task.class, retrieveArchivedTaskStatement)
.switchIfEmpty(retrieveEntityById(taskId, Task.class, retrieveActiveTaskStatement)
.filter(task -> task.getStatus().getState() == TaskState.Finished)
)
.switchIfEmpty(Observable.error(JobStoreException.taskDoesNotExist(taskId)));
}
/**
 * This method counts the number of archived tasks for a given job id.
 */
@Override
public Observable<Long> retrieveArchivedTaskCountForJob(String jobId) {
// Delegates to the generic lookup; Long.class triggers the row.getLong(0) branch there.
return retrieveEntityById(jobId, Long.class, retrieveArchivedTasksCountStatement);
}
/**
 * This method deletes an archived task: both the archived task record and its
 * job/task-id index entry are removed in a single batch.
 */
@Override
public Completable deleteArchivedTask(String jobId, String taskId) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        BatchStatement deleteArchivedTaskBatchStatement = getDeleteArchivedTaskBatchStatement(jobId, taskId);
        transactionLogger().logBeforeDelete(deleteArchivedTaskBatchStatement, "deleteArchivedTask", taskId);
        return deleteArchivedTaskBatchStatement;
    }).flatMap(statement ->
            // Bug fix: the "after" entry was previously logged inside flatMap BEFORE the
            // batch executed. Log it once the delete completes, matching storeTask.
            execute(statement).doOnNext(rs -> transactionLogger().logAfterDelete(statement, "deleteArchivedTask", taskId))
    ).toCompletable();
}
/**
 * Generic single-row lookup by id. Empty when no row matches; otherwise decodes the first
 * column according to {@code type}: {@code Long} reads a numeric column (e.g. a COUNT),
 * {@code Job}/{@code Task} go through the dedicated deserializers (which patch missing
 * fields), and anything else is plain JSON-deserialized.
 */
private <T> Observable<T> retrieveEntityById(String id, Class<T> type, PreparedStatement preparedStatement) {
return Observable.fromCallable((Callable<Statement>) () -> preparedStatement.bind(id))
.flatMap(this::execute)
.flatMap(resultSet -> {
Row row = resultSet.one();
if (row == null) {
// Missing row is represented as an empty stream; callers chain switchIfEmpty.
return Observable.empty();
}
try {
if (type == Long.class) {
return Observable.just(type.cast(row.getLong(0)));
}
String value = row.getString(0);
if (type.isAssignableFrom(Job.class)) {
return Observable.just(type.cast(deserializeJob(value)));
}
if (type.isAssignableFrom(Task.class)) {
return Observable.just(type.cast(deserializeTask(value)));
}
return Observable.just(ObjectMappers.readValue(mapper, value, type));
} catch (Exception e) {
// Deserialization failures surface as onError rather than crashing the subscriber.
return Observable.error(e);
}
});
}
/**
 * Backfills a missing or invalid (negative-timestamp) version on a job record, using the
 * job status timestamp. Returns the job unchanged when a valid version is already present
 * or no status is available to derive one from.
 */
private Job<?> ensureHasVersion(Job<?> job) {
    boolean versionMissing = job.getVersion() == null || job.getVersion().getTimestamp() < 0;
    if (!versionMissing || job.getStatus() == null) {
        return job;
    }
    Version patchedVersion = Version.newBuilder().withTimestamp(job.getStatus().getTimestamp()).build();
    return job.toBuilder().withVersion(patchedVersion).build();
}
/**
 * Backfills a missing or invalid (negative-timestamp) version on a task record, using the
 * task status timestamp. Returns the task unchanged when a valid version is already present
 * or no status is available to derive one from.
 */
private Task ensureHasVersion(Task task) {
    boolean versionMissing = task.getVersion() == null || task.getVersion().getTimestamp() < 0;
    if (!versionMissing || task.getStatus() == null) {
        return task;
    }
    Version patchedVersion = Version.newBuilder().withTimestamp(task.getStatus().getTimestamp()).build();
    return task.toBuilder().withVersion(patchedVersion).build();
}
/**
 * Deserializes a job from its JSON column value, backfilling the version field when the
 * persisted record lacks one.
 */
private Job<?> deserializeJob(String value) {
    Job<?> parsed = ObjectMappers.readValue(mapper, value, Job.class);
    return ensureHasVersion(parsed);
}
/**
 * Deserializes a task from its JSON column value. Normalizes a null attributes map to an
 * empty one, and backfills the version field when the persisted record lacks one.
 */
private Task deserializeTask(String value) {
    Task parsed = ObjectMappers.readValue(mapper, value, Task.class);
    if (parsed.getAttributes() == null) {
        parsed = parsed.toBuilder().withAttributes(Collections.emptyMap()).build();
    }
    return ensureHasVersion(parsed);
}
// True when the job id is present in the in-memory active-job-id bucket index.
private boolean isJobActive(String jobId) {
return activeJobIdsBucketManager.itemExists(jobId);
}
/**
 * Builds the batch that archives a job: delete the active job record, delete its bucket
 * index entry, and insert the serialized job into the archived table. The batch is built
 * but not executed here.
 */
private BatchStatement getArchiveJobBatchStatement(Job job) {
    String jobId = job.getId();
    int bucket = activeJobIdsBucketManager.getItemBucket(jobId);
    String serializedJob = writeJobToString(job);
    BatchStatement batch = new BatchStatement();
    batch.add(deleteActiveJobStatement.bind(jobId));
    batch.add(deleteActiveJobIdStatement.bind(bucket, jobId));
    batch.add(insertArchivedJobStatement.bind(jobId, serializedJob));
    return batch;
}
/**
 * Builds the batch that archives a task: delete the active task record and its job/task-id
 * index entry, then insert both into the archived tables. The batch is built but not
 * executed here.
 */
private BatchStatement getArchiveTaskBatchStatement(Task task) {
    String jobId = task.getJobId();
    String taskId = task.getId();
    String serializedTask = ObjectMappers.writeValueAsString(mapper, task);
    BatchStatement batch = new BatchStatement();
    batch.add(deleteActiveTaskStatement.bind(taskId));
    batch.add(deleteActiveTaskIdStatement.bind(jobId, taskId));
    batch.add(insertArchivedTaskStatement.bind(taskId, serializedTask));
    batch.add(insertArchivedTaskIdStatement.bind(jobId, taskId));
    return batch;
}
/**
 * Builds the batch that permanently removes an archived task: its job/task-id index entry
 * and the task record itself. The batch is built but not executed here.
 */
private BatchStatement getDeleteArchivedTaskBatchStatement(String jobId, String taskId) {
    BatchStatement batch = new BatchStatement();
    batch.add(deletedArchivedTaskIdStatement.bind(jobId, taskId));
    batch.add(deletedArchivedTaskStatement.bind(taskId));
    return batch;
}
/**
 * Executes a statement asynchronously and adapts the driver's future to an Observable that
 * emits exactly one ResultSet and completes. Driver failures are wrapped in
 * {@link JobStoreException#cassandraDriverError(Throwable)}; unsubscribing cancels the
 * in-flight query. When tracing is enabled, the query trace id is logged on success.
 */
private Observable<ResultSet> execute(Statement statement) {
return Observable.<ResultSet>create(
emitter -> {
boolean tracingEnabled = configuration.isTracingEnabled();
Statement modifiedStatement = tracingEnabled ? statement.enableTracing() : statement;
// FIT injection (when present) wraps the async call for failure testing;
// otherwise the statement is executed directly.
ListenableFuture<ResultSet> resultSetFuture = fitDriverInjection
.map(injection -> injection.aroundListenableFuture(
"executeAsync", () -> session.executeAsync(modifiedStatement))
)
.orElseGet(() -> session.executeAsync(modifiedStatement));
Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() {
@Override
public void onSuccess(@Nullable ResultSet result) {
if (result != null && tracingEnabled) {
QueryTrace queryTrace = result.getExecutionInfo().getQueryTrace();
if (queryTrace != null) {
logger.info("Executed statement with traceId: {}", queryTrace.getTraceId());
}
}
emitter.onNext(result);
emitter.onCompleted();
}
@Override
public void onFailure(@Nonnull Throwable e) {
emitter.onError(JobStoreException.cassandraDriverError(e));
}
}, MoreExecutors.directExecutor());
// Propagate unsubscription as a query cancellation.
emitter.setCancellation(() -> resultSetFuture.cancel(true));
},
// Single-element stream: no backpressure handling is needed.
Emitter.BackpressureMode.NONE
).doOnError(e -> logger.error("Cassandra operation error: {}", e.getMessage()));
}
/**
 * Concurrency level for parallel Cassandra queries, clamped to a minimum of 2 so that a
 * misconfigured (0 or negative) value cannot serialize or break the merge operators.
 */
private int getConcurrencyLimit() {
    int configured = configuration.getConcurrencyLimit();
    return configured < 2 ? 2 : configured;
}
/**
 * Guard used by the mutating operations: throws (via {@code Exceptions.propagate}, so it
 * can be used inside Rx callables) when the job is not in the active set.
 */
private void checkIfJobIsActive(String jobId) {
    if (isJobActive(jobId)) {
        return;
    }
    throw Exceptions.propagate(JobStoreException.jobMustBeActive(jobId));
}
/**
 * Guard used when creating jobs: throws (via {@code Exceptions.propagate}, so it can be
 * used inside Rx callables) when a job with this id is already active.
 */
private void checkIfJobAlreadyExists(String jobId) {
    if (!isJobActive(jobId)) {
        return;
    }
    throw Exceptions.propagate(JobStoreException.jobAlreadyExists(jobId));
}
}
| 1,402 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassandraStoreModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.titus.api.appscale.store.AppScalePolicyStore;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStore;
/**
 * Guice module wiring the Cassandra-backed implementations of the store interfaces
 * (app-scale policies, jobs, load balancers) and exposing the store configuration proxy.
 */
public class CassandraStoreModule extends AbstractModule {
@Override
protected void configure() {
bind(AppScalePolicyStore.class).to(CassAppScalePolicyStore.class);
bind(JobStore.class).to(CassandraJobStore.class);
bind(LoadBalancerStore.class).to(CassandraLoadBalancerStore.class);
}
// Archaius-backed configuration proxy; a single instance is shared across the injector.
@Provides
@Singleton
public CassandraStoreConfiguration getCassandraStoreConfiguration(ConfigProxyFactory factory) {
return factory.newProxy(CassandraStoreConfiguration.class);
}
}
| 1,403 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassandraStoreConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.Collection;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Archaius configuration for the Cassandra store implementations. All properties live under
 * the {@code titus.ext.cassandra} prefix; defaults are declared via {@link DefaultValue}.
 */
@Configuration(prefix = "titus.ext.cassandra")
public interface CassandraStoreConfiguration {
/**
 * @return the configured V2 keyspace name (defaults to "dev")
 */
@DefaultValue("dev")
String getV2KeySpace();
/**
 * @return whether reading back inconsistent capacity group data should cause the system to error out
 */
@DefaultValue("true")
boolean isFailOnInconsistentCapacityGroupData();
/**
 * @return whether reading back inconsistent agent data should cause the system to error out
 */
@DefaultValue("true")
boolean isFailOnInconsistentAgentData();
/**
 * @return whether reading back inconsistent load balancer data should cause the system to error out
 */
@DefaultValue("false")
boolean isFailOnInconsistentLoadBalancerData();
/**
 * @return whether or not reading back records from the scheduler store should cause the system to error out.
 */
@DefaultValue("false")
boolean isFailOnInconsistentSchedulerData();
/**
 * During bootstrap we run parallel queries on Cassandra cluster. If not constrained, the parallelism level
 * would be number_of_buckets * number_of_jobs_per_bucket * number_of_tasks. The concurrency limit is applied
 * at each level, so for example with limit 10 we will get up to 1000 concurrent Cassandra query requests.
 */
@DefaultValue("10")
int getConcurrencyLimit();
/**
 * Concurrency limit for load balancer target write operations (bulk INSERTs), in number of concurrent queries that
 * can be running per method invocation in {@link CassandraLoadBalancerStore#addOrUpdateTargets(Collection)}.
 *
 * @see CassandraLoadBalancerStore
 */
@DefaultValue("10")
int getLoadBalancerWriteConcurrencyLimit();
/**
 * Concurrency limit for load balancer target DELETEs (bulk CQL lightweight transactions), in number of concurrent
 * queries that can be running per {@link CassandraLoadBalancerStore#removeDeregisteredTargets(Collection)}
 * invocation.
 * <p>
 * Empirical data has shown that there are no benefits in running concurrent LWT DELETEs on the same partition key,
 * so the default is <tt>1</tt> (no concurrency: all LWT DELETEs are serialized). In fact, concurrent LWTs on the
 * same partition key (<tt>load_balancer_id</tt> in this case) has shown to put pressure on C* coordinators and to
 * be overall slower than serializing all calls.
 *
 * @see CassandraLoadBalancerStore
 */
@DefaultValue("1")
int getLoadBalancerDeleteConcurrencyLimit();
/**
 * @return whether or not each query should have tracing enabled.
 */
@DefaultValue("false")
boolean isTracingEnabled();
}
| 1,404 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassAppScalePolicyStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.appscale.model.AutoScalingPolicy;
import com.netflix.titus.api.appscale.model.PolicyConfiguration;
import com.netflix.titus.api.appscale.model.PolicyStatus;
import com.netflix.titus.api.appscale.store.AppScalePolicyStore;
import com.netflix.titus.api.json.ObjectMappers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Observable;
import rx.schedulers.Schedulers;
/**
 * Cassandra-backed {@link AppScalePolicyStore}. All policies and the job->policy-ref-id
 * mapping are loaded into in-memory maps by {@link #init()}; reads are served from those
 * maps, while every mutation writes to Cassandra first and then updates the cache.
 */
@Singleton
public class CassAppScalePolicyStore implements AppScalePolicyStore {
private static Logger log = LoggerFactory.getLogger(CassAppScalePolicyStore.class);
public static final String COLUMN_REF_ID = "ref_id";
public static final String COLUMN_JOB_ID = "job_id";
public static final String COLUMN_VALUE = "value";
public static final String COLUMN_POLICY_ID = "policy_id";
public static final String COLUMN_ALARM_ID = "alarm_id";
public static final String COLUMN_STATUS = "status";
public static final String COLUMN_STATUS_MESSAGE = "status_message";
public static final String METRIC_APP_SCALE_STORE_CREATE_POLICY = "titus.appscale.store.create.policy";
public static final String METRIC_APP_SCALE_STORE_UPDATE_POLICY = "titus.appscale.store.update.policy";
private final CassStoreHelper storeHelper;
private final PreparedStatement insertNewPolicyStmt;
private final PreparedStatement insertJobIdWithPolicyRefStmt;
private final PreparedStatement getPolicyByRefIdStmt;
private final PreparedStatement updatePolicyConfigStmt;
private final PreparedStatement updatePolicyIdStmt;
private final PreparedStatement updateAlarmIdStmt;
private final PreparedStatement updatePolicyStatusStmt;
private final PreparedStatement updateStatusMessageStmt;
private final PreparedStatement getAllJobIdsStmt;
private final Registry registry;
private final Counter createPolicyCounter;
private final Counter updatePolicyCounter;
private Session session;
private final CassandraStoreConfiguration config;
private static String GET_ALL_JOB_IDS = "SELECT * FROM app_scale_jobs;";
private static String GET_POLICY_BY_ID = "SELECT * FROM app_scale_policy where ref_id = ?;";
private static final String INSERT_NEW_POLICY = "INSERT INTO app_scale_policy(ref_id, job_id, status, value) VALUES (?, ?, ?, ?);";
private static final String INSERT_JOB_ID_WITH_POLICY_REF_ID = "INSERT INTO app_scale_jobs(job_id, ref_id) VALUES (?, ?);";
private static final String UPDATE_POLICY_CONFIG = "UPDATE app_scale_policy set value = ? where ref_id = ?;";
private static final String UPDATE_POLICY_ALARM_ID = "UPDATE app_scale_policy set alarm_id = ? where ref_id = ?;";
private static final String UPDATE_POLICY_STATUS = "UPDATE app_scale_policy set status = ? where ref_id = ?;";
private static final String UPDATE_POLICY_POLICY_ID = "UPDATE app_scale_policy set policy_id = ? where ref_id = ?;";
private static final String UPDATE_STATUS_MESSAGE = "UPDATE app_scale_policy set status_message = ? where ref_id = ?;";
// In-memory caches populated by init() and kept in sync by every mutating method.
private volatile Map<String, AutoScalingPolicy> policies;
private volatile Map<String, List<String>> policyRefIdsForJob;
@Inject
public CassAppScalePolicyStore(Session session, CassandraStoreConfiguration config, Registry registry) {
this.session = session;
this.config = config;
this.registry = registry;
// Statements are prepared once at construction time and reused for every call.
this.insertNewPolicyStmt = this.session.prepare(INSERT_NEW_POLICY);
this.insertJobIdWithPolicyRefStmt = this.session.prepare(INSERT_JOB_ID_WITH_POLICY_REF_ID);
this.getPolicyByRefIdStmt = this.session.prepare(GET_POLICY_BY_ID);
this.updateAlarmIdStmt = this.session.prepare(UPDATE_POLICY_ALARM_ID);
this.updatePolicyConfigStmt = this.session.prepare(UPDATE_POLICY_CONFIG);
this.updatePolicyIdStmt = this.session.prepare(UPDATE_POLICY_POLICY_ID);
this.updatePolicyStatusStmt = this.session.prepare(UPDATE_POLICY_STATUS);
this.updateStatusMessageStmt = this.session.prepare(UPDATE_STATUS_MESSAGE);
this.getAllJobIdsStmt = this.session.prepare(GET_ALL_JOB_IDS);
this.storeHelper = new CassStoreHelper(session, Schedulers.io());
this.policies = new ConcurrentHashMap<>();
this.policyRefIdsForJob = new ConcurrentHashMap<>();
createPolicyCounter = registry.counter(METRIC_APP_SCALE_STORE_CREATE_POLICY);
updatePolicyCounter = registry.counter(METRIC_APP_SCALE_STORE_UPDATE_POLICY);
}
// Loads every job->ref-id mapping and then each policy row (bounded concurrency) into
// the in-memory caches.
@Override
public Completable init() {
return storeHelper.execute(getAllJobIdsStmt.bind().setFetchSize(Integer.MAX_VALUE))
.flatMap(rs -> Observable.from(rs.all()))
.map(row -> {
String refId = row.getUUID(COLUMN_REF_ID).toString();
String jobId = row.getString(COLUMN_JOB_ID);
updatePolicyRefIdsForJobMap(jobId, refId);
return refId;
})
.map(refId -> getPolicyByRefIdStmt.bind().setUUID(0, UUID.fromString(refId)).setFetchSize(Integer.MAX_VALUE))
.flatMap(storeHelper::execute, config.getConcurrencyLimit())
.flatMap(Observable::from)
.map(this::buildAutoScalingPolicyFromRow)
.map(autoScalingPolicy -> policies.putIfAbsent(autoScalingPolicy.getRefId(), autoScalingPolicy))
.toCompletable();
}
// Served entirely from the in-memory cache; Deleted policies are included only on request.
@Override
public Observable<AutoScalingPolicy> retrievePolicies(boolean includeArchived) {
List<AutoScalingPolicy> validPolicies = policies.values().stream()
.filter(autoScalingPolicy ->
autoScalingPolicy.getStatus() == PolicyStatus.Pending ||
autoScalingPolicy.getStatus() == PolicyStatus.Deleting ||
autoScalingPolicy.getStatus() == PolicyStatus.Error ||
autoScalingPolicy.getStatus() == PolicyStatus.Applied ||
(includeArchived && autoScalingPolicy.getStatus() == PolicyStatus.Deleted))
.collect(Collectors.toList());
log.info("Retrieving {} policies, includeArchived={}", validPolicies.size(), includeArchived);
return Observable.from(validPolicies);
}
// Inserts the policy row and the job->ref-id row, then updates both caches.
// Emits the generated ref id. New policies always start in Pending state.
@Override
public Observable<String> storePolicy(AutoScalingPolicy autoScalingPolicy) {
return Observable.fromCallable(() -> ObjectMappers.writeValueAsString(ObjectMappers.appScalePolicyMapper(), autoScalingPolicy.getPolicyConfiguration())
).flatMap(policyStr -> {
UUID refId = UUID.randomUUID();
BoundStatement statement = insertNewPolicyStmt.bind(refId, autoScalingPolicy.getJobId(), PolicyStatus.Pending.name(), policyStr);
return storeHelper.execute(statement).map(emptyResultSet -> refId);
}).flatMap(refId -> {
BoundStatement statement = insertJobIdWithPolicyRefStmt.bind(autoScalingPolicy.getJobId(), refId);
return storeHelper.execute(statement).map(emptyResultSet -> refId);
}).map(refId -> {
AutoScalingPolicy updatedPolicy = AutoScalingPolicy.newBuilder()
.withAutoScalingPolicy(autoScalingPolicy)
.withStatus(PolicyStatus.Pending)
.withRefId(refId.toString()).build();
policies.putIfAbsent(refId.toString(), updatedPolicy);
createPolicyCounter.increment();
return refId.toString();
}).map(refId -> {
updatePolicyRefIdsForJobMap(autoScalingPolicy.getJobId(), refId);
return refId.toString();
});
}
// Each update method below follows the same pattern: write the column to Cassandra,
// then replace the cached policy with a copy carrying the new value.
@Override
public Completable updatePolicyConfiguration(AutoScalingPolicy autoScalingPolicy) {
return Observable.fromCallable(() ->
ObjectMappers.writeValueAsString(ObjectMappers.appScalePolicyMapper(), autoScalingPolicy.getPolicyConfiguration())
).flatMap(policyStr -> {
BoundStatement statement = updatePolicyConfigStmt.bind(policyStr, UUID.fromString(autoScalingPolicy.getRefId()));
return storeHelper.execute(statement);
}).map(storeUpdated -> {
policies.put(autoScalingPolicy.getRefId(), autoScalingPolicy);
updatePolicyCounter.increment();
return storeUpdated;
}).toCompletable();
}
@Override
public Completable updatePolicyId(String policyRefId, String policyId) {
return Observable.fromCallable(() -> {
BoundStatement statement = updatePolicyIdStmt.bind(policyId, UUID.fromString(policyRefId));
return storeHelper.execute(statement);
}).flatMap(storeUpdated -> {
AutoScalingPolicy autoScalingPolicy = policies.get(policyRefId);
AutoScalingPolicy updatedPolicy = AutoScalingPolicy.newBuilder().withAutoScalingPolicy(autoScalingPolicy).withPolicyId(policyId).build();
policies.put(policyRefId, updatedPolicy);
updatePolicyCounter.increment();
return storeUpdated;
}).toCompletable();
}
@Override
public Completable updateAlarmId(String policyRefId, String alarmId) {
return Observable.fromCallable(() -> {
BoundStatement statement = updateAlarmIdStmt.bind(alarmId, UUID.fromString(policyRefId));
return storeHelper.execute(statement);
}).flatMap(storeUpdated -> {
AutoScalingPolicy autoScalingPolicy = policies.get(policyRefId);
AutoScalingPolicy updatedPolicy = AutoScalingPolicy.newBuilder().withAutoScalingPolicy(autoScalingPolicy)
.withAlarmId(alarmId).build();
policies.put(policyRefId, updatedPolicy);
updatePolicyCounter.increment();
return storeUpdated;
}).toCompletable();
}
@Override
public Completable updatePolicyStatus(String policyRefId, PolicyStatus policyStatus) {
return Observable.fromCallable(() -> {
BoundStatement statement = updatePolicyStatusStmt.bind(policyStatus.name(), UUID.fromString(policyRefId));
return storeHelper.execute(statement);
}).flatMap(storeUpdated -> {
AutoScalingPolicy autoScalingPolicy = policies.get(policyRefId);
AutoScalingPolicy updatedPolicy = AutoScalingPolicy.newBuilder().withAutoScalingPolicy(autoScalingPolicy)
.withStatus(policyStatus).build();
policies.put(policyRefId, updatedPolicy);
updatePolicyCounter.increment();
return storeUpdated;
}).toCompletable();
}
@Override
public Completable updateStatusMessage(String policyRefId, String statusMessage) {
return Observable.fromCallable(() -> {
BoundStatement statement = updateStatusMessageStmt.bind(statusMessage, UUID.fromString(policyRefId));
return storeHelper.execute(statement);
}).flatMap(storeUpdated -> {
AutoScalingPolicy autoScalingPolicy = policies.get(policyRefId);
AutoScalingPolicy updatedPolicy = AutoScalingPolicy.newBuilder().withAutoScalingPolicy(autoScalingPolicy)
.withStatusMessage(statusMessage).build();
policies.put(policyRefId, updatedPolicy);
updatePolicyCounter.increment();
return storeUpdated;
}).toCompletable();
}
// Served from the caches; Deleted policies are filtered out.
@Override
public Observable<AutoScalingPolicy> retrievePoliciesForJob(String jobId) {
return Observable.fromCallable(() -> {
if (!policyRefIdsForJob.containsKey(jobId)) {
return new ArrayList<String>();
}
return policyRefIdsForJob.get(jobId);
}).flatMap(refIdList -> Observable.from(refIdList))
.map(refId -> policies.get(refId))
.filter(autoScalingPolicy ->
autoScalingPolicy != null &&
autoScalingPolicy.getStatus() != null &&
(autoScalingPolicy.getStatus() == PolicyStatus.Pending ||
autoScalingPolicy.getStatus() == PolicyStatus.Error ||
autoScalingPolicy.getStatus() == PolicyStatus.Deleting ||
autoScalingPolicy.getStatus() == PolicyStatus.Applied));
}
// Cache-only lookup; empty when the ref id is unknown.
@Override
public Observable<AutoScalingPolicy> retrievePolicyForRefId(String policyRefId) {
if (policies.containsKey(policyRefId)) {
return Observable.just(policies.get(policyRefId));
}
return Observable.empty();
}
// Removal is a soft delete: the policy is transitioned to the Deleting status.
@Override
public Completable removePolicy(String policyRefId) {
return updatePolicyStatus(policyRefId, PolicyStatus.Deleting);
}
// Maps one Cassandra row to an AutoScalingPolicy, JSON-decoding the configuration column.
private AutoScalingPolicy buildAutoScalingPolicyFromRow(Row row) {
String refId = row.getUUID(COLUMN_REF_ID).toString();
String jobId = row.getString(COLUMN_JOB_ID);
String policyConfigurationStr = row.getString(COLUMN_VALUE);
String policyId = row.getString(COLUMN_POLICY_ID);
String alarmId = row.getString(COLUMN_ALARM_ID);
String status = row.getString(COLUMN_STATUS);
String statusMessage = row.getString(COLUMN_STATUS_MESSAGE);
PolicyConfiguration policyConfiguration = ObjectMappers.readValue(ObjectMappers.appScalePolicyMapper(), policyConfigurationStr, PolicyConfiguration.class);
return AutoScalingPolicy.newBuilder()
.withRefId(refId)
.withJobId(jobId)
.withPolicyConfiguration(policyConfiguration)
.withAlarmId(alarmId)
.withPolicyId(policyId)
.withStatus(PolicyStatus.valueOf(status))
.withStatusMessage(statusMessage)
.build();
}
// Appends refId to the job's ref-id list, creating the list on first use. putIfAbsent
// plus computeIfPresent keeps the update race-free on the concurrent map.
private void updatePolicyRefIdsForJobMap(String jobId, String refId) {
List<String> existingValue = policyRefIdsForJob.putIfAbsent(jobId, new ArrayList<>(Arrays.asList(refId)));
if (existingValue != null) {
policyRefIdsForJob.computeIfPresent(jobId, (jid, currentList) -> {
currentList.add(refId);
return currentList;
});
}
}
}
| 1,405 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/BalancedBucketManager.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
/**
 * This manager distributes items in a bucket by returning what bucket should be used based on the max bucket size. The balancing strategy
 * of the manager is to return the bucket with the least amount of items in it. The max bucket size is a best effort and can be breached:
 * a new bucket is created only once every existing bucket is full.
 *
 * <p>All mutating operations are serialized on an internal mutex; read methods are lock free and may observe
 * a slightly stale view of the counters.
 *
 * @param <T> the item type; items are used as map keys, so they must implement {@code equals}/{@code hashCode} properly
 */
public class BalancedBucketManager<T> {

    private final Object mutex = new Object();

    private final int maxBucketSize;
    // final: the registry is injected once via the constructor and never replaced
    private final Registry registry;

    // item -> index of the bucket it was assigned to
    private final Map<T, Integer> itemToBucket;
    // bucket index -> number of items currently held by that bucket
    private final Map<Integer, Integer> bucketSizes;
    // highest bucket index created so far (buckets are never removed)
    private final AtomicInteger highestBucketIndex;
    // cached index of the emptiest bucket; refreshed by updateBucketCounters()
    private final AtomicInteger bucketWithLeastItems;

    private final Id bucketSizeId;

    /**
     * @param initialBucketCount number of empty buckets to pre-create (indexes 0..initialBucketCount-1)
     * @param maxBucketSize      best-effort per-bucket capacity; a new bucket is created once all buckets reach it
     * @param metricNameRoot     prefix for the published per-bucket size gauges
     * @param registry           spectator registry used to publish metrics
     */
    public BalancedBucketManager(int initialBucketCount, int maxBucketSize, String metricNameRoot, Registry registry) {
        this.maxBucketSize = maxBucketSize;
        this.registry = registry;
        this.itemToBucket = new ConcurrentHashMap<>();
        this.bucketSizes = new ConcurrentHashMap<>();
        this.highestBucketIndex = new AtomicInteger();
        this.bucketWithLeastItems = new AtomicInteger();

        bucketSizeId = registry.createId(metricNameRoot + ".bucketSize");

        createInitialBuckets(initialBucketCount);
    }

    /**
     * Returns the next bucket that should be used, i.e. the bucket currently holding the fewest items.
     *
     * @return bucket index
     */
    public int getNextBucket() {
        return bucketWithLeastItems.get();
    }

    /**
     * Add an item to the bucket.
     *
     * @param bucket index of the bucket to add to
     * @param item   item to add
     */
    public void addItem(int bucket, T item) {
        addItems(bucket, Collections.singletonList(item));
    }

    /**
     * Add a list of items to the bucket.
     *
     * @param bucket index of the bucket to add to
     * @param items  items to add
     */
    public void addItems(int bucket, List<T> items) {
        synchronized (mutex) {
            int currentBucketSize = bucketSizes.getOrDefault(bucket, 0);
            for (T item : items) {
                itemToBucket.put(item, bucket);
            }
            int newBucketSize = currentBucketSize + items.size();
            bucketSizes.put(bucket, newBucketSize);
            updateBucketCounters();
        }
    }

    /**
     * Delete an item. Does nothing if the item is not tracked.
     *
     * @param item item to delete
     */
    public void deleteItem(T item) {
        synchronized (mutex) {
            Integer bucket = itemToBucket.get(item);
            if (bucket != null) {
                itemToBucket.remove(item);
                Integer currentBucketSize = bucketSizes.get(bucket);
                bucketSizes.replace(bucket, currentBucketSize, currentBucketSize - 1);
                updateBucketCounters();
            }
        }
    }

    /**
     * Get all items in all buckets.
     *
     * @return unmodifiable list of the items in all buckets
     */
    public List<T> getItems() {
        return Collections.unmodifiableList(new ArrayList<>(itemToBucket.keySet()));
    }

    /**
     * Check to see if an item exists in any of the buckets.
     *
     * @param item item to look up
     * @return true if the item exists in any bucket
     */
    public boolean itemExists(T item) {
        return itemToBucket.containsKey(item);
    }

    /**
     * Get the bucket of an item.
     *
     * <p>Note: the return type is a primitive {@code int}, so looking up an item that does not exist
     * throws a {@link NullPointerException} while unboxing rather than returning {@code null}.
     * Guard calls with {@link #itemExists(Object)}.
     *
     * @param item item to look up
     * @return the bucket index of the item
     * @throws NullPointerException if the item does not exist in any bucket
     */
    public int getItemBucket(T item) {
        return itemToBucket.get(item);
    }

    // Pre-creates the initial set of empty buckets and publishes their metrics.
    private void createInitialBuckets(int initialBucketCount) {
        for (int i = 0; i < initialBucketCount; i++) {
            bucketSizes.put(i, 0);
        }
        updateBucketCounters();
    }

    // Recomputes the emptiest bucket and the highest bucket index, creating a new bucket when
    // all existing ones are full. Must be called while holding the mutex (or from the constructor).
    private void updateBucketCounters() {
        int maxBucket = 0;
        int smallestBucket = 0;
        int smallestBucketSize = maxBucketSize;
        boolean allBucketsFull = true;

        for (Map.Entry<Integer, Integer> entry : bucketSizes.entrySet()) {
            Integer bucket = entry.getKey();
            if (bucket > maxBucket) {
                maxBucket = bucket;
            }
            Integer bucketSize = entry.getValue();
            if (bucketSize < smallestBucketSize) {
                smallestBucketSize = bucketSize;
                smallestBucket = bucket;
            }
            if (bucketSize < maxBucketSize) {
                allBucketsFull = false;
            }
            updateBucketMetrics(bucket, bucketSize);
        }

        if (allBucketsFull) {
            // only create a new bucket if all existing buckets are full
            int newBucket = maxBucket + 1;
            bucketSizes.put(newBucket, 0);
            highestBucketIndex.getAndSet(newBucket);
            bucketWithLeastItems.getAndSet(newBucket);
        } else {
            highestBucketIndex.getAndSet(maxBucket);
            bucketWithLeastItems.getAndSet(smallestBucket);
        }
    }

    // Publishes the per-bucket size gauge.
    private void updateBucketMetrics(int bucket, int bucketSize) {
        Gauge bucketSizeGauge = registry.gauge(bucketSizeId.withTag("bucket", Integer.toString(bucket)));
        bucketSizeGauge.set(bucketSize);
    }
}
| 1,406 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/CassandraLoadBalancerStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancerState;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTargetState;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStore;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStoreException;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.util.guice.annotation.Activator;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.common.util.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import rx.Completable;
import rx.Observable;
import rx.schedulers.Schedulers;
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;
import static com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerSanitizerBuilder.LOAD_BALANCER_SANITIZER;
/**
 * Cassandra-backed {@link LoadBalancerStore}. Job/load-balancer association records are cached in
 * memory ({@code loadBalancerStateMap} plus a per-job index), populated once via {@link #init()},
 * so association reads never touch Cassandra. Load balancer target records are always read from
 * and written to Cassandra directly.
 */
@Singleton
public class CassandraLoadBalancerStore implements LoadBalancerStore {
    public static final String LOAD_BALANCER_CASSANDRA_SESSION = "loadbalancer";

    private static Logger logger = LoggerFactory.getLogger(CassandraLoadBalancerStore.class);

    private static final String TABLE_LOAD_BALANCER_ASSOCIATIONS = "load_balancer_jobs";
    private static final String TABLE_LOAD_BALANCER_TARGETS = "load_balancer_targets";
    private static final String COLUMN_JOB_ID = "job_id";
    private static final String COLUMN_TASK_ID = "task_id";
    private static final String COLUMN_LOAD_BALANCER = "load_balancer_id";
    private static final String COLUMN_IP_ADDRESS = "ip_address";
    private static final String COLUMN_STATE = "state";

    // Integer.MAX_VALUE effectively disables driver-side paging: all rows come back in one page
    private static final Integer FETCH_SIZE = Integer.MAX_VALUE;
    private static final long FETCH_TIMEOUT_MS = 120_000;

    private final PreparedStatement selectAssociations;
    private final PreparedStatement insertAssociation;
    private final PreparedStatement deleteAssociation;
    private final PreparedStatement selectTargetsForLoadBalancer;
    private final PreparedStatement insertTarget;
    private final PreparedStatement deleteDeregisteredTarget;

    private final CassandraStoreConfiguration configuration;
    private final EntitySanitizer entitySanitizer;
    private final Session session;
    private final CassStoreHelper storeHelper;

    /**
     * Stores a Job/Load Balancer's current state.
     */
    private final ConcurrentMap<JobLoadBalancer, JobLoadBalancer.State> loadBalancerStateMap;

    /**
     * Optimized index for lookups of associated JobLoadBalancers by Job ID.
     * Sets held in here must be all immutable (usually via Collections.unmodifiableSet).
     * Sets held here must be sorted to allow sorted page access (usually via JobID natural String ordering).
     */
    private final ConcurrentMap<String, SortedSet<JobLoadBalancer>> jobToAssociatedLoadBalancersMap;

    private static final String GET_ALL_ASSOCIATIONS = String
            .format("SELECT %s, %s, %s FROM %s;",
                    COLUMN_JOB_ID,
                    COLUMN_LOAD_BALANCER,
                    COLUMN_STATE,
                    TABLE_LOAD_BALANCER_ASSOCIATIONS);
    private static final String INSERT_ASSOCIATION = String
            .format("INSERT INTO %s(%s, %s, %s) VALUES (?, ?, ?);",
                    TABLE_LOAD_BALANCER_ASSOCIATIONS,
                    COLUMN_JOB_ID,
                    COLUMN_LOAD_BALANCER,
                    COLUMN_STATE);
    private static final String DELETE_ASSOCIATION = String
            .format("DELETE FROM %s WHERE %s = ? AND %s = ?",
                    TABLE_LOAD_BALANCER_ASSOCIATIONS,
                    COLUMN_JOB_ID,
                    COLUMN_LOAD_BALANCER);

    // Maps a C* association row to a (JobLoadBalancer, State) pair; null states are treated as DISSOCIATED.
    private static Pair<JobLoadBalancer, JobLoadBalancer.State> buildLoadBalancerStatePairFromRow(Row row) {
        String jobId = row.getString(COLUMN_JOB_ID);
        String loadBalancerId = row.getString(COLUMN_LOAD_BALANCER);
        String state = row.getString(COLUMN_STATE);
        JobLoadBalancer.State parsedState;
        if (state == null) {
            logger.warn("Unexpected null state for association {}:{}", jobId, loadBalancerId);
            parsedState = JobLoadBalancer.State.DISSOCIATED;
        } else {
            parsedState = JobLoadBalancer.State.valueOf(state.toUpperCase());
        }
        return Pair.of(new JobLoadBalancer(jobId, loadBalancerId), parsedState);
    }

    // Maps a C* target row to its model object. NOTE(review): state parsing assumes the stored
    // string exactly matches a LoadBalancerTarget.State enum constant — confirm writers always use name().
    private static LoadBalancerTargetState buildLoadBalancerTargetStateFromRow(Row row) {
        return new LoadBalancerTargetState(
                new LoadBalancerTarget(
                        row.getString(COLUMN_LOAD_BALANCER),
                        row.getString(COLUMN_TASK_ID),
                        row.getString(COLUMN_IP_ADDRESS)
                ),
                LoadBalancerTarget.State.valueOf(row.getString(COLUMN_STATE))
        );
    }

    @Inject
    public CassandraLoadBalancerStore(CassandraStoreConfiguration configuration,
                                      @Named(LOAD_BALANCER_SANITIZER) EntitySanitizer entitySanitizer,
                                      @Named(LOAD_BALANCER_CASSANDRA_SESSION) Session session) {
        this.configuration = configuration;
        this.entitySanitizer = entitySanitizer;
        this.session = session;
        this.storeHelper = new CassStoreHelper(session, Schedulers.io());
        this.loadBalancerStateMap = new ConcurrentHashMap<>();
        this.jobToAssociatedLoadBalancersMap = new ConcurrentHashMap<>();

        // All statements use LOCAL_QUORUM so reads observe prior quorum writes within the local DC
        this.selectAssociations = session.prepare(GET_ALL_ASSOCIATIONS).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        this.insertAssociation = session.prepare(INSERT_ASSOCIATION).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        this.deleteAssociation = session.prepare(DELETE_ASSOCIATION).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        this.selectTargetsForLoadBalancer = session.prepare(
                select(COLUMN_LOAD_BALANCER, COLUMN_IP_ADDRESS, COLUMN_TASK_ID, COLUMN_STATE)
                        .from(TABLE_LOAD_BALANCER_TARGETS)
                        .where(eq(COLUMN_LOAD_BALANCER, bindMarker()))
        ).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        this.insertTarget = session.prepare(
                insertInto(TABLE_LOAD_BALANCER_TARGETS).values(
                        Arrays.asList(COLUMN_LOAD_BALANCER, COLUMN_IP_ADDRESS, COLUMN_TASK_ID, COLUMN_STATE),
                        Arrays.asList(bindMarker(), bindMarker(), bindMarker(), bindMarker())
                )
        ).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        // Conditional delete: only removes the row if it is still in the DEREGISTERED state
        this.deleteDeregisteredTarget = session.prepare(
                delete().from(TABLE_LOAD_BALANCER_TARGETS)
                        .where(eq(COLUMN_LOAD_BALANCER, bindMarker()))
                        .and(eq(COLUMN_IP_ADDRESS, bindMarker()))
                        .onlyIf(eq(COLUMN_STATE, "DEREGISTERED"))
        ).setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
    }

    /**
     * Initialize the store from current C* data. Must be called prior to store usage.
     * Blocks until all association rows have been loaded into the in-memory maps.
     */
    @Activator
    public void init() {
        boolean failOnError = configuration.isFailOnInconsistentLoadBalancerData();
        loadAllAssociations(failOnError).block();
    }

    // Loads every association row into loadBalancerStateMap and jobToAssociatedLoadBalancersMap.
    // The collect(Object::new, ...) is used purely for its side effects on the maps; the
    // accumulated Object is discarded by then().
    private Mono<Void> loadAllAssociations(boolean failOnError) {
        return ReactorExt.toFlux(storeHelper.execute(selectAssociations.bind()))
                .timeout(Duration.ofMillis(FETCH_TIMEOUT_MS))
                .next()
                .flatMapMany(Flux::fromIterable)
                .map(CassandraLoadBalancerStore::buildLoadBalancerStatePairFromRow)
                .collect(Object::new, (ignored, loadBalancerStatePair) -> {
                    JobLoadBalancer jobLoadBalancer = loadBalancerStatePair.getLeft();
                    JobLoadBalancer.State state = loadBalancerStatePair.getRight();
                    Set<ValidationError> violations = entitySanitizer.validate(jobLoadBalancer);
                    if (violations.isEmpty()) {
                        loadBalancerStateMap.putIfAbsent(jobLoadBalancer, state);
                        // NOTE(review): a mutable TreeSet is stored here, whereas
                        // addJobLoadBalancerAssociation() stores unmodifiable sets — confirm callers never mutate
                        SortedSet<JobLoadBalancer> jobLoadBalancers = jobToAssociatedLoadBalancersMap.getOrDefault(jobLoadBalancer.getJobId(), new TreeSet<>());
                        jobLoadBalancers.add(jobLoadBalancer);
                        jobToAssociatedLoadBalancersMap.put(jobLoadBalancer.getJobId(), jobLoadBalancers);
                    } else {
                        if (failOnError) {
                            throw LoadBalancerStoreException.badData(jobLoadBalancer, violations);
                        }
                        logger.warn("Ignoring bad association record of {} due to validation constraint violations: violations={}", jobLoadBalancer, violations);
                    }
                })
                .then();
    }

    /**
     * Returns an observable stream of the currently associated load balancers for a Job.
     * Emits from the in-memory snapshot; Cassandra is not queried.
     *
     * @param jobId
     * @return
     */
    @Override
    public Observable<JobLoadBalancer> getAssociatedLoadBalancersForJob(String jobId) {
        return Observable.from(getAssociatedLoadBalancersSetForJob(jobId));
    }

    /**
     * This is in the critical path and should be fast, which is why it avoids lock contention, and keeps items indexed
     * by jobId yielding O(1).
     *
     * @param jobId
     * @return The current snapshot of what is currently being tracked
     */
    @Override
    public Set<JobLoadBalancer> getAssociatedLoadBalancersSetForJob(String jobId) {
        logger.debug("Getting all associated load balancers for job {}", jobId);
        return jobToAssociatedLoadBalancersMap.getOrDefault(jobId, Collections.emptySortedSet());
    }

    /**
     * Returns all current load balancer associations from the in-memory state.
     *
     * @return
     */
    @Override
    public List<JobLoadBalancerState> getAssociations() {
        return loadBalancerStateMap.entrySet().stream()
                .map(JobLoadBalancerState::from)
                .collect(Collectors.toList());
    }

    @Override
    public List<JobLoadBalancer> getAssociationsPage(int offset, int limit) {
        // Create a sorted copy of the current keys to iterate. Keys added/removed after
        // the copy is created may lead to staleness in the data being iterated.
        // Use native string sorting to determine order.
        // NOTE(review): the key set is streamed directly (no explicit snapshot copy is taken);
        // ConcurrentHashMap iteration is weakly consistent, so the page may reflect concurrent updates.
        return jobToAssociatedLoadBalancersMap.keySet().stream()
                .flatMap(jobId -> {
                    SortedSet<JobLoadBalancer> jobLoadBalancerSortedSet = jobToAssociatedLoadBalancersMap.getOrDefault(jobId, Collections.emptySortedSet());
                    return jobLoadBalancerSortedSet.stream();
                })
                .sorted()
                .skip(offset)
                .limit(limit)
                .collect(Collectors.toList());
    }

    /**
     * Persists the association state to C* and updates the in-memory maps: ASSOCIATED adds to the
     * per-job index, DISSOCIATED removes from it. The C* write and map updates happen under a
     * single instance-wide lock so they stay consistent with each other.
     *
     * @param jobLoadBalancer
     * @param state
     * @return
     */
    @Override
    public Completable addOrUpdateLoadBalancer(JobLoadBalancer jobLoadBalancer, JobLoadBalancer.State state) {
        logger.debug("Updating load balancer {} to state {}", jobLoadBalancer, state);
        return Completable.fromAction(() -> {
            synchronized (this) {
                BoundStatement stmt = insertAssociation.bind(jobLoadBalancer.getJobId(), jobLoadBalancer.getLoadBalancerId(), state.name());
                // NOTE(review): rs is unused; session.execute() is synchronous and throws on failure,
                // which is what keeps the in-memory update below from happening on a failed write
                ResultSet rs = session.execute(stmt);
                loadBalancerStateMap.put(jobLoadBalancer, state);
                if (JobLoadBalancer.State.ASSOCIATED == state) {
                    addJobLoadBalancerAssociation(jobLoadBalancer);
                } else if (JobLoadBalancer.State.DISSOCIATED == state) {
                    removeJobLoadBalancerAssociation(jobLoadBalancer);
                }
            }
        });
    }

    /**
     * Removes the persisted Job/load balancer and state and removes in-memory state.
     *
     * @param jobLoadBalancer
     * @return
     */
    @Override
    public Completable removeLoadBalancer(JobLoadBalancer jobLoadBalancer) {
        logger.debug("Removing load balancer {}", jobLoadBalancer);
        BoundStatement stmt = deleteAssociation.bind(jobLoadBalancer.getJobId(), jobLoadBalancer.getLoadBalancerId());
        return storeHelper.execute(stmt)
                // Note: If the C* entry doesn't exist, it'll fail here and not remove from the map.
                .map(rs -> {
                    loadBalancerStateMap.remove(jobLoadBalancer);
                    removeJobLoadBalancerAssociation(jobLoadBalancer);
                    return jobLoadBalancer;
                })
                .toCompletable();
    }

    // Counts associations for a job by scanning the whole state map (any state counts, not just ASSOCIATED).
    @Override
    public int getNumLoadBalancersForJob(String jobId) {
        int loadBalancerCount = 0;
        for (Map.Entry<JobLoadBalancer, JobLoadBalancer.State> entry : loadBalancerStateMap.entrySet()) {
            if (entry.getKey().getJobId().equals(jobId)) {
                loadBalancerCount++;
            }
        }
        return loadBalancerCount;
    }

    // Writes the target records in parallel, bounded by the configured write concurrency limit.
    // Errors are delayed so every insert is attempted before the Mono fails.
    @Override
    public Mono<Void> addOrUpdateTargets(Collection<LoadBalancerTargetState> targets) {
        List<Mono<Void>> insertOperations = targets.stream()
                .map(this::addOrUpdateTarget)
                .collect(Collectors.toList());
        int limit = configuration.getLoadBalancerWriteConcurrencyLimit();
        // prefetch does not matter here because operations don't produce any result (they are Mono<Void>)
        return Flux.mergeSequentialDelayError(insertOperations, limit, limit)
                .ignoreElements()
                .doOnSubscribe(ignored -> {
                    Map<String, Long> countPerLoadBalancer = targets.stream().collect(Collectors.groupingBy(
                            target -> target.getLoadBalancerTarget().getLoadBalancerId(),
                            Collectors.counting()
                    ));
                    logger.info("Inserting/updating targets: {}", countPerLoadBalancer);
                    logger.debug("Inserting/updating {} targets. Details: {}", targets.size(), targets);
                });
    }

    // Validates and writes a single target record; invalid records either fail the Mono or are
    // skipped, depending on the failOnInconsistentLoadBalancerData configuration.
    private Mono<Void> addOrUpdateTarget(LoadBalancerTargetState target) {
        Set<ValidationError> violations = entitySanitizer.validate(target);
        if (!violations.isEmpty()) {
            if (configuration.isFailOnInconsistentLoadBalancerData()) {
                return Mono.error(LoadBalancerStoreException.badData(target, violations));
            }
            logger.warn("Ignoring bad target record of {} due to validation constraint violations: violations={}", target, violations);
            return Mono.empty();
        }

        // bind asynchronously to avoid sharing the same BoundStatement across all subscriptions
        return Mono.fromCallable(() -> insertTarget.bind(
                target.getLoadBalancerTarget().getLoadBalancerId(),
                target.getIpAddress(),
                target.getLoadBalancerTarget().getTaskId(),
                target.getState().name()
        )).flatMap(statement -> ReactorExt.toMono(storeHelper.execute(statement).toCompletable()));
    }

    // Conditionally deletes target rows (only while still DEREGISTERED), bounded by the configured
    // delete concurrency limit, delaying errors until all deletes have been attempted.
    @Override
    public Mono<Void> removeDeregisteredTargets(Collection<LoadBalancerTarget> toRemove) {
        // bind asynchronously to avoid sharing the same BoundStatement across all subscriptions
        Flux<Mono<Void>> deleteOperations = Flux.fromIterable(toRemove)
                .map(target -> storeHelper.execute(
                        deleteDeregisteredTarget.bind(target.getLoadBalancerId(), target.getIpAddress())
                ).toCompletable())
                .map(ReactorExt::toMono);

        int limit = configuration.getLoadBalancerDeleteConcurrencyLimit();
        // prefetch does not matter here because operations don't produce any result (they are Mono<Void>)
        return Flux.mergeSequentialDelayError(deleteOperations, limit, limit)
                .ignoreElements()
                .doOnSubscribe(ignored -> logger.debug("Removing targets {}", toRemove));
    }

    // Streams all target rows for one load balancer id straight from Cassandra.
    @Override
    public Flux<LoadBalancerTargetState> getLoadBalancerTargets(String loadBalancerId) {
        // bind asynchronously to avoid sharing the same BoundStatement across all subscriptions
        return Mono.fromCallable(() -> selectTargetsForLoadBalancer.bind(loadBalancerId).setFetchSize(FETCH_SIZE))
                .flatMapMany(selectStmt ->
                        ReactorExt.toFlux(storeHelper.execute(selectStmt))
                                .timeout(Duration.ofMillis(FETCH_TIMEOUT_MS))
                                .next()
                                .flatMapMany(Flux::fromIterable)
                                .map(CassandraLoadBalancerStore::buildLoadBalancerTargetStateFromRow)
                );
    }

    /**
     * Adds a new Job and associated Load Balancer by replacing any current set of associations
     * for the Job. The stored set is always a fresh unmodifiable copy (copy-on-write).
     *
     * @param association
     */
    private void addJobLoadBalancerAssociation(JobLoadBalancer association) {
        jobToAssociatedLoadBalancersMap.compute(association.getJobId(),
                (jobId, associations) -> {
                    if (associations == null) {
                        associations = new TreeSet<>();
                    }
                    // Add all of the current associations back, plus the new association
                    SortedSet<JobLoadBalancer> copy = new TreeSet<>(associations);
                    copy.add(association);
                    // Return the new, unmodifiable instance of the set.
                    return Collections.unmodifiableSortedSet(copy);
                }
        );
    }

    /**
     * Removes a Job's associated Load Balancer by replacing any current set of associations
     * for the Job. Returning null from computeIfPresent removes the map entry entirely when
     * the last association for the job is removed.
     *
     * @param association
     */
    private void removeJobLoadBalancerAssociation(JobLoadBalancer association) {
        Supplier<TreeSet<JobLoadBalancer>> supplier = () -> new TreeSet<>();
        jobToAssociatedLoadBalancersMap.computeIfPresent(association.getJobId(),
                (jobId, associations) -> {
                    final SortedSet<JobLoadBalancer> copy = associations.stream()
                            .filter(entry -> !entry.equals(association))
                            .collect(Collectors.toCollection(supplier));
                    if (copy.isEmpty()) {
                        return null;
                    }
                    return Collections.unmodifiableSortedSet(copy);
                }
        );
    }
}
| 1,407 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/AwsLoadBalancerConnectorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import java.util.List;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsync;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsyncClientBuilder;
import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupAssociationLimitException;
import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupNotFoundException;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.connector.cloud.CloudConnectorException;
import com.netflix.titus.api.connector.cloud.LoadBalancer;
import com.netflix.titus.api.connector.cloud.LoadBalancerConnector;
import com.netflix.titus.api.loadbalancer.service.LoadBalancerException;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.ext.aws.loadbalancer.AwsLoadBalancerConnector;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import rx.observers.TestSubscriber;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests the AWS load balancer connector. Those tests which require AWS credentials (and are thus not portable) should
 * be DISABLED (annotated with {@code @Ignore}).
 * <p>
 * In order to execute the disabled tests, the AWS resources to test are expected to have been created already and their
 * info is consumed via local credential files and environment variables.
 */
public class AwsLoadBalancerConnectorTest {
    private static final String REGION = "us-east-1";
    private static final String DEFAULT_ARN = "arn:aws:elasticloadbalancing:us-east-2:123456789012:loadbalancer/app/my-load-balancer/1234567890123456";

    private LoadBalancerConnector awsLoadBalancerConnector;

    // Target group ARNs come from the environment so the AWS-backed (ignored) tests can point at real resources;
    // the mock-based tests only need syntactically valid ARNs and fall back to DEFAULT_ARN.
    private final String validIpTargetGroup = StringExt.getNonEmptyOrDefault(System.getenv("validTargetGroup"), DEFAULT_ARN);
    private final String invalidIpTargetGroup = StringExt.getNonEmptyOrDefault(System.getenv("invalidTargetGroup"), DEFAULT_ARN);
    private final String nonExistentTarget = StringExt.getNonEmptyOrDefault(System.getenv("nonExistentTargetGroup"), DEFAULT_ARN);
    private final String targetGroupWithTargets = StringExt.getNonEmptyOrDefault(System.getenv("targetGroupWithTargets"), DEFAULT_ARN);

    @Before
    public void setUp() {
        ProfileCredentialsProvider credentialsProvider = new ProfileCredentialsProvider();
        AmazonElasticLoadBalancingAsync albClient = AmazonElasticLoadBalancingAsyncClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withRegion(REGION)
                .build();
        awsLoadBalancerConnector = getAwsLoadBalancerConnector(albClient);
    }

    /** An IP-mode target group must validate successfully. */
    @Ignore("AWS dependencies")
    @Test
    public void validateIpTargetGroupTest() {
        // Parameterized subscriber instead of a raw type to avoid unchecked operations
        TestSubscriber<Void> testSubscriber = new TestSubscriber<>();
        awsLoadBalancerConnector.isValid(validIpTargetGroup).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
    }

    /** An instance-mode target group must be rejected with a CloudConnectorException. */
    @Ignore("AWS dependencies")
    @Test
    public void validateInstanceTargetGroupTest() {
        TestSubscriber<Void> testSubscriber = new TestSubscriber<>();
        awsLoadBalancerConnector.isValid(invalidIpTargetGroup).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        testSubscriber.assertError(CloudConnectorException.class);
    }

    /** A missing target group must surface the AWS TargetGroupNotFoundException unchanged. */
    @Ignore("AWS dependencies")
    @Test
    public void validateNonExistentTargetGroupTest() {
        TestSubscriber<Void> testSubscriber = new TestSubscriber<>();
        awsLoadBalancerConnector.isValid(nonExistentTarget).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        testSubscriber.assertError(TargetGroupNotFoundException.class);
    }

    /** Fetching a populated target group must return its registered IPs. */
    @Ignore("AWS dependencies")
    @Test
    public void testGetIpTargets() {
        TestSubscriber<LoadBalancer> testSubscriber = new TestSubscriber<>();
        awsLoadBalancerConnector.getLoadBalancer(targetGroupWithTargets).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
        // Typed subscriber makes the unchecked (LoadBalancer) cast unnecessary
        LoadBalancer loadBalancer = testSubscriber.getOnNextEvents().get(0);
        assertEquals(targetGroupWithTargets, loadBalancer.getId());
        assertThat(loadBalancer.getRegisteredIps()).isNotEmpty();
    }

    /** TargetGroupNotFoundException from AWS must be translated into a REMOVED load balancer state. */
    @Test
    public void validateTargetGroupNotFoundExceptionIsTranslatedToRemovedState() {
        TestSubscriber<LoadBalancer> testSubscriber = new TestSubscriber<>();
        AmazonElasticLoadBalancingAsync albClient = mock(AmazonElasticLoadBalancingAsync.class);
        when(albClient.describeTargetHealthAsync(any(), any())).thenThrow(TargetGroupNotFoundException.class);
        awsLoadBalancerConnector = getAwsLoadBalancerConnector(albClient);

        awsLoadBalancerConnector.getLoadBalancer(targetGroupWithTargets).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        testSubscriber.assertNoErrors();
        testSubscriber.assertValueCount(1);
        LoadBalancer loadBalancer = testSubscriber.getOnNextEvents().get(0);
        assertEquals(targetGroupWithTargets, loadBalancer.getId());
        assertEquals(LoadBalancer.State.REMOVED, loadBalancer.getState());
    }

    /** Any other AWS exception must propagate unmodified (no wrapping in LoadBalancerException). */
    @Test
    public void validateExceptionsAreUnmodifiedWithMockClientTest() {
        // Parameterized Class constant instead of a raw Class
        Class<TargetGroupAssociationLimitException> defaultExceptionClass = TargetGroupAssociationLimitException.class;
        TestSubscriber<LoadBalancer> testSubscriber = new TestSubscriber<>();
        AmazonElasticLoadBalancingAsync albClient = mock(AmazonElasticLoadBalancingAsync.class);
        when(albClient.describeTargetHealthAsync(any(), any())).thenThrow(defaultExceptionClass);
        awsLoadBalancerConnector = getAwsLoadBalancerConnector(albClient);

        awsLoadBalancerConnector.getLoadBalancer(targetGroupWithTargets).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent();
        List<Throwable> errors = testSubscriber.getOnErrorEvents();
        assertEquals(1, errors.size());
        Throwable throwable = errors.get(0);
        assertFalse(throwable instanceof LoadBalancerException);
        assertTrue(throwable instanceof TargetGroupAssociationLimitException);
    }

    // Builds a connector whose AmazonClientProvider always hands back the supplied (usually mocked) ALB client.
    private AwsLoadBalancerConnector getAwsLoadBalancerConnector(AmazonElasticLoadBalancingAsync albClient) {
        AmazonClientProvider amazonClientProvider = mock(AmazonClientProvider.class);
        when(amazonClientProvider.getLoadBalancingClient(any())).thenReturn(albClient);
        return new AwsLoadBalancerConnector(amazonClientProvider, new DefaultRegistry());
    }
}
| 1,408 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/AwsIamUtilTest.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws;
import com.netflix.titus.api.iam.model.IamRole;
import com.netflix.titus.ext.aws.iam.AwsIamUtil;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests for {@link AwsIamUtil#canAssume}, which decides whether a given role name appears as an
 * assumable principal in an IAM role's URL-encoded assume-role policy document.
 */
public class AwsIamUtilTest {
    /**
     * The AWS assume policy is provided by AWS in URL encoded format. The decoded JSON looks like:
     * {"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"},{"Effect":"Allow","Principal":{"AWS":["AROAIBCLYUHWQPCHAZC34","arn:aws:iam::123456789012:role/myAssumableRole","AROAIBCLYUHWQPCHAZC34"]},"Action":"sts:AssumeRole"}]}
     */
    private static final String VALID_ASSUME_POLICY_DOC = "%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%2C%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22AWS%22%3A%5B%22AIDAJQABLZS4A3QDU576Q%22%2C%22arn%3Aaws%3Aiam%3A%3A123456789012%3Arole%2FmyAssumableRole%22%2C%22AIDAJQABLZS4A3QDU576Q%22%5D%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D";
    // Same document as above but truncated mid-statement, so it decodes to malformed JSON
    private static final String INVALID_ASSUME_POLICY_DOC = "%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%2C%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22AWS%22%3A%5B%22AIDAJQABLZS4A3QDU576Q%22%2C%22arn%3Aaws%3Aiam%3A%3A123456789012%3Arole%2FmyAssumableRole%22%2C%22AIDAJQABLZS4A3QDU576Q%22%5D%7D%2C%22Action%22%3A%22";

    // Role name that appears in the valid policy's AWS principal list (via its ARN)
    private static final String ASSUMABLE_ROLE = "myAssumableRole";
    // Role name that appears nowhere in the policy
    private static final String UNASSUMABLE_ROLE = "myUnassumableRole";

    private static final IamRole VALID_IAM_ROLE = IamRole.newBuilder()
            .withRoleId("AIDAJQABLZS4A3QDU576Q")
            .withRoleName("validRoleName")
            .withResourceName("arn:aws:iam::123456789012:role/validRoleName")
            .withPolicyDoc(VALID_ASSUME_POLICY_DOC)
            .build();
    private static final IamRole INVALID_IAM_ROLE = IamRole.newBuilder()
            .withRoleId("AIDAJQABLZS4A3QDU576Q")
            .withRoleName("invalidRoleName")
            .withResourceName("arn:aws:iam::123456789012:role/invalidRoleName")
            .withPolicyDoc(INVALID_ASSUME_POLICY_DOC)
            .build();

    /** A role listed in the policy's AWS principals is assumable. */
    @Test
    public void validateAssumablePolicyTest() {
        assertThat(AwsIamUtil.canAssume(VALID_IAM_ROLE, ASSUMABLE_ROLE)).isTrue();
    }

    /** A role absent from the policy is not assumable. */
    @Test
    public void validateUnassumablePolicyTest() {
        assertThat(AwsIamUtil.canAssume(VALID_IAM_ROLE, UNASSUMABLE_ROLE)).isFalse();
    }

    /** A malformed (truncated) policy document must be treated as not assumable, not throw. */
    @Test
    public void validateInvalidJsonPolicyTest() {
        assertThat(AwsIamUtil.canAssume(INVALID_IAM_ROLE, ASSUMABLE_ROLE)).isFalse();
    }
}
| 1,409 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/AwsObservableExtTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.handlers.AsyncHandler;
import org.junit.Test;
import rx.Completable;
import rx.Single;
import rx.observers.AssertableSubscriber;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static org.junit.Assert.assertEquals;
/**
 * Tests for {@link AwsObservableExt}, which adapts AWS SDK async operations
 * (callback based {@link AsyncHandler}) to Rx {@link Completable} and
 * {@link Single} types.
 */
public class AwsObservableExtTest {

    @Test
    public void asyncActionCompletable() throws Exception {
        AmazonWebServiceRequest someRequest = AmazonWebServiceRequest.NOOP;
        final MockAsyncClient<AmazonWebServiceRequest, String> client = new MockAsyncClient<>(someRequest, "some response");
        final Completable completable = AwsObservableExt.asyncActionCompletable(factory -> client.someAsyncOperation(factory.handler(
                (req, resp) -> {
                    assertEquals(someRequest, req);
                    assertEquals("some response", resp);
                },
                (t) -> {
                    throw new IllegalStateException("Should never be here");
                }
        )));

        TestScheduler testScheduler = Schedulers.test();
        final AssertableSubscriber<Void> subscriber = completable.subscribeOn(testScheduler).test();
        testScheduler.triggerActions();
        // The mock client has not delivered its callback yet, so the
        // completable must still be pending.
        subscriber.assertNotCompleted();

        // Deliver the async response; the completable should now terminate.
        client.run();
        testScheduler.triggerActions();
        subscriber.assertCompleted();
    }

    @Test
    public void asyncActionCompletableErrors() {
        AmazonWebServiceRequest someRequest = AmazonWebServiceRequest.NOOP;
        final MockAsyncClient<AmazonWebServiceRequest, String> client = new MockAsyncClient<>(someRequest, "some response");
        RuntimeException exception = new RuntimeException("error when initiating an async operation");
        // An exception thrown while *initiating* the async call (before any
        // handler fires) must surface as an onError on the completable.
        final Completable completable = AwsObservableExt.asyncActionCompletable(factory -> client.throwException(exception));

        TestScheduler testScheduler = Schedulers.test();
        final AssertableSubscriber<Void> subscriber = completable.subscribeOn(testScheduler).test();
        testScheduler.triggerActions();
        subscriber.assertError(exception);
    }

    @Test
    public void asyncActionSingle() throws Exception {
        AmazonWebServiceRequest someRequest = AmazonWebServiceRequest.NOOP;
        final MockAsyncClient<AmazonWebServiceRequest, String> client = new MockAsyncClient<>(someRequest, "some response");
        Single<String> single = AwsObservableExt.asyncActionSingle(supplier -> client.someAsyncOperation(supplier.handler()));

        TestScheduler testScheduler = Schedulers.test();
        final AssertableSubscriber<String> subscriber = single.subscribeOn(testScheduler).test();
        testScheduler.triggerActions();
        // No value may be emitted before the async callback fires.
        subscriber.assertNoValues();
        subscriber.assertNotCompleted();

        // Deliver the async response; the single emits exactly that value.
        client.run();
        testScheduler.triggerActions();
        subscriber.assertValueCount(1);
        subscriber.assertValue("some response");
        subscriber.assertCompleted();
    }

    @Test
    public void asyncActionSingleErrors() {
        AmazonWebServiceRequest someRequest = AmazonWebServiceRequest.NOOP;
        final MockAsyncClient<AmazonWebServiceRequest, String> client = new MockAsyncClient<>(someRequest, "some response");
        RuntimeException exception = new RuntimeException("error when initiating an async operation");
        // An initiation failure surfaces as an onError on the single.
        final Single<String> completable = AwsObservableExt.asyncActionSingle(supplier -> client.throwException(exception));

        TestScheduler testScheduler = Schedulers.test();
        final AssertableSubscriber<String> subscriber = completable.subscribeOn(testScheduler).test();
        testScheduler.triggerActions();
        subscriber.assertError(exception);
    }

    /**
     * Minimal stand-in for an AWS async client. {@link #someAsyncOperation}
     * wraps the handler invocation in a {@link FutureTask} which does nothing
     * until {@link #run()} is called, letting tests control exactly when the
     * async callback fires. Declared {@code static} so it does not capture a
     * hidden reference to the enclosing test instance.
     */
    private static class MockAsyncClient<REQ extends AmazonWebServiceRequest, RES> {
        private final REQ request;
        private final RES response;
        private volatile FutureTask<RES> futureTask;

        private MockAsyncClient(REQ request, RES response) {
            this.request = request;
            this.response = response;
        }

        Future<RES> someAsyncOperation(AsyncHandler<? super REQ, RES> handler) {
            futureTask = new FutureTask<RES>(() -> {
                handler.onSuccess(request, response);
                return response;
            });
            return futureTask;
        }

        /** Simulates a failure while initiating the async call. */
        Future<RES> throwException(RuntimeException t) {
            throw t;
        }

        /** Fires the recorded success callback. */
        private void run() {
            futureTask.run();
        }
    }
}
| 1,410 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/cloudwatch/CloudWatchClientTest.java | package com.netflix.titus.ext.aws.cloudwatch;
import java.util.Arrays;
import java.util.List;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.netflix.titus.api.appscale.model.AlarmConfiguration;
import com.netflix.titus.api.appscale.model.ComparisonOperator;
import com.netflix.titus.api.appscale.model.MetricDimension;
import com.netflix.titus.api.appscale.model.Statistic;
import org.junit.Test;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
public class CloudWatchClientTest {
private AlarmConfiguration.Builder getAlarmConfigBuilder() {
return AlarmConfiguration.newBuilder()
.withName("alarm-config-1")
.withMetricName("metric-1")
.withMetricNamespace("standard")
.withStatistic(Statistic.Average)
.withActionsEnabled(true)
.withPeriodSec(60)
.withThreshold(5)
.withEvaluationPeriods(2)
.withComparisonOperator(ComparisonOperator.GreaterThanOrEqualToThreshold);
}
@Test
public void buildDefaultMetricDimensions() {
AlarmConfiguration alarmConfiguration = getAlarmConfigBuilder().build();
List<Dimension> dimensions = CloudWatchClient.buildMetricDimensions(alarmConfiguration, "foo-bar");
assertThat(dimensions).isNotNull();
assertThat(dimensions.size()).isEqualTo(1);
assertThat(dimensions.get(0).getName()).isEqualTo("AutoScalingGroupName");
assertThat(dimensions.get(0).getValue()).isEqualTo("foo-bar");
}
@Test
public void buildCustomMetricDimensions() {
MetricDimension md1 = MetricDimension.newBuilder().withName("foo").withValue("bar").build();
MetricDimension md2 = MetricDimension.newBuilder().withName("service-tier").withValue("1").build();
List<MetricDimension> customMetricDimensions = Arrays.asList(md1, md2);
AlarmConfiguration alarmConfiguration = getAlarmConfigBuilder().withDimensions(customMetricDimensions).build();
List<Dimension> dimensions = CloudWatchClient.buildMetricDimensions(alarmConfiguration, "foo-bar");
assertThat(dimensions).isNotNull();
assertThat(dimensions.size()).isEqualTo(2);
assertThat(dimensions.get(0).getName()).isEqualTo("foo");
assertThat(dimensions.get(0).getValue()).isEqualTo("bar");
assertThat(dimensions.get(1).getName()).isEqualTo("service-tier");
assertThat(dimensions.get(1).getValue()).isEqualTo("1");
}
} | 1,411 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/appscale/AWSAppAutoScalingClientTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.amazonaws.handlers.AsyncHandler;
import com.amazonaws.http.HttpResponse;
import com.amazonaws.http.SdkHttpMetadata;
import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingAsync;
import com.amazonaws.services.applicationautoscaling.model.DeleteScalingPolicyRequest;
import com.amazonaws.services.applicationautoscaling.model.DeleteScalingPolicyResult;
import com.amazonaws.services.applicationautoscaling.model.DeregisterScalableTargetRequest;
import com.amazonaws.services.applicationautoscaling.model.DeregisterScalableTargetResult;
import com.amazonaws.services.applicationautoscaling.model.ObjectNotFoundException;
import com.netflix.spectator.api.NoopRegistry;
import javaslang.concurrent.Future;
import org.junit.Test;
import rx.observers.AssertableSubscriber;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class AWSAppAutoScalingClientTest {
    // Deleting a scaling policy twice must succeed both times: after the first
    // delete, AWS reports ObjectNotFoundException, which the client must treat
    // as success (idempotent delete) instead of retrying.
    @Test
    public void deleteScalingPolicyIsIdempotent() {
        String jobId = UUID.randomUUID().toString();
        String policyId = UUID.randomUUID().toString();
        AWSApplicationAutoScalingAsync clientAsync = mock(AWSApplicationAutoScalingAsync.class);
        AWSAppScalingConfig config = mock(AWSAppScalingConfig.class);
        AWSAppAutoScalingClient autoScalingClient = new AWSAppAutoScalingClient(clientAsync, config, new NoopRegistry());
        // delete happens successfully on the first attempt
        AtomicBoolean isDeleted = new AtomicBoolean(false);
        when(clientAsync.deleteScalingPolicyAsync(any(), any())).thenAnswer(invocation -> {
            DeleteScalingPolicyRequest request = invocation.getArgument(0);
            AsyncHandler<DeleteScalingPolicyRequest, DeleteScalingPolicyResult> handler = invocation.getArgument(1);
            // Subsequent calls simulate AWS behavior once the policy is gone.
            if (isDeleted.get()) {
                ObjectNotFoundException notFoundException = new ObjectNotFoundException(policyId + " does not exist");
                handler.onError(notFoundException);
                return Future.failed(notFoundException);
            }
            // First call: report HTTP 200 both via the async handler and the future.
            DeleteScalingPolicyResult resultSuccess = new DeleteScalingPolicyResult();
            HttpResponse successResponse = new HttpResponse(null, null);
            successResponse.setStatusCode(200);
            resultSuccess.setSdkHttpMetadata(SdkHttpMetadata.from(successResponse));
            isDeleted.set(true);
            handler.onSuccess(request, resultSuccess);
            return Future.successful(resultSuccess);
        });
        AssertableSubscriber<Void> firstCall = autoScalingClient.deleteScalingPolicy(policyId, jobId).test();
        firstCall.awaitTerminalEvent(2, TimeUnit.SECONDS);
        firstCall.assertNoErrors();
        firstCall.assertCompleted();
        verify(clientAsync, times(1)).deleteScalingPolicyAsync(any(), any());
        // second should complete fast when NotFound and not retry with exponential backoff
        AssertableSubscriber<Void> secondCall = autoScalingClient.deleteScalingPolicy(policyId, jobId).test();
        secondCall.awaitTerminalEvent(2, TimeUnit.SECONDS);
        secondCall.assertNoErrors();
        secondCall.assertCompleted();
        verify(clientAsync, times(2)).deleteScalingPolicyAsync(any(), any());
    }
    // Same idempotency contract for deregistering the scalable target:
    // a NotFound on repeat deregistration must be treated as success.
    @Test
    public void deleteScalableTargetIsIdempotent() {
        String jobId = UUID.randomUUID().toString();
        String policyId = UUID.randomUUID().toString();
        AWSApplicationAutoScalingAsync clientAsync = mock(AWSApplicationAutoScalingAsync.class);
        AWSAppScalingConfig config = mock(AWSAppScalingConfig.class);
        AWSAppAutoScalingClient autoScalingClient = new AWSAppAutoScalingClient(clientAsync, config, new NoopRegistry());
        AtomicBoolean isDeleted = new AtomicBoolean(false);
        when(clientAsync.deregisterScalableTargetAsync(any(), any())).thenAnswer(invocation -> {
            DeregisterScalableTargetRequest request = invocation.getArgument(0);
            AsyncHandler<DeregisterScalableTargetRequest, DeregisterScalableTargetResult> handler = invocation.getArgument(1);
            // Subsequent calls simulate AWS behavior once the target is gone.
            if (isDeleted.get()) {
                ObjectNotFoundException notFoundException = new ObjectNotFoundException(policyId + " does not exist");
                handler.onError(notFoundException);
                return Future.failed(notFoundException);
            }
            // First call: report HTTP 200 both via the async handler and the future.
            DeregisterScalableTargetResult resultSuccess = new DeregisterScalableTargetResult();
            HttpResponse successResponse = new HttpResponse(null, null);
            successResponse.setStatusCode(200);
            resultSuccess.setSdkHttpMetadata(SdkHttpMetadata.from(successResponse));
            isDeleted.set(true);
            handler.onSuccess(request, resultSuccess);
            return Future.successful(resultSuccess);
        });
        AssertableSubscriber<Void> firstCall = autoScalingClient.deleteScalableTarget(jobId).test();
        firstCall.awaitTerminalEvent(2, TimeUnit.SECONDS);
        firstCall.assertNoErrors();
        firstCall.assertCompleted();
        verify(clientAsync, times(1)).deregisterScalableTargetAsync(any(), any());
        // second should complete fast when NotFound and not retry with exponential backoff
        AssertableSubscriber<Void> secondCall = autoScalingClient.deleteScalableTarget(jobId).test();
        secondCall.awaitTerminalEvent(2, TimeUnit.SECONDS);
        secondCall.assertNoErrors();
        secondCall.assertCompleted();
        verify(clientAsync, times(2)).deregisterScalableTargetAsync(any(), any());
    }
}
| 1,412 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/appscale/AWSAppAutoScalingUtilTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import org.junit.Test;
import static org.assertj.core.api.Java6Assertions.assertThat;
public class AWSAppAutoScalingUtilTest {

    /** The gateway resource id embeds gateway name, region, stage and job id. */
    @Test
    public void testGatewayResourceId() {
        String resourceId = AWSAppAutoScalingUtil.buildGatewayResourceId("job1", "Titus", "us-east-1", "stage1");
        String expected = "https://Titus.execute-api.us-east-1.amazonaws.com/stage1/scalableTargetDimensions/job1";
        assertThat(resourceId).isEqualTo(expected);
    }

    /** Scaling policy names are scoped by job id: "<jobId>/<policyId>". */
    @Test
    public void testPolicyName() {
        assertThat(AWSAppAutoScalingUtil.buildScalingPolicyName("policy1", "job1")).isEqualTo("job1/policy1");
    }
}
| 1,413 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/appscale/AppAutoScalingCallbackSpringResourceTest.java | package com.netflix.titus.ext.aws.appscale;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.runtime.endpoint.metadata.spring.CallMetadataAuthentication;
import org.junit.Test;
import org.springframework.http.ResponseEntity;
import rx.Observable;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AppAutoScalingCallbackSpringResourceTest {
    // Verifies the HTTP status mapping of setScalableTargetResourceInfo:
    // 200 with the resource info as body on success, 400 for a negative
    // desired capacity.
    @Test
    public void testResponseEntity() {
        AppAutoScalingCallbackService appAutoScalingCallbackService = mock(AppAutoScalingCallbackService.class);
        String jobId = "job-1";
        ScalableTargetResourceInfo scalableTargetResourceInfo = ScalableTargetResourceInfo.newBuilder().actualCapacity(4).desiredCapacity(6).scalingStatus(
                DefaultAppAutoScalingCallbackService.ScalingStatus.Pending.name()).build();
        CallMetadata callMetadata = CallMetadata.newBuilder().withCallerId("unit-testing").build();
        CallMetadataAuthentication callMetadataAuthentication = mock(CallMetadataAuthentication.class);
        when(callMetadataAuthentication.getCallMetadata()).thenReturn(callMetadata);
        // Stub keyed on this exact resource-info instance (mutated below).
        when(appAutoScalingCallbackService.setScalableTargetResourceInfo(jobId, scalableTargetResourceInfo, callMetadata))
                .thenReturn(Observable.just(scalableTargetResourceInfo));
        AppAutoScalingCallbackSpringResource springResource = new AppAutoScalingCallbackSpringResource(appAutoScalingCallbackService);
        ResponseEntity<ScalableTargetResourceInfo> resp = springResource.setScalableTargetResourceInfo(jobId,
                scalableTargetResourceInfo, callMetadataAuthentication);
        assertThat(resp).isNotNull();
        assertThat(resp.getStatusCodeValue()).isEqualTo(200);
        assertThat(resp.getBody()).isEqualTo(scalableTargetResourceInfo);
        // Mutate the same instance to an invalid (negative) desired capacity;
        // the resource is expected to reject it with a 400.
        scalableTargetResourceInfo.setDesiredCapacity(-1);
        ResponseEntity<ScalableTargetResourceInfo> resp2 = springResource.setScalableTargetResourceInfo(jobId,
                scalableTargetResourceInfo, callMetadataAuthentication);
        assertThat(resp2).isNotNull();
        assertThat(resp2.getStatusCodeValue()).isEqualTo(400);
    }
}
0 | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/test/java/com/netflix/titus/ext/aws/supervisor/AsgLocalMasterReadinessResolverTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.supervisor;
import java.time.Duration;
import java.util.Collections;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import com.amazonaws.handlers.AsyncHandler;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.google.common.util.concurrent.Futures;
import com.netflix.titus.api.supervisor.model.ReadinessState;
import com.netflix.titus.api.supervisor.model.ReadinessStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.aws.AwsConfiguration;
import org.junit.Before;
import org.junit.Test;
import reactor.core.scheduler.Schedulers;
import static com.jayway.awaitility.Awaitility.await;
import static com.netflix.titus.ext.aws.supervisor.AsgLocalMasterReadinessResolver.REFRESH_SCHEDULER_DESCRIPTOR;
import static com.netflix.titus.ext.aws.supervisor.AsgLocalMasterReadinessResolver.TAG_MASTER_ENABLED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AsgLocalMasterReadinessResolverTest {
    // Sentinel tag value that makes the mocked AWS call fail with SIMULATED_ERROR.
    private static final String ERROR_MARKER = "errorMarker";
    private static final RuntimeException SIMULATED_ERROR = new RuntimeException("simulated error");
    private final TitusRuntime titusRuntime = TitusRuntimes.internal();
    private final AwsConfiguration configuration = mock(AwsConfiguration.class);
    private final AmazonAutoScalingAsync autoScalingClient = mock(AmazonAutoScalingAsync.class);
    // Value the mocked client reports for the TAG_MASTER_ENABLED ASG tag;
    // null means "tag absent", ERROR_MARKER triggers the failure path.
    private final AtomicReference<String> currentTagState = new AtomicReference<>();
    // Counts mocked describeAutoScalingGroupsAsync invocations (success and error paths).
    private final AtomicInteger invocationCounter = new AtomicInteger();
    private AsgLocalMasterReadinessResolver resolver;
    @Before
    public void setUp() {
        when(configuration.getTitusMasterAsgName()).thenReturn("MasterAsg");
        when(autoScalingClient.describeAutoScalingGroupsAsync(any(), any())).thenAnswer(invocation -> {
            DescribeAutoScalingGroupsRequest request = invocation.getArgument(0);
            AsyncHandler asyncHandler = invocation.getArgument(1);
            // Error path: report the failure via the handler and the returned future.
            if (ERROR_MARKER.equals(currentTagState.get())) {
                asyncHandler.onError(SIMULATED_ERROR);
                invocationCounter.incrementAndGet();
                return Futures.immediateFailedFuture(SIMULATED_ERROR);
            }
            // Success path: return a single ASG carrying the TAG_MASTER_ENABLED tag
            // when currentTagState holds a value, or no tags at all otherwise.
            DescribeAutoScalingGroupsResult response = new DescribeAutoScalingGroupsResult();
            AutoScalingGroup autoScalingGroup = new AutoScalingGroup();
            if (currentTagState.get() != null) {
                TagDescription tag = new TagDescription();
                tag.setKey(TAG_MASTER_ENABLED);
                tag.setValue(currentTagState.get());
                autoScalingGroup.setTags(Collections.singletonList(tag));
            }
            response.setAutoScalingGroups(Collections.singletonList(autoScalingGroup));
            asyncHandler.onSuccess(request, response);
            invocationCounter.incrementAndGet();
            return Futures.immediateFuture(response);
        });
        // Near-zero refresh delays so the resolver re-reads the tag quickly in tests.
        resolver = new AsgLocalMasterReadinessResolver(
                configuration,
                autoScalingClient,
                REFRESH_SCHEDULER_DESCRIPTOR.toBuilder()
                        .withInitialDelay(Duration.ofMillis(0))
                        .withInterval(Duration.ofMillis(1))
                        .build(),
                titusRuntime,
                Schedulers.parallel()
        );
    }
    // Readiness follows the tag value: "true" -> Enabled, "false"/absent -> Disabled.
    @Test
    public void testResolve() {
        Iterator<ReadinessStatus> it = newStreamInitiallyDisabled();
        // Enable the master tag
        currentTagState.set("true");
        awaitState(it, ReadinessState.Enabled);
        // Explicitly disable the tag
        currentTagState.set("false");
        awaitState(it, ReadinessState.Disabled);
        // Re-enable the tag
        currentTagState.set("true");
        awaitState(it, ReadinessState.Enabled);
        // Remove tag; absence of the tag means Disabled
        currentTagState.set(null);
        awaitState(it, ReadinessState.Disabled);
    }
    // An unparsable tag value is treated as Disabled, and the resolver recovers
    // once a valid value appears again.
    @Test
    public void testInvalidTagValues() {
        Iterator<ReadinessStatus> it = newStreamInitiallyDisabled();
        // Enable the master tag
        currentTagState.set("true");
        awaitState(it, ReadinessState.Enabled);
        // Set bad state, and wait until it is read
        currentTagState.set("bad_state");
        awaitState(it, ReadinessState.Disabled);
        // Set a valid value again to trigger an update
        currentTagState.set("true");
        awaitState(it, ReadinessState.Enabled);
    }
    // AWS client failures must not break the refresh loop: after an error the
    // resolver keeps polling and picks up the next good value.
    @Test
    public void testAwsClientError() {
        Iterator<ReadinessStatus> it = newStreamInitiallyDisabled();
        // Simulate error, and wait until at least one failing call happened
        int currentCounter = invocationCounter.get();
        currentTagState.set(ERROR_MARKER);
        await().until(() -> invocationCounter.get() > currentCounter);
        // Restore a good value to trigger an update
        currentTagState.set("true");
        awaitState(it, ReadinessState.Enabled);
    }
    // Subscribes to the readiness stream and consumes the initial
    // NotReady -> Disabled transition emitted before any tag is set.
    private Iterator<ReadinessStatus> newStreamInitiallyDisabled() {
        Iterator<ReadinessStatus> it = resolver.observeLocalMasterReadinessUpdates().toIterable().iterator();
        assertThat(it.next().getState()).isEqualTo(ReadinessState.NotReady);
        assertThat(it.next().getState()).isEqualTo(ReadinessState.Disabled);
        return it;
    }
    // Drains the stream until the expected state is observed (or awaitility times out).
    private void awaitState(Iterator<ReadinessStatus> it, ReadinessState state) {
        await().until(() -> it.next().getState() == state);
    }
}
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonIamAsyncProvider.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws;
import java.util.Locale;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementAsync;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementAsyncClientBuilder;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
@Singleton
public class AmazonIamAsyncProvider implements Provider<AmazonIdentityManagementAsync> {

    private final AmazonIdentityManagementAsync amazonIdentityManagementAsync;

    /**
     * Builds the async IAM client for the configured region, wiring Spectator
     * request metrics from the Titus runtime registry.
     */
    @Inject
    public AmazonIamAsyncProvider(AwsConfiguration configuration, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        // Locale.ROOT keeps region normalization independent of the JVM's default
        // locale (e.g. Turkish dotless-i lower-casing would corrupt region ids).
        String region = configuration.getRegion().trim().toLowerCase(Locale.ROOT);
        this.amazonIdentityManagementAsync = AmazonIdentityManagementAsyncClientBuilder.standard()
                .withRegion(region)
                .withCredentials(credentialProvider)
                .withMetricsCollector(new SpectatorRequestMetricCollector(runtime.getRegistry()))
                .build();
    }

    @Override
    public AmazonIdentityManagementAsync get() {
        return amazonIdentityManagementAsync;
    }

    /** Releases the client's internal resources on container shutdown. */
    @PreDestroy
    public void shutdown() {
        amazonIdentityManagementAsync.shutdown();
    }
}
| 1,416 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/DataPlaneAmazonAutoScalingAsyncProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.netflix.titus.common.runtime.TitusRuntime;
@Singleton
public class DataPlaneAmazonAutoScalingAsyncProvider extends AmazonAutoScalingAsyncProvider {

    // Injection binding name for the data plane auto scaling client.
    public static final String NAME = "dataPlaneAmazonAutoScalingClient";

    private final AmazonAutoScalingAsync amazonAutoScaling;

    /**
     * Builds the async auto scaling client for the data plane region resolved
     * from configuration (which may differ from the control plane region).
     */
    @Inject
    public DataPlaneAmazonAutoScalingAsyncProvider(AwsConfiguration configuration, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        String region = AwsRegionConfigurationUtil.resolveDataPlaneRegion(configuration);
        this.amazonAutoScaling = buildAmazonAutoScalingAsyncClient(region, credentialProvider, runtime);
    }

    @Override
    public AmazonAutoScalingAsync get() {
        return amazonAutoScaling;
    }

    // Releases the client's internal resources on container shutdown.
    @PreDestroy
    public void shutdown() {
        amazonAutoScaling.shutdown();
    }
}
| 1,417 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/DataPlaneControllerCredentialsProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.netflix.titus.common.util.StringExt;
@Singleton
public class DataPlaneControllerCredentialsProvider implements Provider<AWSCredentialsProvider> {

    /** Injection binding name for these credentials. */
    public static final String NAME = "dataPlaneControllerCredentials";

    private final AwsConfiguration configuration;
    private final AWSSecurityTokenServiceAsync stsClient;
    private final AWSCredentialsProvider defaultCredentialsProvider;

    @Inject
    public DataPlaneControllerCredentialsProvider(AwsConfiguration configuration,
                                                  AWSSecurityTokenServiceAsync stsClient,
                                                  AWSCredentialsProvider defaultCredentialsProvider) {
        this.configuration = configuration;
        this.stsClient = stsClient;
        this.defaultCredentialsProvider = defaultCredentialsProvider;
    }

    /**
     * Returns an STS assume-role session credentials provider for the
     * configured data plane controller role. When no role ARN is configured,
     * the control plane and data plane are assumed to share an account and the
     * default credentials provider is returned as-is.
     */
    @Override
    public AWSCredentialsProvider get() {
        String dataPlaneControllerRoleArn = configuration.getDataPlaneControllerRoleArn();
        if (StringExt.isEmpty(dataPlaneControllerRoleArn)) {
            return defaultCredentialsProvider;
        }
        STSAssumeRoleSessionCredentialsProvider.Builder builder = new STSAssumeRoleSessionCredentialsProvider.Builder(
                dataPlaneControllerRoleArn,
                configuration.getDataPlaneControllerRoleSessionName()
        );
        return builder
                .withStsClient(stsClient)
                .withRoleSessionDurationSeconds(configuration.getDataPlaneControllerRoleSessionDurationSeconds())
                .build();
    }
}
| 1,418 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AwsConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
@Configuration(prefix = "titus.ext.aws")
public interface AwsConfiguration {
    /**
     * If set, this region value is used when building {@link com.amazonaws.services.ec2.AmazonEC2Client} and
     * {@link com.amazonaws.services.autoscaling.AmazonAutoScalingClient}.
     */
    String getDataPlaneRegion();
    /**
     * AWS region used when building control plane AWS clients.
     */
    String getRegion();
    /**
     * Titus master ASG name.
     */
    @DefaultValue("TitusMasterAsgName")
    String getTitusMasterAsgName();
    /**
     * Timeout for AWS API requests, in milliseconds.
     */
    @DefaultValue("10000")
    long getAwsRequestTimeoutMs();
    /**
     * Interval at which a cleaner process will run to terminate agents with {@link AwsInstanceCloudConnector#TAG_TERMINATE}
     * tag on them.
     */
    @DefaultValue("600000")
    long getReaperIntervalMs();
    /**
     * Amount of time to cache IAM role records, before making another call to AWS.
     */
    @DefaultValue("60000")
    long getIamRoleCacheTimeoutMs();
    /**
     * IAM role associated with agent instances. Agent instances use this role to assume into container provided
     * IAM roles.
     */
    @DefaultValue("")
    String getDataPlaneAgentRoleArn();
    /**
     * Session name used when assuming the data plane agent role.
     */
    @DefaultValue("titusControlPlaneSession")
    String getDataPlaneAgentRoleSessionName();
    /**
     * Duration of the data plane agent role session, in seconds.
     */
    @DefaultValue("3600")
    int getDataPlaneAgentRoleSessionDurationSeconds();
    /**
     * IAM role ARN to assume into to access AWS API for the data plane account. If not set, it is assumed that
     * the control plane and the data plane run in the same account and no cross access is required.
     */
    @DefaultValue("")
    String getDataPlaneControllerRoleArn();
    /**
     * Session name used when assuming the data plane controller role.
     */
    @DefaultValue("titusControlPlaneSession")
    String getDataPlaneControllerRoleSessionName();
    /**
     * Duration of the data plane controller role session, in seconds.
     */
    @DefaultValue("3600")
    int getDataPlaneControllerRoleSessionDurationSeconds();
    /**
     * IAM role name the control plane uses to manage aws resources such as load balancers.
     */
    @DefaultValue("TitusControlPlaneRole")
    String getControlPlaneRoleName();
    /**
     * Session name used when assuming the control plane role.
     */
    @DefaultValue("titusControlPlaneSession")
    String getControlPlaneRoleSessionName();
    /**
     * Duration of the control plane role session, in seconds.
     */
    @DefaultValue("3600")
    int getControlPlaneRoleSessionDurationSeconds();
}
| 1,419 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/DataPlaneAgentCredentialsProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.netflix.titus.common.util.StringExt;
/**
 * Supplies the credentials used to talk to the data plane account's agents. When a data
 * plane agent role ARN is configured, an STS assume-role provider is built for it;
 * otherwise the default (same-account) credentials are handed back unchanged.
 */
@Singleton
public class DataPlaneAgentCredentialsProvider implements Provider<AWSCredentialsProvider> {

    public static final String NAME = "dataPlaneAgentCredentials";

    private final AwsConfiguration configuration;
    private final AWSSecurityTokenServiceAsync stsClient;
    private final AWSCredentialsProvider defaultCredentialsProvider;

    @Inject
    public DataPlaneAgentCredentialsProvider(AwsConfiguration configuration,
                                             AWSSecurityTokenServiceAsync stsClient,
                                             AWSCredentialsProvider defaultCredentialsProvider) {
        this.configuration = configuration;
        this.stsClient = stsClient;
        this.defaultCredentialsProvider = defaultCredentialsProvider;
    }

    @Override
    public AWSCredentialsProvider get() {
        String dataPlaneRoleArn = configuration.getDataPlaneAgentRoleArn();
        // No role configured: control plane and data plane share an account, so the
        // default credentials suffice.
        if (StringExt.isEmpty(dataPlaneRoleArn)) {
            return defaultCredentialsProvider;
        }
        return newAssumeRoleProvider(dataPlaneRoleArn);
    }

    // Builds an STS assume-role credentials provider for the configured agent role.
    private AWSCredentialsProvider newAssumeRoleProvider(String roleArn) {
        return new STSAssumeRoleSessionCredentialsProvider.Builder(roleArn, configuration.getDataPlaneAgentRoleSessionName())
                .withStsClient(stsClient)
                .withRoleSessionDurationSeconds(configuration.getDataPlaneAgentRoleSessionDurationSeconds())
                .build();
    }
}
| 1,420 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonAutoScalingProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsyncClient;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsyncClientBuilder;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Provides a singleton {@link AmazonAutoScaling} client bound to the configured region,
 * with Spectator request metrics enabled. An async client implementation is built and
 * exposed through the sync interface (see TODO in the constructor).
 */
@Singleton
class AmazonAutoScalingProvider implements Provider<AmazonAutoScaling> {

    private final AmazonAutoScaling amazonAutoScaling;

    @Inject
    public AmazonAutoScalingProvider(AwsConfiguration coreConfiguration,
                                     AWSCredentialsProvider credentialProvider,
                                     TitusRuntime runtime) {
        String region = coreConfiguration.getRegion().trim().toLowerCase();
        String endpointHost = "autoscaling." + region + ".amazonaws.com";
        // TODO We need both sync and async versions. Remove casting once sync is no longer needed.
        this.amazonAutoScaling = (AmazonAutoScalingAsyncClient) AmazonAutoScalingAsyncClientBuilder.standard()
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpointHost, region))
                .withCredentials(credentialProvider)
                .withMetricsCollector(new SpectatorRequestMetricCollector(runtime.getRegistry()))
                .build();
    }

    @Override
    public AmazonAutoScaling get() {
        return amazonAutoScaling;
    }

    /** Releases client resources when the container shuts down. */
    @PreDestroy
    public void shutdown() {
        amazonAutoScaling.shutdown();
    }
}
| 1,421 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AwsModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.inject.Named;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsync;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementAsync;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.name.Names;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.titus.api.connector.cloud.IamConnector;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.ext.aws.appscale.AWSAppScalingConfig;
import com.netflix.titus.ext.aws.iam.AwsIamConnector;
/**
 * Guice bindings for the AWS client layer: STS, ELB, auto scaling, IAM clients and the
 * named credentials providers used for cross-account (data plane) access.
 */
public class AwsModule extends AbstractModule {

    @Override
    protected void configure() {
        // Unqualified AWS client bindings.
        bind(AWSSecurityTokenServiceAsync.class).toProvider(AmazonStsAsyncProvider.class);
        bind(AmazonElasticLoadBalancingAsync.class).toProvider(AmazonElasticLoadBalancingAsyncProvider.class);
        bind(AmazonAutoScaling.class).toProvider(AmazonAutoScalingProvider.class);
        bind(AmazonIdentityManagementAsync.class).toProvider(AmazonIamAsyncProvider.class);

        // Named auto scaling clients, one per account scope.
        bind(AmazonAutoScalingAsync.class)
                .annotatedWith(Names.named(ControlPlaneAmazonAutoScalingAsyncProvider.NAME))
                .toProvider(ControlPlaneAmazonAutoScalingAsyncProvider.class);
        bind(AmazonAutoScalingAsync.class)
                .annotatedWith(Names.named(DataPlaneAmazonAutoScalingAsyncProvider.NAME))
                .toProvider(DataPlaneAmazonAutoScalingAsyncProvider.class);

        // Named credentials providers for data plane cross-account access.
        bind(AWSCredentialsProvider.class)
                .annotatedWith(Names.named(DataPlaneControllerCredentialsProvider.NAME))
                .toProvider(DataPlaneControllerCredentialsProvider.class);
        bind(AWSCredentialsProvider.class)
                .annotatedWith(Names.named(DataPlaneAgentCredentialsProvider.NAME))
                .toProvider(DataPlaneAgentCredentialsProvider.class);
    }

    /** Archaius-backed AWS configuration proxy. */
    @Provides
    @Singleton
    public AwsConfiguration getAwsConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(AwsConfiguration.class);
    }

    /** Archaius-backed app scaling configuration proxy. */
    @Provides
    @Singleton
    public AWSAppScalingConfig getAWSAppScalingConfig(ConfigProxyFactory factory) {
        return factory.newProxy(AWSAppScalingConfig.class);
    }

    /** IAM connector backed by an STS client built from the agent-assumed credentials. */
    @Provides
    @Singleton
    public IamConnector getIamConnector(
            AwsConfiguration configuration,
            AmazonIdentityManagementAsync iamClient,
            @Named(DataPlaneAgentCredentialsProvider.NAME) AWSCredentialsProvider agentAssumedCredentials,
            TitusRuntime titusRuntime) {
        AWSSecurityTokenServiceAsync agentStsClient =
                new AmazonStsAsyncProvider(configuration, agentAssumedCredentials, titusRuntime).get();
        return new AwsIamConnector(configuration, iamClient, agentStsClient, titusRuntime.getRegistry());
    }
}
| 1,422 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/RetryWrapper.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import static com.netflix.titus.common.util.rx.RetryHandlerBuilder.retryHandler;
/**
 * Utility methods that add exponential-backoff retry behavior to observables wrapping
 * AWS calls.
 */
public class RetryWrapper {

    private static final int RETRY_COUNT = 3;
    private static final int RETRY_DELAY_SECONDS = 2;

    // Static utility holder; not meant to be instantiated.
    private RetryWrapper() {
    }

    /**
     * Retries the source observable with the default policy: {@value #RETRY_COUNT} retries,
     * starting at {@value #RETRY_DELAY_SECONDS} seconds with exponential backoff.
     *
     * @param retryHandlerTitle label attached to the retry handler for diagnostics
     */
    public static <T> Observable<T> wrapWithExponentialRetry(String retryHandlerTitle, Observable<T> sourceObservable) {
        return wrapWithExponentialRetry(retryHandlerTitle, RETRY_COUNT, RETRY_DELAY_SECONDS, sourceObservable);
    }

    /**
     * Retries the source observable up to {@code retryCount} times, with an exponentially
     * growing delay starting at {@code retryDelaySeconds} seconds.
     */
    public static <T> Observable<T> wrapWithExponentialRetry(String retryHandlerTitle, int retryCount, int retryDelaySeconds,
                                                             Observable<T> sourceObservable) {
        return sourceObservable.retryWhen(
                retryHandler()
                        .withTitle(retryHandlerTitle)
                        .withRetryCount(retryCount)
                        .withRetryDelay(retryDelaySeconds, TimeUnit.SECONDS)
                        .buildExponentialBackoff());
    }
}
| 1,423 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AwsReactorExt.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws;
import java.util.concurrent.Future;
import java.util.function.BiFunction;
import java.util.function.Supplier;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.handlers.AsyncHandler;
import reactor.core.publisher.Mono;
/**
 * Adapters bridging callback-style AWS SDK async client calls into Reactor {@link Mono}s.
 */
public class AwsReactorExt {

    // Static utility holder; not meant to be instantiated.
    private AwsReactorExt() {
    }

    /**
     * Adapts a callback-style AWS SDK async call into a {@link Mono}. The request is built
     * lazily by the supplier at subscription time. Disposing the subscription cancels the
     * underlying {@link Future} if it has not completed yet.
     *
     * @param request supplier invoked at subscription time to build the AWS request
     * @param callFun the AWS SDK async method, e.g. {@code client::describeSomethingAsync}
     * @return a {@link Mono} emitting the AWS response, or the call's error
     */
    public static <REQUEST extends AmazonWebServiceRequest, RESPONSE> Mono<RESPONSE> toMono(
            Supplier<REQUEST> request,
            BiFunction<REQUEST, AsyncHandler<REQUEST, RESPONSE>, Future<RESPONSE>> callFun
    ) {
        return Mono.create(emitter -> {
            AsyncHandler<REQUEST, RESPONSE> asyncHandler = new AsyncHandler<REQUEST, RESPONSE>() {
                @Override
                public void onError(Exception exception) {
                    emitter.error(exception);
                }

                @Override
                public void onSuccess(REQUEST request, RESPONSE result) {
                    emitter.success(result);
                }
            };
            Future<RESPONSE> future = callFun.apply(request.get(), asyncHandler);
            // Cancel the in-flight AWS call when the subscriber goes away.
            emitter.onDispose(() -> {
                if (!future.isCancelled() && !future.isDone()) {
                    future.cancel(true);
                }
            });
        });
    }

    /**
     * Variant of {@link #toMono(Supplier, BiFunction)} for an eagerly built request object.
     */
    public static <REQUEST extends AmazonWebServiceRequest, RESPONSE> Mono<RESPONSE> toMono(
            REQUEST request,
            BiFunction<REQUEST, AsyncHandler<REQUEST, RESPONSE>, Future<RESPONSE>> callFun
    ) {
        return toMono(() -> request, callFun);
    }
}
| 1,424 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/ControlPlaneAmazonAutoScalingAsyncProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Provides the singleton auto scaling async client for the control plane account.
 */
@Singleton
public class ControlPlaneAmazonAutoScalingAsyncProvider extends AmazonAutoScalingAsyncProvider {

    public static final String NAME = "controlPlaneAmazonAutoScalingClient";

    private final AmazonAutoScalingAsync amazonAutoScaling;

    @Inject
    public ControlPlaneAmazonAutoScalingAsyncProvider(AwsConfiguration configuration, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        // Normalize the configured region the same way the sibling providers do; the raw
        // value is interpolated into the service endpoint host name, so stray whitespace
        // or upper case would produce an invalid endpoint.
        String region = configuration.getRegion().trim().toLowerCase();
        this.amazonAutoScaling = buildAmazonAutoScalingAsyncClient(region, credentialProvider, runtime);
    }

    @Override
    public AmazonAutoScalingAsync get() {
        return amazonAutoScaling;
    }

    /** Releases client resources when the container shuts down. */
    @PreDestroy
    public void shutdown() {
        amazonAutoScaling.shutdown();
    }
}
| 1,425 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonStsAsyncProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsyncClientBuilder;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Provides a singleton STS async client bound to the configured region, with Spectator
 * request metrics enabled.
 */
@Singleton
public class AmazonStsAsyncProvider implements Provider<AWSSecurityTokenServiceAsync> {

    private final AWSSecurityTokenServiceAsync amazonStsAsync;

    @Inject
    public AmazonStsAsyncProvider(AwsConfiguration configuration, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        String normalizedRegion = configuration.getRegion().trim().toLowerCase();
        String endpointHost = "sts." + normalizedRegion + ".amazonaws.com";
        this.amazonStsAsync = AWSSecurityTokenServiceAsyncClientBuilder.standard()
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpointHost, normalizedRegion))
                .withCredentials(credentialProvider)
                .withMetricsCollector(new SpectatorRequestMetricCollector(runtime.getRegistry()))
                .build();
    }

    @Override
    public AWSSecurityTokenServiceAsync get() {
        return amazonStsAsync;
    }
}
| 1,426 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AwsRegionConfigurationUtil.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import com.netflix.titus.common.util.StringExt;
/**
 * Helpers for resolving AWS region settings from {@link AwsConfiguration}.
 */
public class AwsRegionConfigurationUtil {

    // Static utility holder; not meant to be instantiated.
    private AwsRegionConfigurationUtil() {
    }

    /**
     * Returns the data plane region, trimmed and lower-cased, falling back to the
     * (also normalized) control plane region when no explicit data plane region is set.
     */
    public static String resolveDataPlaneRegion(AwsConfiguration configuration) {
        String region = configuration.getDataPlaneRegion().trim().toLowerCase();
        if (StringExt.isEmpty(region)) {
            region = configuration.getRegion().trim().toLowerCase();
        }
        return region;
    }
}
| 1,427 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonAutoScalingAsyncProvider.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.inject.Provider;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsyncClientBuilder;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Base class for {@link AmazonAutoScalingAsync} providers. Centralizes client
 * construction so the control plane and data plane providers differ only in the
 * region and credentials they supply.
 */
public abstract class AmazonAutoScalingAsyncProvider implements Provider<AmazonAutoScalingAsync> {

    protected AmazonAutoScalingAsync buildAmazonAutoScalingAsyncClient(String region, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        String endpointHost = "autoscaling." + region + ".amazonaws.com";
        AwsClientBuilder.EndpointConfiguration endpointConfiguration =
                new AwsClientBuilder.EndpointConfiguration(endpointHost, region);
        return AmazonAutoScalingAsyncClientBuilder.standard()
                .withEndpointConfiguration(endpointConfiguration)
                .withCredentials(credentialProvider)
                .withMetricsCollector(new SpectatorRequestMetricCollector(runtime.getRegistry()))
                .build();
    }
}
| 1,428 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonElasticLoadBalancingAsyncProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsync;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsyncClientBuilder;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Provides a singleton ELB v2 async client bound to the configured region, with
 * Spectator request metrics enabled.
 */
@Singleton
public class AmazonElasticLoadBalancingAsyncProvider implements Provider<AmazonElasticLoadBalancingAsync> {

    private final AmazonElasticLoadBalancingAsync client;

    @Inject
    public AmazonElasticLoadBalancingAsyncProvider(AwsConfiguration configuration, AWSCredentialsProvider credentialProvider, TitusRuntime runtime) {
        String normalizedRegion = configuration.getRegion().trim().toLowerCase();
        this.client = AmazonElasticLoadBalancingAsyncClientBuilder.standard()
                .withCredentials(credentialProvider)
                .withRegion(normalizedRegion)
                .withMetricsCollector(new SpectatorRequestMetricCollector(runtime.getRegistry()))
                .build();
    }

    @Override
    public AmazonElasticLoadBalancingAsync get() {
        return client;
    }

    /** Releases client resources when the container shuts down. */
    @PreDestroy
    public void shutdown() {
        client.shutdown();
    }
}
| 1,429 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AwsObservableExt.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws;
import java.util.concurrent.Future;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.handlers.AsyncHandler;
import rx.Completable;
import rx.CompletableSubscriber;
import rx.Single;
import rx.SingleSubscriber;
import rx.exceptions.Exceptions;
import rx.functions.Action1;
import rx.functions.Action2;
import rx.functions.Func1;
import rx.subscriptions.Subscriptions;
/**
 * Rx adapters for callback-style async clients from the AWS SDK.
 */
public class AwsObservableExt {

    /**
     * Wraps usage of async clients from the AWS SDK into a rx.Completable. The action is expected to produce a single
     * Future, and the returned Completable will propagate its termination events (error and success) to subscribers.
     * <p>
     * Subscription cancellations on the returned Completable will cancel the Future if it is still pending.
     * <p>
     * Usage:
     * <p>
     * <pre>
     * Completable c = AwsObservableExt.asyncActionCompletable(supplier -> client.someAsyncOperation(request, supplier.handler(
     *     (req, resp) -> doSomethingOnSuccess(req, resp),
     *     (t) -> doSomethingOnError(t)
     * )));
     * </pre>
     *
     * @param action that will be executed when the Completable is subscribed to.
     * @return a Completable that completes when the produced future completes
     */
    public static Completable asyncActionCompletable(Func1<CompletableHandlerSupplier, Future<?>> action) {
        return Completable.create(subscriber -> {
            try {
                // The AWS call is issued first, then the cancellation hook is registered,
                // so `result` is guaranteed to exist by the time the hook can run.
                final Future<?> result = action.call(new CompletableHandlerSupplier(subscriber));
                subscriber.onSubscribe(Subscriptions.create(() -> result.cancel(true)));
            } catch (Throwable t) {
                // Fatal errors (OOM etc.) must be rethrown, not routed into onError.
                Exceptions.throwIfFatal(t);
                subscriber.onError(t);
            }
        });
    }

    /**
     * Wraps usage of async clients from the AWS SDK into a rx.Single. The action is expected to produce a single
     * Future, and the returned Single will propagate its result (error and success) to subscribers.
     * <p>
     * Subscription cancellations on the returned Single will cancel the Future if it is still pending.
     * <p>
     * The provided <tt>action</tt> will run on the AWS SDK threadpool. If desired, execution can be brought back to a
     * different scheduler with {@link rx.Observable#observeOn}.
     * <p>
     * Usage:
     * <pre>
     * {@code
     * Single s = AwsObservableExt.asyncActionSingle(supplier -> client.someAsyncOperation(request, supplier.handler()))
     *      .observeOn(Schedulers.computation());
     * }
     * </pre>
     *
     * @param action that will be executed when the Single is subscribed to, and produces a Future
     * @param <RES> return type in the Future produced by the action
     * @return a Single that propagates the result of the produced future
     */
    // NOTE(review): the REQ type parameter is not referenced in the parameter list or body;
    // confirm no caller supplies explicit type arguments before removing it.
    public static <REQ extends AmazonWebServiceRequest, RES> Single<RES> asyncActionSingle(Func1<SingleHandlerSupplier<RES>, Future<RES>> action) {
        return Single.create(subscriber -> {
            try {
                final Future<RES> result = action.call(new SingleHandlerSupplier<>(subscriber));
                // Registered after the call is issued; unsubscribing cancels the pending future.
                subscriber.add(Subscriptions.create(() -> result.cancel(true)));
            } catch (Throwable t) {
                Exceptions.throwIfFatal(t);
                subscriber.onError(t);
            }
        });
    }

    /**
     * Builds {@link AsyncHandler}s that forward AWS SDK completion callbacks to the wrapped
     * {@link CompletableSubscriber}, invoking the caller-supplied actions first.
     */
    public static class CompletableHandlerSupplier {

        private final CompletableSubscriber subscriber;

        private CompletableHandlerSupplier(CompletableSubscriber subscriber) {
            this.subscriber = subscriber;
        }

        public <REQ extends AmazonWebServiceRequest, RES> AsyncHandler<REQ, RES> handler(Action2<REQ, RES> onSuccessAction, Action1<Exception> onErrorAction) {
            return new AsyncHandler<REQ, RES>() {
                @Override
                public void onError(Exception exception) {
                    // Caller callback runs before the terminal event is emitted.
                    onErrorAction.call(exception);
                    subscriber.onError(exception);
                }

                @Override
                public void onSuccess(REQ request, RES result) {
                    onSuccessAction.call(request, result);
                    subscriber.onCompleted();
                }
            };
        }
    }

    /**
     * Builds {@link AsyncHandler}s that forward AWS SDK completion callbacks to the wrapped
     * {@link SingleSubscriber}, emitting the result value on success.
     */
    public static class SingleHandlerSupplier<RES> {

        private final SingleSubscriber<? super RES> subscriber;

        private SingleHandlerSupplier(SingleSubscriber<? super RES> subscriber) {
            this.subscriber = subscriber;
        }

        public <REQ extends AmazonWebServiceRequest> AsyncHandler<REQ, RES> handler() {
            return new AsyncHandler<REQ, RES>() {
                @Override
                public void onError(Exception exception) {
                    subscriber.onError(exception);
                }

                @Override
                public void onSuccess(REQ request, RES result) {
                    subscriber.onSuccess(result);
                }
            };
        }
    }
}
| 1,430 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/AmazonClientProvider.java | package com.netflix.titus.ext.aws;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.arn.Arn;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsync;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsyncClientBuilder;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.aws.SpectatorRequestMetricCollector;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Builds and caches, per AWS account id, the ELB v2 async client and the assume-role
 * credentials provider needed to operate in that account. Thread-safe.
 */
@Singleton
public class AmazonClientProvider {

    private final AwsConfiguration configuration;
    private final AWSSecurityTokenServiceAsync stsClient;

    // Caches keyed by AWS account id. ConcurrentHashMap provides safe publication and an
    // atomic computeIfAbsent; the previous double-checked locking over plain HashMaps
    // performed racy unsynchronized reads and was not thread-safe.
    private final Map<String, AWSCredentialsProvider> awsCredentialsByAccountId = new ConcurrentHashMap<>();
    private final Map<String, AmazonElasticLoadBalancingAsync> loadBalancerClients = new ConcurrentHashMap<>();

    private final Registry registry;

    @Inject
    public AmazonClientProvider(AwsConfiguration configuration,
                                AWSSecurityTokenServiceAsync stsClient,
                                TitusRuntime runtime) {
        this.configuration = configuration;
        this.stsClient = stsClient;
        this.registry = runtime.getRegistry();
    }

    /**
     * Returns a cached ELB v2 async client that operates in the given account, using
     * credentials obtained by assuming the control plane role in that account.
     */
    public AmazonElasticLoadBalancingAsync getLoadBalancingClient(String accountId) {
        return loadBalancerClients.computeIfAbsent(accountId, account -> {
            String region = AwsRegionConfigurationUtil.resolveDataPlaneRegion(configuration);
            return AmazonElasticLoadBalancingAsyncClientBuilder.standard()
                    .withCredentials(getAwsCredentialsProvider(account))
                    .withRegion(region)
                    .withMetricsCollector(new SpectatorRequestMetricCollector(registry))
                    .build();
        });
    }

    // Returns the cached assume-role credentials provider for the account, creating it on
    // first use. The provider itself refreshes/caches the STS session internally.
    private AWSCredentialsProvider getAwsCredentialsProvider(String accountId) {
        return awsCredentialsByAccountId.computeIfAbsent(accountId, account ->
                new STSAssumeRoleSessionCredentialsProvider.Builder(
                        getControlPlaneRoleArnForAccount(account).toString(),
                        configuration.getControlPlaneRoleSessionName())
                        .withStsClient(stsClient)
                        .withRoleSessionDurationSeconds(configuration.getControlPlaneRoleSessionDurationSeconds())
                        .build());
    }

    // Constructs the IAM role ARN for the configured control plane role in the given account.
    // IAM is a global service, so the region component is left empty.
    private Arn getControlPlaneRoleArnForAccount(String accountId) {
        String resource = "role/" + configuration.getControlPlaneRoleName();
        return Arn.builder()
                .withPartition("aws")
                .withService("iam")
                .withRegion("")
                .withAccountId(accountId)
                .withResource(resource)
                .build();
    }
}
| 1,431 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/cloudwatch/CloudWatchClient.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.cloudwatch;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.handlers.AsyncHandler;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchAsync;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchAsyncClientBuilder;
import com.amazonaws.services.cloudwatch.model.DeleteAlarmsRequest;
import com.amazonaws.services.cloudwatch.model.DeleteAlarmsResult;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest;
import com.amazonaws.services.cloudwatch.model.PutMetricAlarmResult;
import com.amazonaws.services.cloudwatch.model.ResourceNotFoundException;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.appscale.model.AlarmConfiguration;
import com.netflix.titus.api.appscale.model.MetricDimension;
import com.netflix.titus.api.appscale.service.AutoScalePolicyException;
import com.netflix.titus.api.connector.cloud.CloudAlarmClient;
import com.netflix.titus.ext.aws.AwsConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Emitter;
import rx.Observable;
import static com.netflix.titus.ext.aws.RetryWrapper.wrapWithExponentialRetry;
@Singleton
public class CloudWatchClient implements CloudAlarmClient {
public static final String AUTO_SCALING_GROUP_NAME = "AutoScalingGroupName";
private static Logger log = LoggerFactory.getLogger(CloudWatchClient.class);
private final AmazonCloudWatchAsync awsCloudWatch;
public static final String METRIC_CLOUD_WATCH_CREATE_ERROR = "titus.cloudwatch.create.error";
public static final String METRIC_CLOUD_WATCH_CREATE_ALARM = "titus.cloudwatch.create.alarm";
public static final String METRIC_CLOUD_WATCH_DELETE_ALARM = "titus.cloudwatch.delete.alarm";
public static final String METRIC_CLOUD_WATCH_DELETE_ERROR = "titus.cloudwatch.delete.error";
private final Counter createAlarmCounter;
private final Counter deleteAlarmCounter;
private final Counter deleteErrorCounter;
private final Counter createErrorCounter;
    /**
     * Creates the CloudWatch async client for the configured region and registers counters
     * tracking alarm create/delete successes and failures.
     */
    // NOTE(review): unlike the other providers in this package, the region is used without
    // .trim().toLowerCase() normalization here — confirm whether that is intentional.
    @Inject
    public CloudWatchClient(AWSCredentialsProvider awsCredentialsProvider,
                            AwsConfiguration awsConfiguration,
                            Registry registry) {
        awsCloudWatch = AmazonCloudWatchAsyncClientBuilder.standard().withCredentials(awsCredentialsProvider)
                .withRegion(awsConfiguration.getRegion()).build();
        createAlarmCounter = registry.counter(METRIC_CLOUD_WATCH_CREATE_ALARM);
        deleteAlarmCounter = registry.counter(METRIC_CLOUD_WATCH_DELETE_ALARM);
        deleteErrorCounter = registry.counter(METRIC_CLOUD_WATCH_DELETE_ERROR);
        createErrorCounter = registry.counter(METRIC_CLOUD_WATCH_CREATE_ERROR);
    }
@Override
public Observable<String> createOrUpdateAlarm(String policyRefId,
String jobId,
AlarmConfiguration alarmConfiguration,
String autoScalingGroup,
List<String> actions) {
List<Dimension> metricDimensions = buildMetricDimensions(alarmConfiguration, autoScalingGroup);
String cloudWatchName = buildCloudWatchName(policyRefId, jobId);
PutMetricAlarmRequest putMetricAlarmRequest = new PutMetricAlarmRequest();
if (alarmConfiguration.getActionsEnabled().isPresent()) {
putMetricAlarmRequest.setActionsEnabled(alarmConfiguration.getActionsEnabled().get());
}
putMetricAlarmRequest.setAlarmActions(actions);
putMetricAlarmRequest.setAlarmName(cloudWatchName);
putMetricAlarmRequest.setDimensions(metricDimensions);
putMetricAlarmRequest.setNamespace(alarmConfiguration.getMetricNamespace());
putMetricAlarmRequest.setComparisonOperator(alarmConfiguration.getComparisonOperator().name());
putMetricAlarmRequest.setStatistic(alarmConfiguration.getStatistic().name());
putMetricAlarmRequest.setEvaluationPeriods(alarmConfiguration.getEvaluationPeriods());
putMetricAlarmRequest.setPeriod(alarmConfiguration.getPeriodSec());
putMetricAlarmRequest.setThreshold(alarmConfiguration.getThreshold());
putMetricAlarmRequest.setMetricName(alarmConfiguration.getMetricName());
return wrapWithExponentialRetry(String.format("createOrUpdateAlarm in policy %s for job %s", policyRefId, jobId),
Observable.create(emitter ->
awsCloudWatch.putMetricAlarmAsync(putMetricAlarmRequest, new AsyncHandler<PutMetricAlarmRequest, PutMetricAlarmResult>() {
@Override
public void onError(Exception exception) {
createErrorCounter.increment();
emitter.onError(AutoScalePolicyException.errorCreatingAlarm(policyRefId, exception.getMessage()));
}
@Override
public void onSuccess(PutMetricAlarmRequest request, PutMetricAlarmResult putMetricAlarmResult) {
int httpStatusCode = putMetricAlarmResult.getSdkHttpMetadata().getHttpStatusCode();
log.info("Created Cloud Watch Alarm {} for {} - status {}", request, jobId, httpStatusCode);
// TODO : how to get ARN created by AWS for this resource ? returning cloudWatchName for now
createAlarmCounter.increment();
emitter.onNext(cloudWatchName);
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE));
}
@Override
public Completable deleteAlarm(String policyRefId, String jobId) {
DeleteAlarmsRequest deleteAlarmsRequest = new DeleteAlarmsRequest();
deleteAlarmsRequest.setAlarmNames(Arrays.asList(buildCloudWatchName(policyRefId, jobId)));
return wrapWithExponentialRetry(String.format("deleteAlarm in policy %s for job %s", policyRefId, jobId),
Observable.create(emitter ->
awsCloudWatch.deleteAlarmsAsync(deleteAlarmsRequest, new AsyncHandler<DeleteAlarmsRequest, DeleteAlarmsResult>() {
@Override
public void onError(Exception exception) {
deleteErrorCounter.increment();
if (exception instanceof ResourceNotFoundException) {
emitter.onError(AutoScalePolicyException.unknownScalingPolicy(policyRefId, exception.getMessage()));
} else {
emitter.onError(AutoScalePolicyException.errorDeletingAlarm(policyRefId, exception.getMessage()));
}
}
@Override
public void onSuccess(DeleteAlarmsRequest request, DeleteAlarmsResult deleteAlarmsResult) {
int httpStatusCode = deleteAlarmsResult.getSdkHttpMetadata().getHttpStatusCode();
log.info("Deleted cloud watch alarm for job-id {}, status {}", jobId, httpStatusCode);
deleteAlarmCounter.increment();
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE)).toCompletable();
}
private String buildCloudWatchName(String policyRefId, String jobId) {
return String.format("%s/%s", jobId, policyRefId);
}
@VisibleForTesting
static List<Dimension> buildMetricDimensions(AlarmConfiguration alarmConfiguration, String autoScalingGroup) {
List<Dimension> metricDimensions = new ArrayList<>(1);
if (alarmConfiguration.getDimensions() != null && ! alarmConfiguration.getDimensions().isEmpty()) {
for (MetricDimension customMetricDimension : alarmConfiguration.getDimensions()) {
Dimension dimension = new Dimension();
dimension.setName(customMetricDimension.getName());
dimension.setValue(customMetricDimension.getValue());
metricDimensions.add(dimension);
}
} else {
Dimension dimension = new Dimension();
dimension.setName(AUTO_SCALING_GROUP_NAME);
dimension.setValue(autoScalingGroup);
metricDimensions.add(dimension);
}
return metricDimensions;
}
}
| 1,432 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/loadbalancer/AwsLoadBalancerConnectorMetrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.loadbalancer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.titus.common.util.spectator.ExecutionMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Spectator metrics helper for {@link AwsLoadBalancerConnector}: records success
 * latency and failure counts per AWS load-balancer API method.
 */
public class AwsLoadBalancerConnectorMetrics {

    /** AWS ELBv2 API methods instrumented by this class. */
    public enum AwsLoadBalancerMethods {
        RegisterTargets,
        DeregisterTargets,
        DescribeTargetGroups,
        DescribeTargetHealth
    }

    private static final String METRICS_ROOT = "titus.loadbalancer.connector";

    private final Registry registry;
    private final Map<String, ExecutionMetrics> methodMetricsMap;

    public AwsLoadBalancerConnectorMetrics(Registry registry) {
        this.registry = registry;
        methodMetricsMap = new ConcurrentHashMap<>();
        // Pre-create ExecutionMetrics tagged per method so latency is tracked per call type.
        for (AwsLoadBalancerMethods methodName : AwsLoadBalancerMethods.values()) {
            String methodNameStr = methodName.name();
            methodMetricsMap.put(methodNameStr, newMetrics(methodNameStr));
        }
    }

    public void success(AwsLoadBalancerMethods method, long startTime) {
        getOrCreateMetrics(method).success(startTime);
    }

    public void failure(AwsLoadBalancerMethods method, Throwable error, long startTime) {
        // AWS does not expose a dedicated exception type for throttling, so detect it
        // by message text. Guard against exceptions that carry no message at all.
        String message = error.getMessage();
        if (message != null && message.contains("Rate exceeded")) {
            error = new AwsLoadBalancerRateLimitException(error);
        }
        getOrCreateMetrics(method).failure(error, startTime);
    }

    // Returns the metrics for the method, creating them atomically on first use. All
    // methods are pre-registered in the constructor, so creation here is a safety net.
    private ExecutionMetrics getOrCreateMetrics(AwsLoadBalancerMethods methodName) {
        return methodMetricsMap.computeIfAbsent(methodName.name(), this::newMetrics);
    }

    // Builds ExecutionMetrics tagged with the AWS method name. A single tag key
    // ("method") is used everywhere so time-series are not split across inconsistent tags.
    private ExecutionMetrics newMetrics(String methodNameStr) {
        List<Tag> tags = new ArrayList<>();
        tags.add(new BasicTag("method", methodNameStr));
        return new ExecutionMetrics(METRICS_ROOT, AwsLoadBalancerConnector.class, registry, tags);
    }
}
| 1,433 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/loadbalancer/AwsLoadBalancerConnector.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.loadbalancer;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.arn.Arn;
import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingAsync;
import com.amazonaws.services.elasticloadbalancingv2.model.DeregisterTargetsRequest;
import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest;
import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult;
import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthRequest;
import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult;
import com.amazonaws.services.elasticloadbalancingv2.model.RegisterTargetsRequest;
import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription;
import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupNotFoundException;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.connector.cloud.CloudConnectorException;
import com.netflix.titus.api.connector.cloud.LoadBalancer;
import com.netflix.titus.api.connector.cloud.LoadBalancerConnector;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.guice.ProxyType;
import com.netflix.titus.common.util.guice.annotation.ProxyConfiguration;
import com.netflix.titus.ext.aws.AmazonClientProvider;
import com.netflix.titus.ext.aws.AwsObservableExt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Observable;
import rx.Scheduler;
import rx.Single;
import rx.schedulers.Schedulers;
/**
 * {@link LoadBalancerConnector} implementation for AWS ALB target groups of the
 * "ip" target type. AWS SDK callbacks run on the SDK's own thread pool, so every
 * result stream is explicitly moved onto the configured Rx scheduler.
 */
@Singleton
@ProxyConfiguration(types = {ProxyType.Logging, ProxyType.Spectator})
public class AwsLoadBalancerConnector implements LoadBalancerConnector {

    private static final Logger logger = LoggerFactory.getLogger(AwsLoadBalancerConnector.class);

    // Only target groups of type "ip" can register container IPs directly.
    private static final String AWS_IP_TARGET_TYPE = "ip";

    private final AmazonClientProvider clientProvider;
    private final Scheduler scheduler;
    private final Registry registry;
    // final: assigned once in the constructor and never rebound
    private final AwsLoadBalancerConnectorMetrics connectorMetrics;

    @Inject
    public AwsLoadBalancerConnector(AmazonClientProvider clientProvider, Registry registry) {
        this(clientProvider, Schedulers.computation(), registry);
    }

    private AwsLoadBalancerConnector(AmazonClientProvider clientProvider, Scheduler scheduler, Registry registry) {
        this.clientProvider = clientProvider;
        this.scheduler = scheduler;
        this.registry = registry;
        this.connectorMetrics = new AwsLoadBalancerConnectorMetrics(registry);
    }

    /**
     * Registers all provided IPs with the target group in a single batch.
     * Completes immediately when the set is null or empty.
     */
    @Override
    public Completable registerAll(String loadBalancerId, Set<String> ipAddresses) {
        if (CollectionsExt.isNullOrEmpty(ipAddresses)) {
            return Completable.complete();
        }

        // TODO: retry logic
        // TODO: handle partial failures in the batch
        // TODO: timeouts
        final Set<TargetDescription> targetDescriptions = ipAddresses.stream().map(
                ipAddress -> new TargetDescription().withId(ipAddress)
        ).collect(Collectors.toSet());
        final RegisterTargetsRequest request = new RegisterTargetsRequest()
                .withTargetGroupArn(loadBalancerId)
                .withTargets(targetDescriptions);
        long startTime = registry.clock().wallTime();

        // force observeOn(scheduler) since the callback will be called from the AWS SDK threadpool
        return AwsObservableExt.asyncActionCompletable(factory -> getClient(loadBalancerId).registerTargetsAsync(request, factory.handler(
                (req, resp) -> {
                    logger.debug("Registered targets {}", resp);
                    connectorMetrics.success(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.RegisterTargets, startTime);
                },
                (t) -> {
                    logger.error("Error registering targets on " + loadBalancerId, t);
                    connectorMetrics.failure(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.RegisterTargets, t, startTime);
                }
        ))).observeOn(scheduler);
    }

    /**
     * Deregisters all provided IPs from the target group in a single batch.
     * Completes immediately when the set is null or empty.
     */
    @Override
    public Completable deregisterAll(String loadBalancerId, Set<String> ipAddresses) {
        if (CollectionsExt.isNullOrEmpty(ipAddresses)) {
            return Completable.complete();
        }

        // TODO: retry logic
        // TODO: handle partial failures in the batch
        // TODO: timeouts
        final DeregisterTargetsRequest request = new DeregisterTargetsRequest()
                .withTargetGroupArn(loadBalancerId)
                .withTargets(ipAddresses.stream().map(
                        ipAddress -> new TargetDescription().withId(ipAddress)
                ).collect(Collectors.toSet()));
        long startTime = registry.clock().wallTime();

        // force observeOn(scheduler) since the callback will be called from the AWS SDK threadpool
        return AwsObservableExt.asyncActionCompletable(supplier -> getClient(loadBalancerId).deregisterTargetsAsync(request, supplier.handler(
                (req, resp) -> {
                    logger.debug("Deregistered targets {}", resp);
                    connectorMetrics.success(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DeregisterTargets, startTime);
                },
                (t) -> {
                    logger.error("Error deregistering targets on " + loadBalancerId, t);
                    connectorMetrics.failure(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DeregisterTargets, t, startTime);
                }
        ))).observeOn(scheduler);
    }

    /**
     * Validates that the target group exists and is of the required "ip" target type.
     * Errors with {@link CloudConnectorException} on a type mismatch.
     */
    @Override
    public Completable isValid(String loadBalancerId) {
        final DescribeTargetGroupsRequest request = new DescribeTargetGroupsRequest()
                .withTargetGroupArns(loadBalancerId);
        long startTime = registry.clock().wallTime();

        Single<DescribeTargetGroupsResult> resultSingle = AwsObservableExt.asyncActionSingle(supplier -> getClient(loadBalancerId).describeTargetGroupsAsync(request, supplier.handler()));
        return resultSingle
                .observeOn(scheduler)
                .doOnError(throwable -> {
                    connectorMetrics.failure(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DescribeTargetGroups, throwable, startTime);
                })
                .flatMapObservable(result -> {
                    connectorMetrics.success(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DescribeTargetGroups, startTime);
                    return Observable.from(result.getTargetGroups());
                })
                .flatMap(targetGroup -> {
                    if (targetGroup.getTargetType().equals(AWS_IP_TARGET_TYPE)) {
                        return Observable.empty();
                    }
                    return Observable.error(CloudConnectorException.invalidArgument(String.format("Target group %s is NOT of required type %s", targetGroup.getTargetGroupArn(), AWS_IP_TARGET_TYPE)));
                })
                .toCompletable();
    }

    /**
     * Fetches the target group's registered IPs. A missing target group is mapped to a
     * {@link LoadBalancer.State#REMOVED} result rather than an error.
     */
    @Override
    public Single<LoadBalancer> getLoadBalancer(String loadBalancerId) {
        final DescribeTargetHealthRequest request = new DescribeTargetHealthRequest().withTargetGroupArn(loadBalancerId);
        long startTime = registry.clock().wallTime();

        Single<DescribeTargetHealthResult> asyncResult = AwsObservableExt.asyncActionSingle(
                factory -> getClient(loadBalancerId).describeTargetHealthAsync(request, factory.handler())
        );
        return asyncResult
                .observeOn(scheduler)
                .map(result -> {
                    connectorMetrics.success(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DescribeTargetHealth, startTime);
                    Set<String> ips = result.getTargetHealthDescriptions().stream()
                            .map(description -> description.getTarget().getId())
                            .collect(Collectors.toSet());
                    return new LoadBalancer(loadBalancerId, LoadBalancer.State.ACTIVE, ips);
                })
                .onErrorResumeNext(throwable -> {
                    connectorMetrics.failure(AwsLoadBalancerConnectorMetrics.AwsLoadBalancerMethods.DescribeTargetHealth, throwable, startTime);
                    if (throwable instanceof TargetGroupNotFoundException) {
                        return Single.just(new LoadBalancer(loadBalancerId, LoadBalancer.State.REMOVED, Collections.emptySet()));
                    }
                    return Single.error(throwable);
                });
    }

    // The target group ARN encodes the owning account; pick the matching ELB client.
    private AmazonElasticLoadBalancingAsync getClient(String loadBalancerId) {
        Arn arn = Arn.fromString(loadBalancerId);
        return clientProvider.getLoadBalancingClient(arn.getAccountId());
    }
}
| 1,434 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/loadbalancer/AwsLoadBalancerRateLimitException.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.loadbalancer;
/**
 * Marker exception used to classify AWS load-balancer "Rate exceeded" failures,
 * which the AWS SDK does not expose as a dedicated exception type.
 */
public class AwsLoadBalancerRateLimitException extends RuntimeException {

    /**
     * Constructs a new AwsLoadBalancerRateLimitException from the provided Throwable to
     * identify rate limiting errors, which AWS currently does not specifically identify.
     * The original throwable is preserved as the cause so the full stack trace is not lost.
     *
     * @param error the underlying AWS error that signalled rate limiting
     */
    public AwsLoadBalancerRateLimitException(Throwable error) {
        // Keep the original message AND chain the cause (the original dropped it).
        super(error.getMessage(), error);
    }
}
| 1,435 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsIamConnector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.iam;
import java.time.Duration;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementAsync;
import com.amazonaws.services.identitymanagement.model.GetRoleRequest;
import com.amazonaws.services.identitymanagement.model.GetRoleResult;
import com.amazonaws.services.identitymanagement.model.NoSuchEntityException;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.connector.cloud.IamConnector;
import com.netflix.titus.api.iam.model.IamRole;
import com.netflix.titus.api.iam.service.IamConnectorException;
import com.netflix.titus.common.util.cache.Cache;
import com.netflix.titus.common.util.cache.Caches;
import com.netflix.titus.common.util.guice.ProxyType;
import com.netflix.titus.common.util.guice.annotation.ProxyConfiguration;
import com.netflix.titus.common.util.spectator.IamConnectorMetrics;
import com.netflix.titus.common.util.tuple.Either;
import com.netflix.titus.ext.aws.AwsConfiguration;
import com.netflix.titus.ext.aws.AwsReactorExt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
/**
 * {@link IamConnector} implementation backed by AWS IAM and STS. Role lookups and
 * agent assume-role checks are cached (positive and negative results) to keep AWS
 * call volume bounded.
 */
@Singleton
@ProxyConfiguration(types = {ProxyType.Logging, ProxyType.Spectator})
public class AwsIamConnector implements IamConnector {

    private static final Logger logger = LoggerFactory.getLogger(AwsIamConnector.class);

    public static final String STS_AGENT = "stsAgent";

    private static final long MAX_CACHE_SIZE = 5_000;

    /**
     * As documented here {@link AssumeRoleRequest}.
     */
    private static final int MIN_ASSUMED_ROLE_DURATION_SEC = 900;

    private final AwsConfiguration configuration;
    private final AmazonIdentityManagementAsync iamClient;
    private final AWSSecurityTokenServiceAsync stsAgentClient;
    private final Registry registry;
    private final IamConnectorMetrics connectorMetrics;

    // Caches successful role lookups by friendly role name.
    private final Cache<String, IamRole> cache;
    // Caches assume-role outcomes, including failures, to avoid hammering STS.
    private final Cache<String, Either<Boolean, Throwable>> canAssumeCache;

    @Inject
    public AwsIamConnector(AwsConfiguration configuration,
                           AmazonIdentityManagementAsync iamClient,
                           @Named(STS_AGENT) AWSSecurityTokenServiceAsync stsAgentClient,
                           Registry registry) {
        this.configuration = configuration;
        this.iamClient = iamClient;
        this.stsAgentClient = stsAgentClient;
        this.cache = Caches.instrumentedCacheWithMaxSize(
                MAX_CACHE_SIZE,
                Duration.ofMillis(configuration.getIamRoleCacheTimeoutMs()),
                IamConnectorMetrics.METRICS_ROOT + ".awsIamRoleCache",
                registry
        );
        this.canAssumeCache = Caches.instrumentedCacheWithMaxSize(
                MAX_CACHE_SIZE,
                Duration.ofMillis(configuration.getIamRoleCacheTimeoutMs()),
                IamConnectorMetrics.METRICS_ROOT + ".awsCanAssumeIamRoleCache",
                registry
        );
        this.registry = registry;
        this.connectorMetrics = new IamConnectorMetrics(AwsIamConnector.class, registry);
    }

    @PreDestroy
    public void shutdown() {
        iamClient.shutdown();
    }

    /**
     * Gets an IAM role from AWS. The iamRoleName provided is expected to be an AWS friendly Role name:
     * (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names)
     * not an ARN. The AWS request will fail if an friendly name is not used.
     */
    @Override
    public Mono<IamRole> getIamRole(String iamRoleName) {
        return Mono.defer(() -> {
            IamRole iamRole = cache.getIfPresent(iamRoleName);
            if (iamRole != null) {
                return Mono.just(iamRole);
            }
            return getIamRoleFromAws(iamRoleName).doOnNext(fetchedIamRole -> cache.put(iamRoleName, fetchedIamRole));
        });
    }

    // Fetches the role from AWS IAM, mapping AWS failures to IamConnectorException types.
    private Mono<IamRole> getIamRoleFromAws(String iamRoleName) {
        long startTime = registry.clock().wallTime();
        return getAwsIamRole(iamRoleName)
                .timeout(Duration.ofMillis(configuration.getAwsRequestTimeoutMs()))
                .map(getRoleResult -> {
                            connectorMetrics.success(IamConnectorMetrics.IamMethods.GetIamRole, startTime);
                            return IamRole.newBuilder()
                                    .withRoleId(getRoleResult.getRole().getRoleId())
                                    .withRoleName(getRoleResult.getRole().getRoleName())
                                    .withResourceName(getRoleResult.getRole().getArn())
                                    .withPolicyDoc(getRoleResult.getRole().getAssumeRolePolicyDocument())
                                    .build();
                        }
                )
                .onErrorMap(throwable -> {
                    // Remap to specific Exception if we got rate limited. Some AWS
                    // exceptions carry a null message, so guard before inspecting it.
                    String message = throwable.getMessage();
                    if (message != null && message.contains("Rate exceeded")) {
                        throwable = new AwsIamRateLimitException(throwable);
                    }
                    connectorMetrics.failure(IamConnectorMetrics.IamMethods.GetIamRole, throwable, startTime);
                    if (throwable instanceof NoSuchEntityException) {
                        return IamConnectorException.iamRoleNotFound(iamRoleName);
                    }
                    return IamConnectorException.iamRoleUnexpectedError(iamRoleName, throwable.getMessage());
                });
    }

    /**
     * Checks, by inspecting the role's trust policy, whether the given resource may
     * assume the role. Errors with {@link IamConnectorException} when it may not.
     */
    @Override
    public Mono<Void> canIamAssume(String iamRoleName, String assumeResourceName) {
        return getIamRole(iamRoleName)
                .flatMap(iamRole -> {
                    if (AwsIamUtil.canAssume(iamRole, assumeResourceName)) {
                        return Mono.empty();
                    }
                    return Mono.error(IamConnectorException.iamRoleCannotAssume(iamRole.getRoleName(), assumeResourceName));
                });
    }

    /**
     * Checks whether the Titus agent can STS-assume the given role by actually calling
     * AssumeRole. Outcomes (success or failure) are cached.
     */
    @Override
    public Mono<Void> canAgentAssume(String iamRoleName) {
        return Mono.defer(() -> {
            long startTime = registry.clock().wallTime();

            // Check cache first
            Either<Boolean, Throwable> lastCheck = canAssumeCache.getIfPresent(iamRoleName);
            if (lastCheck != null) {
                return lastCheck.hasValue() ? Mono.empty() : Mono.error(lastCheck.getError());
            }

            // Must call AWS STS service
            return AwsReactorExt
                    .<AssumeRoleRequest, AssumeRoleResult>toMono(
                            () -> new AssumeRoleRequest()
                                    .withRoleSessionName("titusIamRoleValidation")
                                    .withRoleArn(iamRoleName)
                                    .withDurationSeconds(MIN_ASSUMED_ROLE_DURATION_SEC),
                            stsAgentClient::assumeRoleAsync
                    )
                    .flatMap(response -> {
                        logger.debug("Assumed into: {}", iamRoleName);
                        canAssumeCache.put(iamRoleName, Either.ofValue(true));
                        connectorMetrics.success(IamConnectorMetrics.IamMethods.CanAgentAssume, startTime);
                        return Mono.<Void>empty();
                    })
                    .onErrorMap(error -> {
                        logger.debug("Error: {}", error.getMessage());
                        connectorMetrics.failure(IamConnectorMetrics.IamMethods.CanAgentAssume, error, startTime);
                        // Only STS exceptions carry an error code; a blind cast would turn
                        // e.g. a timeout into a ClassCastException and mask the real failure.
                        if (error instanceof AWSSecurityTokenServiceException
                                && "AccessDenied".equals(((AWSSecurityTokenServiceException) error).getErrorCode())) {
                            // STS service returns access denied error with no additional clues. To get more insight we
                            // would have to make a call to IAM service, but this would require access to all client accounts.
                            IamConnectorException cannotAssumeError = IamConnectorException.iamRoleCannotAssume(iamRoleName, configuration.getDataPlaneAgentRoleArn());
                            canAssumeCache.put(iamRoleName, Either.ofError(cannotAssumeError));
                            return cannotAssumeError;
                        }
                        return IamConnectorException.iamRoleUnexpectedError(iamRoleName, error.getMessage());
                    });
        });
    }

    private Mono<GetRoleResult> getAwsIamRole(String iamRoleName) {
        GetRoleRequest request = new GetRoleRequest().withRoleName(iamRoleName);
        return AwsReactorExt.toMono(request, iamClient::getRoleAsync);
    }
}
| 1,436 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsAssumePrincipal.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
import java.util.Collections;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
/**
* Describes an AWS assume principal.
*/
/**
 * Describes the principal entries of an AWS assume-role policy statement.
 * The principal list is never {@code null}: a missing "Principal" field
 * deserializes to an empty list.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonDeserialize(using = AwsAssumePrincipalDeserializer.class)
public class AwsAssumePrincipal {

    private final List<String> principals;

    @JsonCreator
    public AwsAssumePrincipal(@JsonProperty("Principal") List<String> principals) {
        this.principals = principals == null ? Collections.emptyList() : principals;
    }

    /** @return the principal identifiers; never {@code null}. */
    public List<String> getPrincipals() {
        return principals;
    }

    @Override
    public String toString() {
        return "AwsAssumePrincipal{principals=" + principals + '}';
    }
}
| 1,437 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsAssumePolicy.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
import java.util.Collections;
import java.util.List;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* Describes an AWS assume policy document.
*/
/**
 * Describes an AWS assume (trust) policy document, i.e. the statements controlling
 * which principals may assume a role.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class AwsAssumePolicy {

    private final List<AwsAssumeStatement> statements;

    @JsonCreator
    public AwsAssumePolicy(@JsonProperty("Statement") List<AwsAssumeStatement> statements) {
        // Default to an empty list (consistent with AwsAssumePrincipal) so callers can
        // iterate without a null check when the document has no "Statement" field.
        this.statements = statements == null ? Collections.emptyList() : statements;
    }

    /** @return the policy statements; never {@code null}. */
    public List<AwsAssumeStatement> getStatements() {
        return statements;
    }

    @Override
    public String toString() {
        return "AwsAssumePolicy{" +
                "statements=" + statements +
                '}';
    }
}
| 1,438 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsIamUtil.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.titus.api.iam.model.IamRole;
import com.netflix.titus.api.json.ObjectMappers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Static helpers for inspecting AWS IAM assume-role (trust) policy documents.
 */
public class AwsIamUtil {

    private static final Logger logger = LoggerFactory.getLogger(AwsIamUtil.class);

    private static final String ASSUME_ACTION = "sts:AssumeRole";
    private static final String ASSUME_EFFECT = "Allow";

    // Utility class: not instantiable.
    private AwsIamUtil() {
    }

    /**
     * Helper method to deserialize an IAM Role's assume policy and verify if
     * another role can assume into it.
     *
     * @return {@code true} if any policy statement allows sts:AssumeRole for the
     *         provided resource; {@code false} otherwise, including when the policy
     *         document cannot be decoded or parsed.
     */
    public static boolean canAssume(IamRole iamRole, String assumeResourceName) {
        try {
            // AWS provides us a URL encoded policy doc
            String assumePolicyJson = URLDecoder.decode(iamRole.getAssumePolicy(), StandardCharsets.UTF_8.toString());
            ObjectMapper objectMapper = ObjectMappers.defaultMapper();
            AwsAssumePolicy awsAssumePolicy = objectMapper.readValue(assumePolicyJson, AwsAssumePolicy.class);

            // Check if there is a policy statement that allows assumeRole for the provided resource
            for (AwsAssumeStatement statement : awsAssumePolicy.getStatements()) {
                if (statementCanAssume(statement, assumeResourceName)) {
                    return true;
                }
            }
        } catch (Exception e) {
            logger.warn("Unable to deserialize IAM role assumeRole policy {}: {}", iamRole.getAssumePolicy(), e.getMessage());
            return false;
        }
        return false;
    }

    // True when this single statement is an Allow of sts:AssumeRole whose principal
    // list contains an entry ending with the given resource name.
    private static boolean statementCanAssume(AwsAssumeStatement statement, String assumeResourceName) {
        // Constant-first equals guards against statements with missing (null) fields.
        if (!ASSUME_ACTION.equals(statement.getAction()) || !ASSUME_EFFECT.equals(statement.getEffect())) {
            return false;
        }
        AwsAssumePrincipal principal = statement.getPrincipal();
        if (principal == null) {
            return false;
        }
        for (String principalName : principal.getPrincipals()) {
            // Note this checks the resource name matches but does not check the account specifics.
            // Consider a deeper check that matches the Agent account as well.
            if (principalName.endsWith(assumeResourceName)) {
                return true;
            }
        }
        return false;
    }
}
| 1,439 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/Main.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.iam;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementAsyncClientBuilder;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsync;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsyncClientBuilder;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.SystemExt;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.ext.aws.AmazonStsAsyncProvider;
import com.netflix.titus.ext.aws.AwsConfiguration;
import com.netflix.titus.ext.aws.DataPlaneAgentCredentialsProvider;
import static com.netflix.titus.common.util.CollectionsExt.asSet;
/**
 * Command line utility for exercising the {@link AwsIamConnector} "agentAssume" check
 * against real AWS credentials (taken from the local "default" credentials profile).
 *
 * Usage: {@code Main agentAssume <roleName>}
 */
public class Main {

    private static final String REGION = "us-east-1";

    // The set of commands this tool understands; the first CLI argument must be one of these.
    private static final Set<String> ALL_COMMANDS = asSet("agentAssume");

    // Region comes first so system properties can override it.
    private static final AwsConfiguration CONFIGURATION = Archaius2Ext.newConfiguration(AwsConfiguration.class,
            ImmutableMap.<String, String>builder()
                    .put("titus.ext.aws.region", REGION)
                    .putAll(SystemExt.getSystemPropertyMap())
                    .build()
    );

    private final AwsIamConnector connector;

    private Main(AwsIamConnector connector) {
        this.connector = connector;
    }

    /**
     * Runs the blocking canAgentAssume check and reports failures on stdout/stderr
     * instead of propagating, so the tool can still exit cleanly.
     */
    private void canAgentAssume(String roleName) {
        try {
            connector.canAgentAssume(roleName).block();
        } catch (Exception e) {
            System.out.println("Agent assume role failed: " + e.getMessage());
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        if (args.length == 0 || !ALL_COMMANDS.contains(args[0])) {
            helpAndExit();
        }
        String cmd = args[0];
        if (asSet("agentAssume").contains(cmd) && args.length < 2) {
            helpAndExit();
        }
        List<String> params = args.length == 1 ? Collections.emptyList() : CollectionsExt.asList(args, 1);

        Stopwatch started = Stopwatch.createStarted();
        // Create the connector once and shut down that same instance on exit.
        // (Previously doExit() built a brand-new connector just to call shutdown() on it,
        // leaking the connector that actually performed the work.)
        AwsIamConnector connector = null;
        int status = 0;
        try {
            connector = createConnector();
            Main main = new Main(connector);
            if (cmd.equals("agentAssume")) {
                main.canAgentAssume(params.get(0));
            }
        } catch (Throwable e) {
            e.printStackTrace();
            status = -1;
        } finally {
            doExit(started, connector, status);
        }
    }

    /**
     * Prints the elapsed time, shuts down the connector used by this run (if one was
     * successfully created), and terminates the JVM with the given status code.
     */
    private static void doExit(Stopwatch started, AwsIamConnector connector, int status) {
        System.out.println("Finished in " + started.elapsed(TimeUnit.SECONDS) + "sec");
        if (connector != null) {
            connector.shutdown();
        }
        System.exit(status);
    }

    /**
     * Builds an {@link AwsIamConnector} backed by the local "default" AWS credentials profile,
     * resolving the region from EC2 metadata when available and defaulting to us-east-1 otherwise.
     */
    private static AwsIamConnector createConnector() {
        AWSCredentialsProvider baseCredentials = new ProfileCredentialsProvider("default");
        AWSSecurityTokenServiceAsync stsClient = new AmazonStsAsyncProvider(CONFIGURATION, baseCredentials, TitusRuntimes.internal()).get();
        AWSCredentialsProvider credentialsProvider = new DataPlaneAgentCredentialsProvider(CONFIGURATION, stsClient, baseCredentials).get();

        Region currentRegion = Regions.getCurrentRegion();
        if (currentRegion == null) {
            currentRegion = Region.getRegion(Regions.US_EAST_1);
        }
        return new AwsIamConnector(
                CONFIGURATION,
                AmazonIdentityManagementAsyncClientBuilder.standard()
                        .withRegion(currentRegion.getName())
                        .withCredentials(credentialsProvider)
                        .build(),
                AWSSecurityTokenServiceAsyncClientBuilder.standard()
                        .withRegion(currentRegion.getName())
                        .withCredentials(credentialsProvider)
                        .build(),
                new DefaultRegistry()
        );
    }

    private static void helpAndExit() {
        System.err.println("Usage: Main [" + ALL_COMMANDS.stream().collect(Collectors.joining(" | ")) + ']');
        System.exit(-1);
    }
}
| 1,440 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsIamRateLimitException.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
/**
 * Exception marking AWS IAM rate limiting (throttling) errors, which the AWS SDK
 * does not surface as a dedicated exception type.
 */
public class AwsIamRateLimitException extends RuntimeException {

    /**
     * Constructs a new AwsIamRateLimitException from the provided Throwable to
     * identify rate limiting errors, which principals currently does not specifically identify.
     * The original error is kept as the cause so the full stack trace is preserved
     * (the previous version only copied the message, losing the cause chain).
     *
     * @param error the underlying AWS error that indicated rate limiting
     */
    public AwsIamRateLimitException(Throwable error) {
        super(error.getMessage(), error);
    }
}
| 1,441 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsAssumeStatement.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* Describes an AWS assume policy statement.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class AwsAssumeStatement {
private final String effect;
private final AwsAssumePrincipal principal;
private final String action;
@JsonCreator
public AwsAssumeStatement(@JsonProperty("Effect") String effect,
@JsonProperty("Principal") AwsAssumePrincipal principal,
@JsonProperty("Action") String action) {
this.effect = effect;
this.principal = principal;
this.action = action;
}
public String getEffect() {
return effect;
}
public AwsAssumePrincipal getPrincipal() {
return principal;
}
public String getAction() {
return action;
}
@Override
public String toString() {
return "AwsAssumeStatement{" +
"effect='" + effect + '\'' +
", principal=" + principal +
", action='" + action + '\'' +
'}';
}
}
| 1,442 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/iam/AwsAssumePrincipalDeserializer.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.ext.aws.iam;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Custom Jackson deserializer for handling AWS assume principals that can be encoded as
* either an array of principals or a single principal string value.
*/
/**
 * Custom Jackson deserializer for handling AWS assume principals that can be encoded as
 * either an array of principals or a single principal string value.
 */
public class AwsAssumePrincipalDeserializer extends StdDeserializer<AwsAssumePrincipal> {
    // JSON key under the Principal object whose value holds the AWS principal(s).
    private static final String AWS_KEY = "AWS";

    public AwsAssumePrincipalDeserializer() {
        this(null);
    }

    public AwsAssumePrincipalDeserializer(Class<?> vc) {
        super(vc);
    }

    /**
     * Reads the "AWS" entry of a principal node and collects its value(s) into an
     * {@link AwsAssumePrincipal}. A missing or null "AWS" key yields an empty principal list.
     *
     * @throws IOException on underlying JSON parse failures
     */
    @Override
    public AwsAssumePrincipal deserialize(JsonParser jp, DeserializationContext ctxt)
            throws IOException {
        List<String> principals = new ArrayList<>();
        JsonNode node = jp.getCodec().readTree(jp);

        // Find the specific key
        if (node.hasNonNull(AWS_KEY)) {
            JsonNode awsNode = node.get(AWS_KEY);
            // The key's value may be an array or single string element.
            if (awsNode.isArray()) {
                awsNode.forEach(principalNode -> principals.add(principalNode.asText()));
            } else {
                principals.add(awsNode.asText());
            }
        }
        return new AwsAssumePrincipal(principals);
    }
}
| 1,443 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AWSAppAutoScalingMetrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import com.amazonaws.AmazonServiceException;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
/**
 * Spectator metrics for AWS Application Auto Scaling operations. One success counter and
 * one error counter (tagged with the AWS error code) exist per operation: create/delete/get
 * scalable target and create/delete scaling policy.
 */
public class AWSAppAutoScalingMetrics {
    private static final String METRIC_APP_SCALE_CREATE_TARGET = "titus.appscale.create.target";
    private static final String METRIC_APP_SCALE_CREATE_POLICY = "titus.appscale.create.policy";
    private static final String METRIC_APP_SCALE_DELETE_TARGET = "titus.appscale.delete.target";
    private static final String METRIC_APP_SCALE_DELETE_POLICY = "titus.appscale.delete.policy";
    private static final String METRIC_APP_SCALE_GET_TARGET = "titus.appscale.get.target";

    private static final String METRIC_APP_SCALE_CREATE_TARGET_ERROR = "titus.appscale.create.target.error";
    private static final String METRIC_APP_SCALE_CREATE_POLICY_ERROR = "titus.appscale.create.policy.error";
    private static final String METRIC_APP_SCALE_DELETE_TARGET_ERROR = "titus.appscale.delete.target.error";
    private static final String METRIC_APP_SCALE_DELETE_POLICY_ERROR = "titus.appscale.delete.policy.error";
    private static final String METRIC_APP_SCALE_GET_TARGET_ERROR = "titus.appscale.get.target.error";

    // Tag value used when the exception carries no AWS error code.
    private static final String METRIC_ERR_CODE_UNKNOWN = "unknown";
    private static final String METRIC_ERR_CODE_TAG = "errorCode";

    private final Registry registry;

    private final Id createTargetId;
    private final Id deleteTargetId;
    private final Id getTargetId;
    private final Id createPolicyId;
    private final Id deletePolicyId;
    private final Id createTargetErrorId;
    private final Id deleteTargetErrorId;
    private final Id getTargetErrorId;
    private final Id createPolicyErrorId;
    private final Id deletePolicyErrorId;

    public AWSAppAutoScalingMetrics(Registry registry) {
        this.registry = registry;
        createTargetId = registry.createId(METRIC_APP_SCALE_CREATE_TARGET);
        deleteTargetId = registry.createId(METRIC_APP_SCALE_DELETE_TARGET);
        getTargetId = registry.createId(METRIC_APP_SCALE_GET_TARGET);
        createPolicyId = registry.createId(METRIC_APP_SCALE_CREATE_POLICY);
        deletePolicyId = registry.createId(METRIC_APP_SCALE_DELETE_POLICY);
        createTargetErrorId = registry.createId(METRIC_APP_SCALE_CREATE_TARGET_ERROR);
        deleteTargetErrorId = registry.createId(METRIC_APP_SCALE_DELETE_TARGET_ERROR);
        getTargetErrorId = registry.createId(METRIC_APP_SCALE_GET_TARGET_ERROR);
        createPolicyErrorId = registry.createId(METRIC_APP_SCALE_CREATE_POLICY_ERROR);
        deletePolicyErrorId = registry.createId(METRIC_APP_SCALE_DELETE_POLICY_ERROR);
    }

    void registerAwsCreateTargetSuccess() {
        registry.counter(createTargetId).increment();
    }

    void registerAwsCreateTargetError(Exception exception) {
        incrementErrorCounter(createTargetErrorId, exception);
    }

    void registerAwsDeleteTargetSuccess() {
        registry.counter(deleteTargetId).increment();
    }

    void registerAwsDeleteTargetError(Exception exception) {
        incrementErrorCounter(deleteTargetErrorId, exception);
    }

    void registerAwsGetTargetSuccess() {
        registry.counter(getTargetId).increment();
    }

    void registerAwsGetTargetError(Exception exception) {
        incrementErrorCounter(getTargetErrorId, exception);
    }

    void registerAwsCreatePolicySuccess() {
        registry.counter(createPolicyId).increment();
    }

    void registerAwsCreatePolicyError(Exception exception) {
        incrementErrorCounter(createPolicyErrorId, exception);
    }

    void registerAwsDeletePolicySuccess() {
        registry.counter(deletePolicyId).increment();
    }

    void registerAwsDeletePolicyError(Exception exception) {
        incrementErrorCounter(deletePolicyErrorId, exception);
    }

    // Shared error path: every error counter is tagged with the AWS error code (or "unknown").
    private void incrementErrorCounter(Id errorId, Exception exception) {
        registry.counter(errorId.withTag(METRIC_ERR_CODE_TAG, getErrorCode(exception))).increment();
    }

    private String getErrorCode(Exception exception) {
        if (exception instanceof AmazonServiceException) {
            return ((AmazonServiceException) exception).getErrorCode();
        }
        return METRIC_ERR_CODE_UNKNOWN;
    }
}
| 1,444 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AppAutoScalingCallbackService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import rx.Observable;
/**
 * Callback contract for the AWS App Auto Scaling "custom-resource" integration: exposes
 * read and update operations on a job's scalable target resource info.
 */
public interface AppAutoScalingCallbackService {

    /**
     * Emits the current scalable target resource info for the given job.
     *
     * @param jobId        id of the Titus job backing the scalable target
     * @param callMetadata caller identity/context propagated for auditing
     */
    Observable<ScalableTargetResourceInfo> getScalableTargetResourceInfo(String jobId, CallMetadata callMetadata);

    /**
     * Applies the provided scalable target resource info to the given job and emits the
     * resulting state.
     *
     * @param jobId                      id of the Titus job backing the scalable target
     * @param scalableTargetResourceInfo desired resource info to apply
     * @param callMetadata               caller identity/context propagated for auditing
     */
    Observable<ScalableTargetResourceInfo> setScalableTargetResourceInfo(String jobId,
                                                                         ScalableTargetResourceInfo scalableTargetResourceInfo,
                                                                         CallMetadata callMetadata);
}
| 1,445 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AWSAppAutoScalingClient.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.handlers.AsyncHandler;
import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingAsync;
import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingAsyncClientBuilder;
import com.amazonaws.services.applicationautoscaling.model.CustomizedMetricSpecification;
import com.amazonaws.services.applicationautoscaling.model.DeleteScalingPolicyRequest;
import com.amazonaws.services.applicationautoscaling.model.DeleteScalingPolicyResult;
import com.amazonaws.services.applicationautoscaling.model.DeregisterScalableTargetRequest;
import com.amazonaws.services.applicationautoscaling.model.DeregisterScalableTargetResult;
import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest;
import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult;
import com.amazonaws.services.applicationautoscaling.model.MetricDimension;
import com.amazonaws.services.applicationautoscaling.model.ObjectNotFoundException;
import com.amazonaws.services.applicationautoscaling.model.PutScalingPolicyRequest;
import com.amazonaws.services.applicationautoscaling.model.PutScalingPolicyResult;
import com.amazonaws.services.applicationautoscaling.model.RegisterScalableTargetRequest;
import com.amazonaws.services.applicationautoscaling.model.RegisterScalableTargetResult;
import com.amazonaws.services.applicationautoscaling.model.ScalableTarget;
import com.amazonaws.services.applicationautoscaling.model.StepAdjustment;
import com.amazonaws.services.applicationautoscaling.model.StepScalingPolicyConfiguration;
import com.amazonaws.services.applicationautoscaling.model.TargetTrackingScalingPolicyConfiguration;
import com.amazonaws.services.applicationautoscaling.model.ValidationException;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.appscale.model.AutoScalableTarget;
import com.netflix.titus.api.appscale.model.PolicyConfiguration;
import com.netflix.titus.api.appscale.model.PolicyType;
import com.netflix.titus.api.appscale.model.TargetTrackingPolicy;
import com.netflix.titus.api.appscale.service.AutoScalePolicyException;
import com.netflix.titus.api.connector.cloud.AppAutoScalingClient;
import com.netflix.titus.ext.aws.RetryWrapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Emitter;
import rx.Observable;
import static com.netflix.titus.ext.aws.appscale.AWSAppAutoScalingUtil.buildScalingPolicyName;
@Singleton
public class AWSAppAutoScalingClient implements AppAutoScalingClient {
private static Logger logger = LoggerFactory.getLogger(AWSAppAutoScalingClient.class);
public final static String SERVICE_NAMESPACE = "custom-resource";
// AWS requires this field be set to this specific value for all application-autoscaling calls.
public final static String SCALABLE_DIMENSION = "custom-resource:ResourceType:Property";
private final AWSApplicationAutoScalingAsync awsAppAutoScalingClientAsync;
private final AWSAppScalingConfig awsAppScalingConfig;
private final AWSAppAutoScalingMetrics awsAppAutoScalingMetrics;
@Inject
public AWSAppAutoScalingClient(AWSCredentialsProvider awsCredentialsProvider,
AWSAppScalingConfig awsAppScalingConfig,
Registry registry) {
this(AWSApplicationAutoScalingAsyncClientBuilder.standard()
.withCredentials(awsCredentialsProvider)
.withRegion(awsAppScalingConfig.getRegion()).build(),
awsAppScalingConfig, registry);
}
@VisibleForTesting
AWSAppAutoScalingClient(AWSApplicationAutoScalingAsync awsAppAutoScalingClientAsync, AWSAppScalingConfig awsAppScalingConfig, Registry registry) {
this.awsAppAutoScalingClientAsync = awsAppAutoScalingClientAsync;
this.awsAppScalingConfig = awsAppScalingConfig;
this.awsAppAutoScalingMetrics = new AWSAppAutoScalingMetrics(registry);
}
@Override
public Completable createScalableTarget(String jobId, int minCapacity, int maxCapacity) {
RegisterScalableTargetRequest registerScalableTargetRequest = new RegisterScalableTargetRequest();
registerScalableTargetRequest.setMinCapacity(minCapacity);
registerScalableTargetRequest.setMaxCapacity(maxCapacity);
registerScalableTargetRequest.setResourceId(AWSAppAutoScalingUtil.buildGatewayResourceId(jobId,
awsAppScalingConfig.getAWSGatewayEndpointPrefix(),
awsAppScalingConfig.getRegion(),
awsAppScalingConfig.getAWSGatewayEndpointTargetStage()));
registerScalableTargetRequest.setServiceNamespace(SERVICE_NAMESPACE);
registerScalableTargetRequest.setScalableDimension(SCALABLE_DIMENSION);
logger.info("RegisterScalableTargetRequest {}", registerScalableTargetRequest);
return RetryWrapper.wrapWithExponentialRetry(String.format("createScalableTarget for job %s", jobId),
Observable.create(emitter -> awsAppAutoScalingClientAsync.registerScalableTargetAsync(registerScalableTargetRequest, new AsyncHandler<RegisterScalableTargetRequest, RegisterScalableTargetResult>() {
@Override
public void onError(Exception exception) {
logger.error("Register scalable target exception for {} - {}", jobId, exception.getMessage());
awsAppAutoScalingMetrics.registerAwsCreateTargetError(exception);
emitter.onError(exception);
}
@Override
public void onSuccess(RegisterScalableTargetRequest request, RegisterScalableTargetResult registerScalableTargetResult) {
int httpStatusCode = registerScalableTargetResult.getSdkHttpMetadata().getHttpStatusCode();
logger.info("Registered scalable target for success {} - status {}", jobId, httpStatusCode);
awsAppAutoScalingMetrics.registerAwsCreateTargetSuccess();
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE)).toCompletable();
}
@Override
public Observable<AutoScalableTarget> getScalableTargetsForJob(String jobId) {
DescribeScalableTargetsRequest describeScalableTargetsRequest = new DescribeScalableTargetsRequest();
describeScalableTargetsRequest.setServiceNamespace(SERVICE_NAMESPACE);
describeScalableTargetsRequest.setScalableDimension(SCALABLE_DIMENSION);
describeScalableTargetsRequest.setResourceIds(Collections.singletonList(
AWSAppAutoScalingUtil.buildGatewayResourceId(jobId,
awsAppScalingConfig.getAWSGatewayEndpointPrefix(),
awsAppScalingConfig.getRegion(),
awsAppScalingConfig.getAWSGatewayEndpointTargetStage())));
return RetryWrapper.wrapWithExponentialRetry(String.format("getScalableTargetsForJob for job %s", jobId),
Observable.create(emitter -> awsAppAutoScalingClientAsync.describeScalableTargetsAsync(describeScalableTargetsRequest,
new AsyncHandler<DescribeScalableTargetsRequest, DescribeScalableTargetsResult>() {
@Override
public void onError(Exception exception) {
logger.error("Get scalable target exception for {} - {}", jobId, exception.getMessage());
awsAppAutoScalingMetrics.registerAwsGetTargetError(exception);
emitter.onError(exception);
}
@Override
public void onSuccess(DescribeScalableTargetsRequest request, DescribeScalableTargetsResult describeScalableTargetsResult) {
awsAppAutoScalingMetrics.registerAwsGetTargetSuccess();
List<ScalableTarget> scalableTargets = describeScalableTargetsResult.getScalableTargets();
scalableTargets.stream()
.map(AWSAppAutoScalingUtil::toAutoScalableTarget)
.forEach(emitter::onNext);
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE));
}
@Override
public Observable<String> createOrUpdateScalingPolicy(String policyRefId, String jobId, PolicyConfiguration policyConfiguration) {
PutScalingPolicyRequest putScalingPolicyRequest = new PutScalingPolicyRequest();
putScalingPolicyRequest.setPolicyName(buildScalingPolicyName(policyRefId, jobId));
putScalingPolicyRequest.setPolicyType(policyConfiguration.getPolicyType().name());
putScalingPolicyRequest.setResourceId(
AWSAppAutoScalingUtil.buildGatewayResourceId(jobId,
awsAppScalingConfig.getAWSGatewayEndpointPrefix(),
awsAppScalingConfig.getRegion(),
awsAppScalingConfig.getAWSGatewayEndpointTargetStage()));
putScalingPolicyRequest.setServiceNamespace(SERVICE_NAMESPACE);
putScalingPolicyRequest.setScalableDimension(SCALABLE_DIMENSION);
if (policyConfiguration.getPolicyType() == PolicyType.StepScaling) {
StepScalingPolicyConfiguration stepScalingPolicyConfiguration = new StepScalingPolicyConfiguration();
if (policyConfiguration.getStepScalingPolicyConfiguration().getMetricAggregationType().isPresent()) {
stepScalingPolicyConfiguration.setMetricAggregationType(policyConfiguration.getStepScalingPolicyConfiguration().getMetricAggregationType().get().name());
}
if (policyConfiguration.getStepScalingPolicyConfiguration().getCoolDownSec().isPresent()) {
stepScalingPolicyConfiguration.setCooldown(policyConfiguration.getStepScalingPolicyConfiguration().getCoolDownSec().get());
}
List<com.netflix.titus.api.appscale.model.StepAdjustment> steps = policyConfiguration.getStepScalingPolicyConfiguration().getSteps();
List<StepAdjustment> stepAdjustments = steps.stream()
.map(step -> {
StepAdjustment stepAdjustment = new StepAdjustment();
if (step.getMetricIntervalUpperBound() != null && step.getMetricIntervalUpperBound().isPresent()) {
stepAdjustment.setMetricIntervalUpperBound(step.getMetricIntervalUpperBound().get());
}
if (step.getMetricIntervalLowerBound() != null && step.getMetricIntervalLowerBound().isPresent()) {
stepAdjustment.setMetricIntervalLowerBound(step.getMetricIntervalLowerBound().get());
}
stepAdjustment.setScalingAdjustment(step.getScalingAdjustment());
return stepAdjustment;
})
.collect(Collectors.toList());
stepScalingPolicyConfiguration.setStepAdjustments(stepAdjustments);
if (policyConfiguration.getStepScalingPolicyConfiguration().getAdjustmentType().isPresent()) {
stepScalingPolicyConfiguration.setAdjustmentType(policyConfiguration.getStepScalingPolicyConfiguration().getAdjustmentType().get().name());
}
putScalingPolicyRequest.setStepScalingPolicyConfiguration(stepScalingPolicyConfiguration);
} else if (policyConfiguration.getPolicyType() == PolicyType.TargetTrackingScaling) {
TargetTrackingScalingPolicyConfiguration targetTrackingConfigAws = new TargetTrackingScalingPolicyConfiguration();
TargetTrackingPolicy targetTrackingPolicyInt = policyConfiguration.getTargetTrackingPolicy();
targetTrackingConfigAws.setTargetValue(targetTrackingPolicyInt.getTargetValue());
if (targetTrackingPolicyInt.getDisableScaleIn().isPresent()) {
targetTrackingConfigAws.setDisableScaleIn(targetTrackingPolicyInt.getDisableScaleIn().get());
}
if (targetTrackingPolicyInt.getScaleInCooldownSec().isPresent()) {
targetTrackingConfigAws.setScaleInCooldown(targetTrackingPolicyInt.getScaleInCooldownSec().get());
}
if (targetTrackingPolicyInt.getScaleOutCooldownSec().isPresent()) {
targetTrackingConfigAws.setScaleOutCooldown(targetTrackingPolicyInt.getScaleOutCooldownSec().get());
}
if (targetTrackingPolicyInt.getCustomizedMetricSpecification().isPresent()) {
com.netflix.titus.api.appscale.model.CustomizedMetricSpecification customizedMetricSpecInt =
targetTrackingPolicyInt.getCustomizedMetricSpecification().get();
CustomizedMetricSpecification customizedMetricSpecAws = new CustomizedMetricSpecification();
customizedMetricSpecAws.setDimensions(customizedMetricSpecInt.getMetricDimensionList()
.stream()
.map(metricDimensionInt -> {
MetricDimension metricDimensionAws = new MetricDimension()
.withName(metricDimensionInt.getName())
.withValue(metricDimensionInt.getValue());
return metricDimensionAws;
})
.collect(Collectors.toList()));
customizedMetricSpecAws.setMetricName(customizedMetricSpecInt.getMetricName());
customizedMetricSpecAws.setNamespace(customizedMetricSpecInt.getNamespace());
customizedMetricSpecAws.setStatistic(customizedMetricSpecInt.getStatistic().name());
if (customizedMetricSpecInt.getUnit().isPresent()) {
customizedMetricSpecAws.setUnit(customizedMetricSpecInt.getUnit().get());
}
targetTrackingConfigAws.setCustomizedMetricSpecification(customizedMetricSpecAws);
}
putScalingPolicyRequest.setTargetTrackingScalingPolicyConfiguration(targetTrackingConfigAws);
} else {
return Observable.error(new UnsupportedOperationException(String.format(
"Scaling policy type not supported: jobId=%s, policyRefId=%s, type=%s",
jobId, policyRefId, policyConfiguration.getPolicyType().name()
)));
}
return RetryWrapper.wrapWithExponentialRetry(String.format("createOrUpdateScalingPolicy %s for job %s", policyRefId, jobId),
Observable.create(emitter -> awsAppAutoScalingClientAsync.putScalingPolicyAsync(putScalingPolicyRequest, new AsyncHandler<PutScalingPolicyRequest, PutScalingPolicyResult>() {
@Override
public void onError(Exception exception) {
logger.error("Exception creating scaling policy: jobId={}, policyRefId={}", jobId, policyRefId, exception);
awsAppAutoScalingMetrics.registerAwsCreatePolicyError(exception);
if (exception instanceof ValidationException) {
emitter.onError(AutoScalePolicyException.invalidScalingPolicy(policyRefId, exception.getMessage()));
} else {
emitter.onError(AutoScalePolicyException.errorCreatingPolicy(policyRefId, exception.getMessage()));
}
}
@Override
public void onSuccess(PutScalingPolicyRequest request, PutScalingPolicyResult putScalingPolicyResult) {
String policyARN = putScalingPolicyResult.getPolicyARN();
logger.info("New Scaling policy {} created {} for Job {}", request, policyARN, jobId);
awsAppAutoScalingMetrics.registerAwsCreatePolicySuccess();
emitter.onNext(policyARN);
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE));
}
@Override
public Completable deleteScalableTarget(String jobId) {
DeregisterScalableTargetRequest deRegisterRequest = new DeregisterScalableTargetRequest();
deRegisterRequest.setResourceId(
AWSAppAutoScalingUtil.buildGatewayResourceId(jobId,
awsAppScalingConfig.getAWSGatewayEndpointPrefix(),
awsAppScalingConfig.getRegion(),
awsAppScalingConfig.getAWSGatewayEndpointTargetStage()));
deRegisterRequest.setServiceNamespace(SERVICE_NAMESPACE);
deRegisterRequest.setScalableDimension(SCALABLE_DIMENSION);
return RetryWrapper.wrapWithExponentialRetry(String.format("deleteScalableTarget for job %s", jobId),
Observable.create(emitter -> awsAppAutoScalingClientAsync.deregisterScalableTargetAsync(deRegisterRequest, new AsyncHandler<DeregisterScalableTargetRequest, DeregisterScalableTargetResult>() {
@Override
public void onError(Exception exception) {
if (exception instanceof ObjectNotFoundException) {
logger.info("Scalable target does not exist anymore for job {}", jobId);
emitter.onCompleted();
} else {
logger.error("Deregister scalable target exception {} - {}", jobId, exception.getMessage());
awsAppAutoScalingMetrics.registerAwsDeleteTargetError(exception);
emitter.onError(exception);
}
}
@Override
public void onSuccess(DeregisterScalableTargetRequest request, DeregisterScalableTargetResult deregisterScalableTargetResult) {
int httpStatusCode = deregisterScalableTargetResult.getSdkHttpMetadata().getHttpStatusCode();
logger.info("De-registered scalable target for {}, status {}", jobId, httpStatusCode);
awsAppAutoScalingMetrics.registerAwsDeleteTargetSuccess();
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE)).toCompletable();
}
@Override
public Completable deleteScalingPolicy(String policyRefId, String jobId) {
DeleteScalingPolicyRequest deleteScalingPolicyRequest = new DeleteScalingPolicyRequest();
deleteScalingPolicyRequest.setResourceId(
AWSAppAutoScalingUtil.buildGatewayResourceId(jobId,
awsAppScalingConfig.getAWSGatewayEndpointPrefix(),
awsAppScalingConfig.getRegion(),
awsAppScalingConfig.getAWSGatewayEndpointTargetStage()));
deleteScalingPolicyRequest.setServiceNamespace(SERVICE_NAMESPACE);
deleteScalingPolicyRequest.setScalableDimension(SCALABLE_DIMENSION);
deleteScalingPolicyRequest.setPolicyName(buildScalingPolicyName(policyRefId, jobId));
return RetryWrapper.wrapWithExponentialRetry(String.format("deleteScalingPolicy %s for job %s", policyRefId, jobId),
Observable.create(emitter -> awsAppAutoScalingClientAsync.deleteScalingPolicyAsync(deleteScalingPolicyRequest, new AsyncHandler<DeleteScalingPolicyRequest, DeleteScalingPolicyResult>() {
@Override
public void onError(Exception exception) {
if (exception instanceof ObjectNotFoundException) {
logger.info("Scaling policy does not exist anymore for job/policyRefId {}/{}", jobId, policyRefId);
emitter.onCompleted();
} else {
logger.error("Delete scaling policy exception {} - {}", jobId, exception.getMessage());
awsAppAutoScalingMetrics.registerAwsDeletePolicyError(exception);
emitter.onError(AutoScalePolicyException.errorDeletingPolicy(policyRefId, exception.getMessage()));
}
}
@Override
public void onSuccess(DeleteScalingPolicyRequest request, DeleteScalingPolicyResult deleteScalingPolicyResult) {
int httpStatusCode = deleteScalingPolicyResult.getSdkHttpMetadata().getHttpStatusCode();
logger.info("Deleted scaling policy for job/policyRefId {}/{}, status - {}", jobId, policyRefId, httpStatusCode);
awsAppAutoScalingMetrics.registerAwsDeletePolicySuccess();
emitter.onCompleted();
}
}), Emitter.BackpressureMode.NONE)).toCompletable();
}
}
| 1,446 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AWSAppScalingConfig.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import com.netflix.archaius.api.annotations.PropertyName;
/**
 * Archaius-backed configuration for the AWS Application Auto Scaling integration.
 * Values here are used to build the API Gateway callback URL that AWS uses to reach
 * Titus as a custom scalable resource (see {@code AWSAppAutoScalingUtil#buildGatewayResourceId}).
 */
public interface AWSAppScalingConfig {
    /** AWS region used in the gateway callback URL (e.g. {@code us-east-1}). */
    @PropertyName(name = "region")
    String getRegion();
    /** Netflix stack name — assumed to be an environment/stack qualifier; TODO confirm usage at call sites. */
    @PropertyName(name = "netflix.stack")
    String getStack();
    /** Hostname prefix of the API Gateway endpoint ({@code https://<prefix>.execute-api...}). */
    @PropertyName(name = "aws.gateway.api.prefix")
    String getAWSGatewayEndpointPrefix();
    /** Deployment stage segment of the API Gateway endpoint URL. */
    @PropertyName(name = "aws.gateway.api.stage")
    String getAWSGatewayEndpointTargetStage();
}
| 1,447 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/ScalableTargetResourceInfo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
/**
 * Mutable JSON payload exchanged with AWS Application Auto Scaling through the custom-resource
 * callback endpoints. Carries the current (actual) and requested (desired) capacity of a Titus
 * job together with identifying metadata. Instances can be created either via the no-arg
 * constructor plus setters (framework deserialization) or via {@link #newBuilder()}.
 */
public class ScalableTargetResourceInfo {

    private int actualCapacity;
    private int desiredCapacity;
    private String dimensionName;
    private String resourceName;
    private String scalableTargetDimensionId;
    private String scalingStatus;
    private String version;

    /** No-arg constructor kept for framework deserialization of the callback payload. */
    public ScalableTargetResourceInfo() {
    }

    public int getActualCapacity() {
        return actualCapacity;
    }

    public void setActualCapacity(int actualCapacity) {
        this.actualCapacity = actualCapacity;
    }

    public int getDesiredCapacity() {
        return desiredCapacity;
    }

    public void setDesiredCapacity(int desiredCapacity) {
        this.desiredCapacity = desiredCapacity;
    }

    public String getDimensionName() {
        return dimensionName;
    }

    public void setDimensionName(String dimensionName) {
        this.dimensionName = dimensionName;
    }

    public String getResourceName() {
        return resourceName;
    }

    public void setResourceName(String resourceName) {
        this.resourceName = resourceName;
    }

    public String getScalableTargetDimensionId() {
        return scalableTargetDimensionId;
    }

    public void setScalableTargetDimensionId(String scalableTargetDimensionId) {
        this.scalableTargetDimensionId = scalableTargetDimensionId;
    }

    public String getScalingStatus() {
        return scalingStatus;
    }

    public void setScalingStatus(String scalingStatus) {
        this.scalingStatus = scalingStatus;
    }

    public String getVersion() {
        return version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("ScalableTargetResourceInfo{");
        text.append("actualCapacity=").append(actualCapacity);
        text.append(", desiredCapacity=").append(desiredCapacity);
        text.append(", dimensionName='").append(dimensionName).append('\'');
        text.append(", resourceName='").append(resourceName).append('\'');
        text.append(", scalableTargetDimensionId='").append(scalableTargetDimensionId).append('\'');
        text.append(", scalingStatus='").append(scalingStatus).append('\'');
        text.append(", version='").append(version).append('\'');
        text.append('}');
        return text.toString();
    }

    /** Creates an empty builder. */
    public static Builder newBuilder() {
        return new Builder();
    }

    /** Fluent builder; each method returns {@code this} for chaining. */
    public static class Builder {
        private int actualCapacity;
        private int desiredCapacity;
        private String dimensionName;
        private String resourceName;
        private String scalableTargetDimensionId;
        private String scalingStatus;
        private String version;

        public Builder actualCapacity(int actualCapacity) {
            this.actualCapacity = actualCapacity;
            return this;
        }

        public Builder desiredCapacity(int desiredCapacity) {
            this.desiredCapacity = desiredCapacity;
            return this;
        }

        public Builder dimensionName(String dimensionName) {
            this.dimensionName = dimensionName;
            return this;
        }

        public Builder resourceName(String resourceName) {
            this.resourceName = resourceName;
            return this;
        }

        public Builder scalableTargetDimensionId(String scalableTargetDimensionId) {
            this.scalableTargetDimensionId = scalableTargetDimensionId;
            return this;
        }

        public Builder scalingStatus(String scalingStatus) {
            this.scalingStatus = scalingStatus;
            return this;
        }

        public Builder version(String version) {
            this.version = version;
            return this;
        }

        /** Materializes the accumulated state into a new instance. */
        public ScalableTargetResourceInfo build() {
            ScalableTargetResourceInfo info = new ScalableTargetResourceInfo();
            info.setActualCapacity(actualCapacity);
            info.setDesiredCapacity(desiredCapacity);
            info.setDimensionName(dimensionName);
            info.setResourceName(resourceName);
            info.setScalableTargetDimensionId(scalableTargetDimensionId);
            info.setScalingStatus(scalingStatus);
            info.setVersion(version);
            return info;
        }
    }
}
| 1,448 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AppAutoScalingCallbackSpringResource.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import javax.inject.Inject;
import com.netflix.titus.runtime.endpoint.common.rest.Responses;
import com.netflix.titus.runtime.endpoint.metadata.spring.CallMetadataAuthentication;
import io.swagger.annotations.Api;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PatchMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * Spring REST endpoint invoked by AWS Application Auto Scaling through the API Gateway
 * custom-resource callback. Exposes read (GET) and update (PATCH) operations on a job's
 * scalable target dimension, delegating to {@link AppAutoScalingCallbackService}.
 */
@Api(tags = "Auto scaling")
@RestController
@RequestMapping(path = "/api/v1/scalableTargetDimensions")
public class AppAutoScalingCallbackSpringResource {

    private final AppAutoScalingCallbackService awsGatewayCallbackService;

    @Inject
    public AppAutoScalingCallbackSpringResource(AppAutoScalingCallbackService awsGatewayCallbackService) {
        this.awsGatewayCallbackService = awsGatewayCallbackService;
    }

    /** Returns the current capacity state of the job identified by the path variable. */
    @GetMapping(path = "/{scalableTargetDimensionId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public ScalableTargetResourceInfo getScalableTargetResourceInfo(@PathVariable("scalableTargetDimensionId") String jobId,
                                                                    CallMetadataAuthentication authentication) {
        return Responses.fromSingleValueObservable(
                awsGatewayCallbackService.getScalableTargetResourceInfo(jobId, authentication.getCallMetadata()));
    }

    /**
     * Applies the desired capacity from the request body to the job. Rejects negative
     * desired capacities with HTTP 400; otherwise returns the updated state with HTTP 200.
     */
    @PatchMapping(path = "/{scalableTargetDimensionId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    public ResponseEntity<ScalableTargetResourceInfo> setScalableTargetResourceInfo(@PathVariable("scalableTargetDimensionId") String jobId,
                                                                                    @RequestBody ScalableTargetResourceInfo scalableTargetResourceInfo,
                                                                                    CallMetadataAuthentication authentication) {
        // A negative desired capacity can never be satisfied; reject it outright.
        if (scalableTargetResourceInfo.getDesiredCapacity() < 0) {
            return ResponseEntity.badRequest().build();
        }
        ScalableTargetResourceInfo updated = Responses.fromSingleValueObservable(
                awsGatewayCallbackService.setScalableTargetResourceInfo(jobId, scalableTargetResourceInfo, authentication.getCallMetadata()));
        return ResponseEntity.ok(updated);
    }
}
| 1,449 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/DefaultAppAutoScalingCallbackService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.protobuf.UInt32Value;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobCapacityUpdateWithOptionalAttributes;
import com.netflix.titus.grpc.protogen.JobCapacityWithOptionalAttributes;
import com.netflix.titus.grpc.protogen.JobStatus;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.ServiceJobSpec;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
@Singleton
public class DefaultAppAutoScalingCallbackService implements AppAutoScalingCallbackService {
    private static final Logger logger = LoggerFactory.getLogger(DefaultAppAutoScalingCallbackService.class);

    private final JobServiceGateway jobServiceGateway;

    // Fixed dimension name reported for every Titus job acting as an AWS custom scalable resource.
    private static final String DIMENSION_NAME = "custom-resource:ResourceType:Property";

    /** Scaling states reported back to AWS Application Auto Scaling. */
    public enum ScalingStatus {
        Pending,
        InProgress,
        Successful
    }

    @Inject
    public DefaultAppAutoScalingCallbackService(JobServiceGateway jobServiceGateway) {
        this.jobServiceGateway = jobServiceGateway;
    }

    /**
     * Builds the scalable-target view of a job: desired capacity comes from the service job
     * spec, actual capacity from the count of tasks in the Started state. Emits an error for
     * non-service jobs, which cannot be auto scaled.
     *
     * @param jobId        job to describe
     * @param callMetadata caller identity propagated to the job service
     */
    @Override
    public Observable<ScalableTargetResourceInfo> getScalableTargetResourceInfo(String jobId, CallMetadata callMetadata) {
        // Only the pagination total is needed, so a page size of 1 keeps the response small.
        TaskQuery taskQuery = TaskQuery.newBuilder()
                .putFilteringCriteria("jobIds", jobId)
                .putFilteringCriteria("taskStates", "Started")
                .setPage(Page.newBuilder().setPageSize(1).build()).build();
        return jobServiceGateway.findTasks(taskQuery, callMetadata)
                .map(taskQueryResult -> taskQueryResult.getPagination().getTotalItems())
                .flatMap(numStartedTasks -> jobServiceGateway.findJob(jobId, callMetadata).map(job -> Pair.of(job, numStartedTasks)))
                .flatMap(jobTasksPair -> {
                    Job job = jobTasksPair.getLeft();
                    Integer numRunningTasks = jobTasksPair.getRight();
                    if (!job.getJobDescriptor().hasService()) {
                        return Observable.error(JobManagerException.notServiceJob(jobId));
                    }
                    ServiceJobSpec jobSpec = job.getJobDescriptor().getService();
                    ScalableTargetResourceInfo.Builder scalableTargetResourceInfoBuilder = ScalableTargetResourceInfo.newBuilder()
                            .actualCapacity(numRunningTasks)
                            .desiredCapacity(jobSpec.getCapacity().getDesired())
                            .dimensionName(DIMENSION_NAME)
                            .resourceName(jobId)
                            .scalableTargetDimensionId(jobId)
                            .version(buildVersion(job));
                    // Scaling is complete once the running task count matches the desired capacity.
                    if (jobSpec.getCapacity().getDesired() != numRunningTasks) {
                        scalableTargetResourceInfoBuilder.scalingStatus(ScalingStatus.InProgress.name());
                    } else {
                        scalableTargetResourceInfoBuilder.scalingStatus(ScalingStatus.Successful.name());
                    }
                    return Observable.just(scalableTargetResourceInfoBuilder.build());
                });
    }

    /**
     * Applies the desired capacity from the callback payload to the job, then re-reads the
     * job state and reports it back with status {@link ScalingStatus#Pending} (the resize
     * has been accepted but tasks have not converged yet).
     */
    @Override
    public Observable<ScalableTargetResourceInfo> setScalableTargetResourceInfo(String jobId,
                                                                                ScalableTargetResourceInfo scalableTargetResourceInfo,
                                                                                CallMetadata callMetadata) {
        logger.info("(BEFORE setting job instances) for jobId {} :: {}", jobId, scalableTargetResourceInfo);
        JobCapacityWithOptionalAttributes jobCapacityWithOptionalAttributes = JobCapacityWithOptionalAttributes.newBuilder()
                .setDesired(UInt32Value.newBuilder().setValue(scalableTargetResourceInfo.getDesiredCapacity()).build()).build();
        JobCapacityUpdateWithOptionalAttributes jobCapacityRequest = JobCapacityUpdateWithOptionalAttributes.newBuilder()
                .setJobId(jobId)
                .setJobCapacityWithOptionalAttributes(jobCapacityWithOptionalAttributes).build();
        return jobServiceGateway.updateJobCapacityWithOptionalAttributes(jobCapacityRequest, callMetadata)
                .andThen(getScalableTargetResourceInfo(jobId, callMetadata).map(scalableTargetResourceInfoReturned -> {
                    scalableTargetResourceInfoReturned.setScalingStatus(ScalingStatus.Pending.name());
                    // Log the state actually returned, not the incoming request payload.
                    logger.info("(set job instances) Returning value Instances for jobId {} :: {}", jobId, scalableTargetResourceInfoReturned);
                    return scalableTargetResourceInfoReturned;
                }));
    }

    /**
     * Derives a version string from the timestamp of the job's Accepted status. The Accepted
     * timestamp is stable for the lifetime of the job, which is what AWS needs from a version.
     */
    private String buildVersion(Job job) {
        List<JobStatus> jobStatusList = new ArrayList<>(job.getStatusHistoryList());
        jobStatusList.add(job.getStatus());
        return jobStatusList.stream()
                .filter(jobStatus -> jobStatus.getState() == JobStatus.JobState.Accepted)
                .findFirst()
                .map(jobStatus -> String.valueOf(jobStatus.getTimestamp()))
                .orElseGet(() -> {
                    // Returning NoVersion should not have any negative side effect since
                    // application auto scaling won't actually need this value to
                    // differentiate between Titus Jobs, because we don't reuse Job IDs.
                    logger.error("Titus Job {} is missing an Accepted timestamp!", job);
                    return "NoVersion";
                });
    }
}
| 1,450 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AWSAppAutoScalingUtil.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import com.amazonaws.services.applicationautoscaling.model.ScalableTarget;
import com.netflix.titus.api.appscale.model.AutoScalableTarget;
/**
 * Static helpers for mapping Titus jobs onto AWS Application Auto Scaling resources.
 */
public class AWSAppAutoScalingUtil {

    // Static-only utility class; prevent instantiation.
    private AWSAppAutoScalingUtil() {
    }

    /**
     * Builds the API Gateway callback URL that AWS uses as the resource id for a Titus job
     * acting as a custom scalable resource.
     *
     * @param jobId                    job represented by the scalable target
     * @param awsGatewayEndpointPrefix hostname prefix of the gateway endpoint
     * @param region                   AWS region of the gateway endpoint
     * @param targetStage              deployment stage segment of the gateway URL
     */
    public static String buildGatewayResourceId(String jobId, String awsGatewayEndpointPrefix, String region,
                                                String targetStage) {
        return String.format("https://%s.execute-api.%s.amazonaws.com/%s/scalableTargetDimensions/%s",
                awsGatewayEndpointPrefix, region, targetStage, jobId);
    }

    /** Derives the AWS scaling policy name as {@code <jobId>/<policyRefId>}. */
    public static String buildScalingPolicyName(String policyRefId, String jobId) {
        return String.format("%s/%s", jobId, policyRefId);
    }

    /** Converts the AWS SDK scalable target into the Titus domain model (id + min/max capacity). */
    public static AutoScalableTarget toAutoScalableTarget(ScalableTarget scalableTarget) {
        return AutoScalableTarget.newBuilder()
                .withResourceId(scalableTarget.getResourceId())
                .withMinCapacity(scalableTarget.getMinCapacity())
                .withMaxCapacity(scalableTarget.getMaxCapacity())
                .build();
    }
}
| 1,451 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/appscale/AppAutoScalingCallbackResource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.appscale;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.netflix.titus.api.jobmanager.service.JobManagerConstants;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.runtime.endpoint.common.rest.Responses;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import io.swagger.annotations.Api;
import io.swagger.jaxrs.PATCH;
/**
 * JAX-RS endpoint invoked by AWS Application Auto Scaling through the API Gateway
 * custom-resource callback. Mirrors the Spring variant: GET reads a job's capacity state,
 * PATCH applies a new desired capacity. Caller identity is taken from the resolver, falling
 * back to an undefined call-metadata constant.
 */
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
@Api(tags = "Auto scaling")
@Path("/v1/scalableTargetDimensions")
@Singleton
public class AppAutoScalingCallbackResource {

    private final AppAutoScalingCallbackService awsGatewayCallbackService;
    private final CallMetadataResolver callMetadataResolver;

    @Inject
    public AppAutoScalingCallbackResource(AppAutoScalingCallbackService awsGatewayCallbackService,
                                          CallMetadataResolver callMetadataResolver) {
        this.awsGatewayCallbackService = awsGatewayCallbackService;
        this.callMetadataResolver = callMetadataResolver;
    }

    /** Returns the current capacity state of the job identified by the path parameter. */
    @Path("{scalableTargetDimensionId}")
    @GET
    @Produces({MediaType.APPLICATION_JSON})
    public ScalableTargetResourceInfo getScalableTargetResourceInfo(@PathParam("scalableTargetDimensionId") String jobId) {
        CallMetadata caller = resolveCallMetadata();
        return Responses.fromSingleValueObservable(
                awsGatewayCallbackService.getScalableTargetResourceInfo(jobId, caller));
    }

    /**
     * Applies the desired capacity from the request body to the job; a negative desired
     * capacity is rejected with HTTP 400.
     */
    @Path("{scalableTargetDimensionId}")
    @PATCH
    @Produces({MediaType.APPLICATION_JSON})
    @Consumes({MediaType.APPLICATION_JSON})
    public ScalableTargetResourceInfo setScalableTargetResourceInfo(@PathParam("scalableTargetDimensionId") String jobId, ScalableTargetResourceInfo scalableTargetResourceInfo) {
        if (scalableTargetResourceInfo.getDesiredCapacity() < 0) {
            throw new WebApplicationException(Response.Status.BAD_REQUEST);
        }
        CallMetadata caller = resolveCallMetadata();
        return Responses.fromSingleValueObservable(
                awsGatewayCallbackService.setScalableTargetResourceInfo(jobId, scalableTargetResourceInfo, caller));
    }

    // Fall back to the undefined caller constant when no identity can be resolved.
    private CallMetadata resolveCallMetadata() {
        return callMetadataResolver.resolve().orElse(JobManagerConstants.UNDEFINED_CALL_METADATA);
    }
}
| 1,452 |
0 | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws | Create_ds/titus-control-plane/titus-ext/aws/src/main/java/com/netflix/titus/ext/aws/supervisor/AsgLocalMasterReadinessResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.aws.supervisor;
import java.time.Duration;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import com.amazonaws.services.autoscaling.AmazonAutoScalingAsync;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.titus.api.supervisor.model.ReadinessState;
import com.netflix.titus.api.supervisor.model.ReadinessStatus;
import com.netflix.titus.api.supervisor.service.LocalMasterReadinessResolver;
import com.netflix.titus.api.supervisor.service.resolver.PollingLocalMasterReadinessResolver;
import com.netflix.titus.common.framework.scheduler.model.ScheduleDescriptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.retry.Retryers;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.ext.aws.AwsConfiguration;
import com.netflix.titus.ext.aws.AwsReactorExt;
import com.netflix.titus.ext.aws.ControlPlaneAmazonAutoScalingAsyncProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
@Singleton
public class AsgLocalMasterReadinessResolver implements LocalMasterReadinessResolver {

    // ASG tag key that marks whether instances in the TitusMaster ASG may take leadership.
    public static final String TAG_MASTER_ENABLED = "titus.masterEnabled";

    private static final Logger logger = LoggerFactory.getLogger(AsgLocalMasterReadinessResolver.class);

    // Polls ASG metadata every 5s, with bounded exponential retry and a 5s per-attempt timeout.
    static final ScheduleDescriptor REFRESH_SCHEDULER_DESCRIPTOR = ScheduleDescriptor.newBuilder()
            .withName(AsgLocalMasterReadinessResolver.class.getSimpleName())
            .withDescription("Local Master state resolution from ASG metadata")
            .withInitialDelay(Duration.ZERO)
            .withInterval(Duration.ofSeconds(5))
            .withRetryerSupplier(() -> Retryers.exponentialBackoff(100, 1_000, TimeUnit.MILLISECONDS, 5))
            .withTimeout(Duration.ofSeconds(5))
            .withOnErrorHandler((action, error) -> {
                logger.warn("Cannot read master ASG data: {}", error.getMessage());
                logger.debug(error.getMessage(), error);
            })
            .build();

    private final AwsConfiguration configuration;
    private final AmazonAutoScalingAsync autoScalingClient;
    private final Clock clock;

    private final PollingLocalMasterReadinessResolver poller;

    // Last observed tag value, kept only so changes can be logged once instead of every poll.
    private final AtomicReference<String> lastTagValue = new AtomicReference<>();

    @Inject
    public AsgLocalMasterReadinessResolver(AwsConfiguration configuration,
                                           @Named(ControlPlaneAmazonAutoScalingAsyncProvider.NAME) AmazonAutoScalingAsync autoScalingClient,
                                           TitusRuntime titusRuntime) {
        this(configuration, autoScalingClient, REFRESH_SCHEDULER_DESCRIPTOR, titusRuntime, Schedulers.parallel());
    }

    public AsgLocalMasterReadinessResolver(AwsConfiguration configuration,
                                           AmazonAutoScalingAsync autoScalingClient,
                                           ScheduleDescriptor scheduleDescriptor,
                                           TitusRuntime titusRuntime,
                                           Scheduler scheduler) {
        this.configuration = configuration;
        this.autoScalingClient = autoScalingClient;
        this.clock = titusRuntime.getClock();
        this.poller = PollingLocalMasterReadinessResolver.newPollingResolver(
                refresh(),
                scheduleDescriptor,
                titusRuntime,
                scheduler
        );
    }

    @PreDestroy
    public void shutdown() {
        poller.shutdown();
    }

    @Override
    public Flux<ReadinessStatus> observeLocalMasterReadinessUpdates() {
        return poller.observeLocalMasterReadinessUpdates();
    }

    /** Describes the configured TitusMaster ASG and maps the response to a readiness status. */
    private Mono<ReadinessStatus> refresh() {
        return AwsReactorExt
                .<DescribeAutoScalingGroupsRequest, DescribeAutoScalingGroupsResult>toMono(
                        () -> {
                            DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest();
                            request.setAutoScalingGroupNames(Collections.singletonList(configuration.getTitusMasterAsgName()));
                            return request;
                        },
                        autoScalingClient::describeAutoScalingGroupsAsync
                )
                .map(this::resolveStatus);
    }

    /**
     * Resolves readiness from the ASG lookup: Enabled only when the {@link #TAG_MASTER_ENABLED}
     * tag is present with a true value; Disabled when the ASG or the tag is missing, or the
     * tag value is not true.
     */
    private ReadinessStatus resolveStatus(DescribeAutoScalingGroupsResult response) {
        ReadinessState effectiveState = null;
        String message = null;
        if (response.getAutoScalingGroups().isEmpty()) {
            setNewTagValue("");
            effectiveState = ReadinessState.Disabled;
            message = "ASG not found: " + configuration.getTitusMasterAsgName();
        } else {
            AutoScalingGroup autoScalingGroup = response.getAutoScalingGroups().get(0);
            if (autoScalingGroup.getTags() != null) {
                for (TagDescription tagDescription : autoScalingGroup.getTags()) {
                    if (TAG_MASTER_ENABLED.equals(tagDescription.getKey())) {
                        String value = tagDescription.getValue();
                        setNewTagValue(value);
                        boolean enabled = Boolean.parseBoolean(value);
                        effectiveState = enabled ? ReadinessState.Enabled : ReadinessState.Disabled;
                        message = "Set as tag on ASG: " + configuration.getTitusMasterAsgName();
                    }
                }
            }
            if (effectiveState == null) {
                setNewTagValue("");
                effectiveState = ReadinessState.Disabled;
                message = String.format("ASG tag %s not found: %s", TAG_MASTER_ENABLED, configuration.getTitusMasterAsgName());
            }
        }
        return ReadinessStatus.newBuilder()
                .withState(effectiveState)
                .withMessage(message)
                .withTimestamp(clock.wallTime())
                .build();
    }

    /** Records the latest tag value, logging once per change (atomic swap avoids a check-then-act race). */
    private void setNewTagValue(String newValue) {
        String previousValue = lastTagValue.getAndSet(newValue);
        if (!newValue.equals(previousValue)) {
            logger.info("New master state tag value: previous={}, new={}", previousValue, newValue);
        }
    }
}
| 1,453 |
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator/s3/ReactorValidationServiceClientTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.s3;
import java.time.Duration;
import com.netflix.compute.validator.protogen.ValidationServiceGrpc;
import com.netflix.titus.common.util.grpc.reactor.client.ReactorToGrpcClientBuilder;
import io.grpc.Channel;
import org.junit.Test;
import org.mockito.Mockito;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Checks that the {@code ReactorValidationServiceClient} reactor interface is compatible with
 * the generated GRPC {@code ValidationService} API.
 */
public class ReactorValidationServiceClientTest {
    @Test
    public void testMatchesGrpcApi() {
        // Building the reactor client against the GRPC service descriptor performs the
        // interface-to-API matching, so a successful (non-null) build is the real assertion.
        // The channel is a mock; no network calls are made.
        ReactorValidationServiceClient client = ReactorToGrpcClientBuilder
                .newBuilderWithDefaults(ReactorValidationServiceClient.class,
                        ValidationServiceGrpc.newStub(Mockito.mock(Channel.class)),
                        ValidationServiceGrpc.getServiceDescriptor(),
                        Void.class
                )
                .withTimeout(Duration.ofSeconds(1))
                .build();
        assertThat(client).isNotNull();
    }
}
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator/s3/JobS3LogLocationValidatorTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.s3;
import java.util.Set;
import java.util.function.Function;
import com.netflix.compute.validator.protogen.ComputeValidator;
import com.netflix.compute.validator.protogen.ComputeValidator.S3BucketAccessValidationResponse;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Test;
import reactor.core.publisher.Mono;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code JobS3LogLocationValidator}: the validation service should be consulted
 * only when a job declares a custom S3 bucket/prefix via container attributes, and remote
 * validation failures should surface as validation errors.
 */
public class JobS3LogLocationValidatorTest {
    // Job without S3 container attributes; expected to skip remote bucket validation.
    private static final JobDescriptor<?> JOB_WITH_DEFAULT_BUCKET = JobDescriptorGenerator.oneTaskBatchJobDescriptor();
    // Same job with an explicit custom bucket/prefix, which triggers the remote access check.
    private static final JobDescriptor<?> JOB_WITH_CUSTOM_BUCKET = JobDescriptorGenerator.oneTaskBatchJobDescriptor().toBuilder()
            .withContainer(JOB_WITH_DEFAULT_BUCKET.getContainer().toBuilder()
                    .withAttributes(CollectionsExt.asMap(
                            JobAttributes.JOB_CONTAINER_ATTRIBUTE_S3_BUCKET_NAME, "junitBucket",
                            JobAttributes.JOB_CONTAINER_ATTRIBUTE_S3_PATH_PREFIX, "junitPrefix"
                    ))
                    .build()
            )
            .build();
    private final TitusRuntime titusRuntime = TitusRuntimes.internal();
    private final ReactorValidationServiceClient validationClient = mock(ReactorValidationServiceClient.class);
    // Validator under test, wired with the mocked remote client, identity IAM-role mapping,
    // SOFT validation errors, and the feature always enabled.
    private final JobS3LogLocationValidator validator = new JobS3LogLocationValidator(
            validationClient,
            "defaultBucket",
            "defaultPrefix",
            Function.identity(),
            () -> ValidationError.Type.SOFT,
            () -> true,
            titusRuntime
    );
    @Test
    public void testNoCustomBucket() {
        when(validationClient.validateS3BucketAccess(any())).thenReturn(
                Mono.just(S3BucketAccessValidationResponse.getDefaultInstance())
        );
        Set<ValidationError> result = validator.validate(JOB_WITH_DEFAULT_BUCKET).block();
        assertThat(result).isEmpty();
        // Default bucket jobs must not hit the remote validation service at all.
        verify(validationClient, times(0)).validateS3BucketAccess(any());
    }
    @Test
    public void testValidCustomBucket() {
        // A response without failures means the bucket is accessible.
        when(validationClient.validateS3BucketAccess(any())).thenReturn(
                Mono.just(S3BucketAccessValidationResponse.getDefaultInstance())
        );
        Set<ValidationError> result = validator.validate(JOB_WITH_CUSTOM_BUCKET).block();
        assertThat(result).isEmpty();
        verify(validationClient, times(1)).validateS3BucketAccess(any());
    }
    @Test
    public void testInValidCustomBucket() {
        // A response carrying one failure should be translated into one validation error.
        when(validationClient.validateS3BucketAccess(any())).thenReturn(
                Mono.just(S3BucketAccessValidationResponse.newBuilder()
                        .setFailures(ComputeValidator.ValidationFailures.newBuilder()
                                .addFailures(ComputeValidator.ValidationFailure.getDefaultInstance())
                                .build()
                        )
                        .build()
                )
        );
        Set<ValidationError> result = validator.validate(JOB_WITH_CUSTOM_BUCKET).block();
        assertThat(result).hasSize(1);
        verify(validationClient, times(1)).validateS3BucketAccess(any());
    }
}
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/test/java/com/netflix/titus/ext/jobvalidator/ebs/JobEbsVolumeSanitizerTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.ebs;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.netflix.compute.validator.protogen.ComputeValidator;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.ext.jobvalidator.s3.ReactorValidationServiceClient;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.Before;
import org.junit.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@link JobEbsVolumeSanitizer}: each EBS volume on a job is validated via the
 * validation service and enriched with the availability zone and capacity it returns.
 */
public class JobEbsVolumeSanitizerTest {

    // Two fixture volumes; only the volume id matters to the sanitizer, the rest is plumbing.
    private static final EbsVolume EBS_VOLUME_A = EbsVolume.newBuilder()
            .withVolumeId("vol-a")
            .withMountPath("/a")
            .withMountPermissions(EbsVolume.MountPerm.RW)
            .withFsType("xfs")
            .build();

    private static final EbsVolume EBS_VOLUME_B = EbsVolume.newBuilder()
            .withVolumeId("vol-b")
            .withMountPath("/b")
            .withMountPermissions(EbsVolume.MountPerm.RW)
            .withFsType("xfs")
            .build();

    // Deliberately ordered B-then-A so the order-preservation behavior of the sanitizer is exercised.
    private static final List<EbsVolume> EBS_VOLUMES = Arrays.asList(EBS_VOLUME_B, EBS_VOLUME_A);

    private static final JobDescriptor<?> JOB_WITH_NO_EBS_VOLUMES = JobDescriptorGenerator.oneTaskBatchJobDescriptor();

    private static final JobDescriptor<?> JOB_WITH_DEFAULT_MULTIPLE_EBS_VOLUMES = JOB_WITH_NO_EBS_VOLUMES.toBuilder()
            .withContainer(JOB_WITH_NO_EBS_VOLUMES.getContainer().toBuilder()
                    .withContainerResources(JOB_WITH_NO_EBS_VOLUMES.getContainer().getContainerResources().toBuilder()
                            .withEbsVolumes(EBS_VOLUMES)
                            .build())
                    .build())
            .build();

    private final ReactorValidationServiceClient validationClient = mock(ReactorValidationServiceClient.class);

    private final JobEbsVolumeSanitizerConfiguration configuration = mock(JobEbsVolumeSanitizerConfiguration.class);

    private final JobEbsVolumeSanitizer sanitizer = new JobEbsVolumeSanitizer(configuration, validationClient, TitusRuntimes.internal());

    @Before
    public void setUp() {
        // Sanitization enabled with a generous (5s) aggregate timeout for all volume validations.
        when(configuration.isEnabled()).thenReturn(true);
        when(configuration.getJobEbsSanitizationTimeoutMs()).thenReturn(5_000L);
    }

    /**
     * Tests that EBS volume metadata returned by the validator service gets set properly
     * on the sanitized job.
     */
    @Test
    public void testMetadataAdded() {
        ComputeValidator.EbsVolumeValidationRequest request_a = ComputeValidator.EbsVolumeValidationRequest.newBuilder()
                .setEbsVolumeId(EBS_VOLUME_A.getVolumeId())
                .build();
        String azA = "us-east-1a";
        int sizeA = 5;
        // Expected volume A after sanitization: AZ and capacity copied from the service response.
        EbsVolume sanitizedEbsVolumeA = EBS_VOLUME_A.toBuilder()
                .withVolumeAvailabilityZone(azA)
                .withVolumeCapacityGB(sizeA)
                .build();
        ComputeValidator.EbsVolumeValidationResponse response_a = ComputeValidator.EbsVolumeValidationResponse.newBuilder()
                .setSuccess(ComputeValidator.EbsVolumeValidationResponse.Success.newBuilder()
                        .setEbsVolumeAvailabilityZone(azA)
                        .setEbsVolumeCapacityGB(sizeA)
                        .build())
                .build();
        ComputeValidator.EbsVolumeValidationRequest request_b = ComputeValidator.EbsVolumeValidationRequest.newBuilder()
                .setEbsVolumeId(EBS_VOLUME_B.getVolumeId())
                .build();
        String azB = "us-east-1b";
        int sizeB = 10;
        EbsVolume sanitizedEbsVolumeB = EBS_VOLUME_B.toBuilder()
                .withVolumeAvailabilityZone(azB)
                .withVolumeCapacityGB(sizeB)
                .build();
        ComputeValidator.EbsVolumeValidationResponse response_b = ComputeValidator.EbsVolumeValidationResponse.newBuilder()
                .setSuccess(ComputeValidator.EbsVolumeValidationResponse.Success.newBuilder()
                        .setEbsVolumeAvailabilityZone(azB)
                        .setEbsVolumeCapacityGB(sizeB)
                        .build())
                .build();
        when(validationClient.validateEbsVolume(request_a)).thenReturn(Mono.just(response_a));
        when(validationClient.validateEbsVolume(request_b)).thenReturn(Mono.just(response_b));
        StepVerifier.create(sanitizer.sanitize(JOB_WITH_DEFAULT_MULTIPLE_EBS_VOLUMES))
                .expectNextMatches(operator ->
                        jobContainsVolumes(operator.apply(JOB_WITH_DEFAULT_MULTIPLE_EBS_VOLUMES),
                                // Provide volumes in expected order (which should match original order)
                                Arrays.asList(sanitizedEbsVolumeB, sanitizedEbsVolumeA)))
                .verifyComplete();
    }

    /**
     * Test that a job with no EBS volumes is properly handled.
     */
    @Test
    public void testNoVolumes() {
        StepVerifier.create(sanitizer.sanitize(JOB_WITH_NO_EBS_VOLUMES))
                .expectNextMatches(operator -> operator.apply(JOB_WITH_NO_EBS_VOLUMES)
                        .getContainer().getContainerResources().getEbsVolumes().isEmpty())
                .verifyComplete();
    }

    /**
     * Tests that validation service error responses are handled properly.
     */
    @Test
    public void testValidatorError() {
        // Service-level failure (gRPC INTERNAL) is wrapped by the sanitizer into an
        // IllegalArgumentException carrying the "EBS volume validation error" message.
        when(validationClient.validateEbsVolume(any())).thenReturn(Mono.error(new StatusRuntimeException(Status.INTERNAL)));
        StepVerifier.create(sanitizer.sanitize(JOB_WITH_DEFAULT_MULTIPLE_EBS_VOLUMES))
                .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException &&
                        throwable.getMessage().contains("EBS volume validation error"))
                .verify();
    }

    /**
     * Tests that validations that are unsuccessful/failures are handled properly.
     */
    @Test
    public void testValidationFailure() {
        // A FAILURES response (volume exists check failed) surfaces as JobManagerException.
        ComputeValidator.EbsVolumeValidationResponse response = ComputeValidator.EbsVolumeValidationResponse.newBuilder()
                .setFailures(ComputeValidator.ValidationFailures.newBuilder()
                        .addFailures(ComputeValidator.ValidationFailure.newBuilder()
                                .setErrorCode("notFound")
                                .setErrorMessage("Volume not found")
                                .build())
                        .build())
                .build();
        when(validationClient.validateEbsVolume(any())).thenReturn(Mono.just(response));
        StepVerifier.create(sanitizer.sanitize(JOB_WITH_DEFAULT_MULTIPLE_EBS_VOLUMES))
                .expectErrorMatches(throwable -> throwable instanceof JobManagerException &&
                        throwable.getMessage().contains("Job has invalid EBS volume"))
                .verify();
    }

    // True when the job's container resources hold exactly the given volume list (order-sensitive).
    private boolean jobContainsVolumes(JobDescriptor<?> jobDescriptor, List<EbsVolume> ebsVolumes) {
        return jobDescriptor.getContainer().getContainerResources().getEbsVolumes().equals(ebsVolumes);
    }
}
| 1,456 |
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator/s3/ReactorValidationServiceClient.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.s3;
import com.netflix.compute.validator.protogen.ComputeValidator.EbsVolumeValidationRequest;
import com.netflix.compute.validator.protogen.ComputeValidator.EbsVolumeValidationResponse;
import com.netflix.compute.validator.protogen.ComputeValidator.IamRoleValidationRequest;
import com.netflix.compute.validator.protogen.ComputeValidator.IamRoleValidationResponse;
import com.netflix.compute.validator.protogen.ComputeValidator.ImageValidationRequest;
import com.netflix.compute.validator.protogen.ComputeValidator.ImageValidationResponse;
import com.netflix.compute.validator.protogen.ComputeValidator.S3BucketAccessValidationRequest;
import com.netflix.compute.validator.protogen.ComputeValidator.S3BucketAccessValidationResponse;
import com.netflix.compute.validator.protogen.ComputeValidator.SecurityGroupValidationRequest;
import com.netflix.compute.validator.protogen.ComputeValidator.SecurityGroupValidationResponse;
import reactor.core.publisher.Mono;
/**
 * Reactive client facade for the compute validation service. Each method issues a single
 * validation request and emits the service response as a {@link Mono}.
 */
public interface ReactorValidationServiceClient {

    /**
     * Validates the given security group. If valid, returns all available representations.
     */
    Mono<SecurityGroupValidationResponse> validateSecurityGroup(SecurityGroupValidationRequest request);

    /**
     * Validates an IAM role.
     */
    Mono<IamRoleValidationResponse> validateIamRole(IamRoleValidationRequest request);

    /**
     * Checks access rights to a bucket with the given IAM role.
     */
    Mono<S3BucketAccessValidationResponse> validateS3BucketAccess(S3BucketAccessValidationRequest request);

    /**
     * Validates an image.
     */
    Mono<ImageValidationResponse> validateImage(ImageValidationRequest request);

    /**
     * Validates an EBS volume id.
     */
    Mono<EbsVolumeValidationResponse> validateEbsVolume(EbsVolumeValidationRequest request);
}
| 1,457 |
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator/s3/JobS3LogLocationValidator.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.s3;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;
import com.netflix.compute.validator.protogen.ComputeValidator;
import com.netflix.compute.validator.protogen.ComputeValidator.ValidationFailure;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.LogStorageInfos;
import com.netflix.titus.common.model.admission.AdmissionValidator;
import com.netflix.titus.common.model.admission.ValidatorMetrics;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.StringExt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
/**
 * Admission validator that checks whether the IAM role of a job has access to the custom S3
 * log location (bucket and/or path prefix) declared via container attributes. Jobs that use
 * only the default bucket and prefix are accepted without calling the validation service.
 */
public class JobS3LogLocationValidator implements AdmissionValidator<JobDescriptor> {

    private static final Logger logger = LoggerFactory.getLogger(JobS3LogLocationValidator.class);

    // Suffix appended to the job's path prefix when probing access rights.
    private static final String VALIDATOR_PATH = "/titusS3AccessValidator";

    // Metric reason tag used for both access-denied responses and a missing IAM role.
    private static final String REASON_ACCESS_DENIED = "accessDenied";

    // Number of re-subscriptions on error before the validation is considered failed.
    private static final long RETRY_COUNT = 3;

    private final ReactorValidationServiceClient validationClient;
    private final String defaultBucket;
    private final String defaultPathPrefix;
    // Maps the job's IAM role name to the full ARN expected by the validation service.
    private final Function<String, String> iamRoleArnResolver;
    // Supplies SOFT vs HARD dynamically so severity can be changed without a restart.
    private final Supplier<ValidationError.Type> validationErrorTypeProvider;
    // Kill switch; when false all jobs are accepted without validation.
    private final Supplier<Boolean> enabledSupplier;
    private final ValidatorMetrics metrics;

    /**
     * @param validationClient            reactive client for the compute validation service
     * @param defaultBucket               bucket used when the job declares no custom bucket
     * @param defaultPathPrefix           prefix used when the job declares no custom prefix
     * @param iamRoleArnResolver          resolves an IAM role name to its ARN
     * @param validationErrorTypeProvider supplies the error severity (SOFT/HARD)
     * @param enabledSupplier             enables/disables this validator at runtime
     * @param titusRuntime                runtime used for metrics registration
     */
    public JobS3LogLocationValidator(ReactorValidationServiceClient validationClient,
                                     String defaultBucket,
                                     String defaultPathPrefix,
                                     Function<String, String> iamRoleArnResolver,
                                     Supplier<ValidationError.Type> validationErrorTypeProvider,
                                     Supplier<Boolean> enabledSupplier,
                                     TitusRuntime titusRuntime) {
        this.validationClient = validationClient;
        this.defaultBucket = defaultBucket;
        this.defaultPathPrefix = defaultPathPrefix;
        this.iamRoleArnResolver = iamRoleArnResolver;
        this.validationErrorTypeProvider = validationErrorTypeProvider;
        this.enabledSupplier = enabledSupplier;
        this.metrics = new ValidatorMetrics(JobS3LogLocationValidator.class.getSimpleName(), titusRuntime.getRegistry());
    }

    /**
     * Validates S3 access for the given job. Returns an empty set when the validator is disabled,
     * when the job declares no custom bucket/prefix, or when the validation service confirms access.
     * Emits an {@link IllegalArgumentException} when the validation service itself fails after retries.
     */
    @Override
    public Mono<Set<ValidationError>> validate(JobDescriptor jobDescriptor) {
        if (!enabledSupplier.get()) {
            metrics.incrementValidationSkipped(ValidatorMetrics.REASON_DISABLED);
            return Mono.just(Collections.emptySet());
        }
        LogStorageInfos.S3Bucket s3BucketInfo = LogStorageInfos.findCustomS3Bucket(jobDescriptor).orElse(null);
        String customPrefix = LogStorageInfos.findCustomPathPrefix(jobDescriptor).orElse(null);
        // Only jobs with a custom bucket or a custom prefix need service-side validation.
        if (s3BucketInfo == null && customPrefix == null) {
            metrics.incrementValidationSkipped(ValidatorMetrics.REASON_NOT_APPLICABLE);
            return Mono.just(Collections.emptySet());
        }
        String bucketName = s3BucketInfo == null ? defaultBucket : s3BucketInfo.getBucketName();
        // Effective prefix: custom prefix (if any) is nested under the default, then the
        // validator probe path is appended.
        String pathPrefix = LogStorageInfos.buildPathPrefix(
                customPrefix == null ? defaultPathPrefix : LogStorageInfos.buildPathPrefix(customPrefix, defaultPathPrefix),
                VALIDATOR_PATH
        );
        String iamRole = jobDescriptor.getContainer().getSecurityProfile().getIamRole();
        // This condition should never happen, but we are adding this check here just in case.
        if (StringExt.isEmpty(iamRole)) {
            metrics.incrementValidationError(bucketName, REASON_ACCESS_DENIED);
            return Mono.just(Collections.singleton(new ValidationError("iamRole", "IAM role not set")));
        }
        iamRole = iamRoleArnResolver.apply(iamRole);
        Mono<Set<ValidationError>> action = validationClient.validateS3BucketAccess(
                ComputeValidator.S3BucketAccessValidationRequest.newBuilder()
                        .setBucket(bucketName)
                        .setBucketPrefix(pathPrefix)
                        .setIamRole(iamRole)
                        .build()
        ).map(result -> {
            // A FAILURES result with a non-empty failure list means access was denied.
            if (result.getResultCase() == ComputeValidator.S3BucketAccessValidationResponse.ResultCase.FAILURES) {
                List<ValidationFailure> failures = result.getFailures().getFailuresList();
                if (!failures.isEmpty()) {
                    metrics.incrementValidationError(bucketName, REASON_ACCESS_DENIED);
                    return toValidationError(failures);
                }
            }
            // NOTE(review): a FAILURES result with an empty failure list is counted as success here;
            // presumably the service never emits that shape — confirm against the service contract.
            metrics.incrementValidationSuccess(bucketName);
            return Collections.emptySet();
        });
        // Retry the whole call on transport/service errors; a terminal error is surfaced as
        // IllegalArgumentException so the admission layer can report it meaningfully.
        return action.retry(RETRY_COUNT)
                .onErrorMap(error -> {
                    logger.warn("S3 validation failure: {}", error.getMessage());
                    logger.debug("Stack trace", error);
                    metrics.incrementValidationError(bucketName, error.getClass().getSimpleName());
                    return new IllegalArgumentException(String.format("S3 bucket validation error: bucket=%s, pathPrefix=%s, error=%s",
                            bucketName,
                            pathPrefix,
                            error.getMessage()
                    ), error);
                });
    }

    /**
     * Error severity is resolved dynamically on each call (SOFT errors do not reject the job).
     */
    @Override
    public ValidationError.Type getErrorType() {
        return validationErrorTypeProvider.get();
    }

    // Maps each service-reported failure to a ValidationError keyed on the S3 bucket attribute.
    private Set<ValidationError> toValidationError(List<ValidationFailure> failures) {
        Set<ValidationError> result = new HashSet<>();
        failures.forEach(failure -> {
            result.add(new ValidationError(
                    JobAttributes.JOB_CONTAINER_ATTRIBUTE_S3_BUCKET_NAME,
                    String.format("Access denied: errorCode=%s, errorMessage=%s", failure.getErrorCode(), failure.getErrorMessage())
            ));
        });
        return result;
    }
}
| 1,458 |
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator/ebs/JobEbsVolumeSanitizer.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.ebs;
import java.time.Duration;
import java.util.List;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.netflix.compute.validator.protogen.ComputeValidator;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.common.model.admission.AdmissionSanitizer;
import com.netflix.titus.common.model.admission.ValidatorMetrics;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.ext.jobvalidator.s3.ReactorValidationServiceClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
/**
* This {@link com.netflix.titus.common.model.admission.AdmissionSanitizer} sanitizes Job EBS volume
* information. Sanitization adds required EBS volume metadata (e.g., AZ, capacity, etc...) that are
* retrieved from the validation service.
*/
public class JobEbsVolumeSanitizer implements AdmissionSanitizer<JobDescriptor> {

    private static final Logger logger = LoggerFactory.getLogger(JobEbsVolumeSanitizer.class);

    // Number of re-subscriptions per volume before the validation call is considered failed.
    private static final long RETRY_COUNT = 5;

    private final JobEbsVolumeSanitizerConfiguration configuration;
    private final ReactorValidationServiceClient validationClient;
    private final ValidatorMetrics metrics;

    @Inject
    public JobEbsVolumeSanitizer(JobEbsVolumeSanitizerConfiguration configuration,
                                 ReactorValidationServiceClient validationClient,
                                 TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.validationClient = validationClient;
        this.metrics = new ValidatorMetrics(JobEbsVolumeSanitizer.class.getSimpleName(), titusRuntime.getRegistry());
    }

    /**
     * Validates each EBS volume of the job concurrently and enriches it with the availability
     * zone and capacity reported by the validation service. The original volume order is
     * preserved in the sanitized job.
     *
     * @return a {@link UnaryOperator} that adds sanitized EBS volume List.
     */
    @Override
    public Mono<UnaryOperator<JobDescriptor>> sanitize(JobDescriptor jobDescriptor) {
        if (!configuration.isEnabled()) {
            metrics.incrementValidationSkipped(ValidatorMetrics.REASON_DISABLED);
            // Tag the job so it is visible that EBS sanitization was skipped.
            return Mono.just(JobEbsVolumeSanitizer::skipSanitization);
        }
        List<EbsVolume> ebsVolumes = jobDescriptor.getContainer().getContainerResources().getEbsVolumes();
        return Flux.fromIterable(ebsVolumes)
                // Execute validation service calls concurrently
                .parallel()
                .runOn(Schedulers.parallel())
                // Validate the volume and update the core EBS volume object
                .flatMap(ebsVolume -> getEbsVolumeValidationResponse(ebsVolume)
                        .flatMap(response -> sanitizeEbsVolume(ebsVolume, response))
                        // FIX: was doOnEach, which fires on every signal (value, complete AND error),
                        // over-counting successes and counting failed validations as successes.
                        // doOnSuccess fires at most once per successfully sanitized volume.
                        .doOnSuccess(sanitized -> metrics.incrementValidationSuccess(ebsVolume.getVolumeId()))
                        // NOTE(review): using the raw error message as a metric tag may produce
                        // unbounded tag cardinality — consider the exception class name instead.
                        .doOnError(throwable -> metrics.incrementValidationError(ebsVolume.getVolumeId(), throwable.getMessage())))
                // Apply the sanitized volume results in the original order
                .sequential()
                .collect(Collectors.toMap(EbsVolume::getVolumeId, Function.identity()))
                .map(sanitizedVolumeMap -> ebsVolumes.stream()
                        .map(ebsVolume -> sanitizedVolumeMap.getOrDefault(ebsVolume.getVolumeId(), ebsVolume))
                        .collect(Collectors.toList()))
                // Update the job with sanitized ebs volume list
                .map(JobEbsVolumeSanitizer::setEbsFunction)
                // Aggregate deadline across all volume validations of this job.
                .timeout(Duration.ofMillis(configuration.getJobEbsSanitizationTimeoutMs()));
    }

    /**
     * Calls the validation service for a single volume, retrying up to {@link #RETRY_COUNT} times.
     * A terminal error is wrapped into an {@link IllegalArgumentException} naming the volume.
     */
    private Mono<ComputeValidator.EbsVolumeValidationResponse> getEbsVolumeValidationResponse(EbsVolume ebsVolume) {
        return validationClient.validateEbsVolume(
                ComputeValidator.EbsVolumeValidationRequest.newBuilder()
                        .setEbsVolumeId(ebsVolume.getVolumeId())
                        .build())
                .retry(RETRY_COUNT)
                .onErrorMap(error -> {
                    logger.warn("EBS volume validation failure: {}", error.getMessage());
                    logger.debug("Stack trace", error);
                    metrics.incrementValidationError(ebsVolume.getVolumeId(), error.getClass().getSimpleName());
                    // FIX: message said "bucket=%s" (copy/paste from the S3 validator) although the
                    // value is an EBS volume id.
                    return new IllegalArgumentException(String.format("EBS volume validation error: volumeId=%s, error=%s",
                            ebsVolume.getVolumeId(),
                            error.getMessage()
                    ), error);
                });
    }

    /**
     * Converts a service response into a sanitized volume, or an error for FAILURES responses.
     */
    private Mono<EbsVolume> sanitizeEbsVolume(EbsVolume ebsVolume, ComputeValidator.EbsVolumeValidationResponse response) {
        if (response.getResultCase() == ComputeValidator.EbsVolumeValidationResponse.ResultCase.FAILURES) {
            List<ComputeValidator.ValidationFailure> failures = response.getFailures().getFailuresList();
            if (!failures.isEmpty()) {
                // Surface only the first failure message; one invalid volume rejects the job.
                return Mono.error(JobManagerException.invalidContainerResources(ebsVolume, failures.get(0).getErrorMessage()));
            }
            return Mono.error(JobManagerException.invalidContainerResources(ebsVolume, "Failure reason unknown"));
        }
        // Enrich the volume with the metadata reported by the validation service.
        return Mono.just(ebsVolume.toBuilder()
                .withVolumeAvailabilityZone(response.getSuccess().getEbsVolumeAvailabilityZone())
                .withVolumeCapacityGB(response.getSuccess().getEbsVolumeCapacityGB())
                .build());
    }

    // Returns an operator that replaces the job's EBS volume list with the sanitized one.
    private static UnaryOperator<JobDescriptor> setEbsFunction(List<EbsVolume> ebsVolumes) {
        return entity -> entity.toBuilder()
                .withContainer(entity.getContainer().toBuilder()
                        .withContainerResources(entity.getContainer().getContainerResources().toBuilder()
                                .withEbsVolumes(ebsVolumes)
                                .build())
                        .build())
                .build();
    }

    // Marks the job descriptor so downstream consumers can tell sanitization was skipped.
    private static JobDescriptor<?> skipSanitization(JobDescriptor<?> jobDescriptor) {
        return JobFunctions.appendJobDescriptorAttribute(jobDescriptor,
                JobAttributes.JOB_ATTRIBUTES_SANITIZATION_SKIPPED_EBS, true
        );
    }
}
| 1,459 |
0 | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator | Create_ds/titus-control-plane/titus-ext/job-validator/src/main/java/com/netflix/titus/ext/jobvalidator/ebs/JobEbsVolumeSanitizerConfiguration.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jobvalidator.ebs;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.titus.common.model.admission.AdmissionValidatorConfiguration;
import com.netflix.titus.common.model.admission.TitusValidatorConfiguration;
/**
 * Archaius-backed configuration for {@link JobEbsVolumeSanitizer}, bound to the
 * {@code titus.validate.job.ebs} property prefix.
 */
@Configuration(prefix = "titus.validate.job.ebs")
public interface JobEbsVolumeSanitizerConfiguration extends AdmissionValidatorConfiguration {

    /**
     * Whether EBS volume sanitization runs at all. When false, jobs are passed through and
     * tagged as having skipped EBS sanitization.
     */
    @DefaultValue("true")
    boolean isEnabled();

    /**
     * The aggregate timeout for the validation of all EBS volumes for a job. Since EBS volume
     * validation is required to get correct EBS metadata and there may be multiple volumes for
     * a job we want to provide sufficient time. However, since sanitization is on the job accept
     * path the timeout should not be too large.
     * This must be smaller than {@link TitusValidatorConfiguration#getTimeoutMs()} to be effective.
     */
    @DefaultValue("4500")
    long getJobEbsSanitizationTimeoutMs();
}
| 1,460 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra-testkit/src/main/java/com/netflix/titus/ext/cassandra/testkit | Create_ds/titus-control-plane/titus-ext/cassandra-testkit/src/main/java/com/netflix/titus/ext/cassandra/testkit/junit/TitusCassandraResource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.testkit.junit;
import com.datastax.driver.core.Session;
import org.cassandraunit.CQLDataLoader;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
import org.junit.rules.ExternalResource;
/**
 * JUnit {@link ExternalResource} that boots an embedded Cassandra server before a test and
 * wipes it afterwards. The CQL schema is loaded lazily on the first {@link #getSession()} call.
 */
public class TitusCassandraResource extends ExternalResource {

    /** Classpath location of the CQL data set loaded into the "Titus" keyspace. */
    public static final String CASSANDRA_SCHEMA = "CassandraSchema";

    private static final long STARTUP_TIMEOUT = 30000;

    private Session session;

    @Override
    protected void before() throws Throwable {
        // This improves boot time by a few seconds
        System.setProperty("cassandra.unsafesystem", "true");
        EmbeddedCassandraServerHelper.startEmbeddedCassandra(STARTUP_TIMEOUT);
    }

    @Override
    protected void after() {
        cleanAll();
    }

    /**
     * Returns a session against the embedded server, loading the schema on first use.
     */
    public Session getSession() {
        if (session != null) {
            return session;
        }
        loadSchema();
        session = EmbeddedCassandraServerHelper.getSession();
        return session;
    }

    /**
     * Drops all embedded Cassandra state and forgets the cached session so the next
     * {@link #getSession()} reloads the schema from scratch.
     */
    public void cleanAll() {
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra();
        session = null;
    }

    // Loads the CQL schema from the classpath into the "Titus" keyspace.
    private void loadSchema() {
        new CQLDataLoader(EmbeddedCassandraServerHelper.getSession())
                .load(new ClassPathCQLDataSet(CASSANDRA_SCHEMA, "Titus"));
    }
}
| 1,461 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra-testkit/src/main/java/com/netflix/titus/ext/cassandra/testkit | Create_ds/titus-control-plane/titus-ext/cassandra-testkit/src/main/java/com/netflix/titus/ext/cassandra/testkit/store/EmbeddedCassandraStoreFactory.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.testkit.store;
import java.io.File;
import java.util.Set;
import java.util.stream.Collectors;
import com.datastax.driver.core.Session;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.ext.cassandra.store.CassandraJobStore;
import com.netflix.titus.ext.cassandra.store.CassandraStoreConfiguration;
import com.netflix.titus.ext.cassandra.tool.snapshot.JobSnapshotLoader;
import org.cassandraunit.CQLDataLoader;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
import static com.netflix.titus.ext.cassandra.tool.CassandraSchemas.JOB_ACTIVE_TABLES;
/**
 * Factory for Titus stores backed by an embedded Cassandra server, intended for tests and
 * local development. Built via {@link #newBuilder()}, which boots the embedded server,
 * creates the {@code titus_embedded} keyspace and loads the schema (and optionally a job
 * store snapshot).
 */
public class EmbeddedCassandraStoreFactory {

    private static final long STARTUP_TIMEOUT = 30_000;

    // Classpath resources: server configuration and CQL table definitions.
    private static final String CASSANDRA_CONFIG = "embedded-cassandra.yaml";
    private static final String CASSANDRA_SCHEMA = "tables.cql";

    private static final String CASSANDRA_KEYSPACE = "titus_embedded";

    // Fixed store configuration suitable for the embedded/single-node setup.
    private static final CassandraStoreConfiguration CONFIGURATION = new CassandraStoreConfiguration() {
        @Override
        public String getV2KeySpace() {
            return "dev";
        }

        @Override
        public boolean isFailOnInconsistentCapacityGroupData() {
            return true;
        }

        @Override
        public boolean isFailOnInconsistentAgentData() {
            return true;
        }

        @Override
        public boolean isFailOnInconsistentLoadBalancerData() {
            return false;
        }

        @Override
        public boolean isFailOnInconsistentSchedulerData() {
            return false;
        }

        @Override
        public int getConcurrencyLimit() {
            return 10;
        }

        @Override
        public int getLoadBalancerWriteConcurrencyLimit() {
            return 100;
        }

        @Override
        public int getLoadBalancerDeleteConcurrencyLimit() {
            return 10;
        }

        @Override
        public boolean isTracingEnabled() {
            return false;
        }
    };

    private final Session session;
    private final TitusRuntime titusRuntime;

    public EmbeddedCassandraStoreFactory(Session session, TitusRuntime titusRuntime) {
        this.session = session;
        this.titusRuntime = titusRuntime;
    }

    /** Closes the Cassandra session; the embedded server itself is managed elsewhere. */
    public void shutdown() {
        session.close();
    }

    /** Returns a new job store bound to the embedded Cassandra session. */
    public JobStore getJobStore() {
        return new CassandraJobStore(CONFIGURATION, session, titusRuntime);
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    /**
     * Builder that boots the embedded Cassandra server and optionally pre-loads job store
     * snapshot files.
     */
    public static class Builder {

        private File jobInputFolder;
        private TitusRuntime titusRuntime;

        public Builder withTitusRuntime(TitusRuntime titusRuntime) {
            this.titusRuntime = titusRuntime;
            return this;
        }

        /**
         * Points the builder at a folder containing one {@code <table>.json} snapshot file per
         * active job table; fails fast if any expected file is missing.
         */
        public Builder withJobStoreFiles(File folder) {
            Preconditions.checkArgument(folder.exists(), "%s not found", folder);
            Preconditions.checkArgument(folder.isDirectory(), "%s is not directory", folder);
            // Every active job table must have a matching <table>.json file in the folder.
            Set<String> expectedFiles = JOB_ACTIVE_TABLES.stream().map(t -> t + ".json").collect(Collectors.toSet());
            Set<String> foundFiles = expectedFiles.stream().filter(f -> new File(folder, f).exists()).collect(Collectors.toSet());
            if (foundFiles.size() != JOB_ACTIVE_TABLES.size()) {
                Set<String> missingFiles = CollectionsExt.copyAndRemove(expectedFiles, foundFiles);
                throw new IllegalArgumentException("Incomplete set of job files. Missing files: " + missingFiles);
            }
            this.jobInputFolder = folder;
            return this;
        }

        public EmbeddedCassandraStoreFactory build() {
            // Order matters: the server and schema must exist before the snapshot is loaded.
            Session session = createEmbeddedCassandra();
            if (jobInputFolder != null) {
                loadJobStore(session);
            }
            if (titusRuntime == null) {
                titusRuntime = TitusRuntimes.internal();
            }
            return new EmbeddedCassandraStoreFactory(session, titusRuntime);
        }

        // Imports the job snapshot JSON files into the freshly created tables.
        private void loadJobStore(Session session) {
            new JobSnapshotLoader(session, jobInputFolder).load();
        }

        // Boots the embedded server, creates the keyspace and loads the CQL schema.
        private Session createEmbeddedCassandra() {
            // Disable fsync for a massive speedup on old platters. This improves boot time by a few seconds.
            System.setProperty("cassandra.unsafesystem", "true");
            try {
                File cassandraTmpDir = Files.createTempDir();
                EmbeddedCassandraServerHelper.startEmbeddedCassandra(CASSANDRA_CONFIG, cassandraTmpDir.getAbsolutePath(), STARTUP_TIMEOUT);
            } catch (Exception e) {
                throw new IllegalStateException("Cannot initialize the embedded Cassandra", e);
            }
            Session session = EmbeddedCassandraServerHelper.getSession();
            // NOTE(review): replication_factor 3 on a single embedded node works only because
            // reads/writes use consistency level ONE — confirm if consistency levels ever change.
            session.execute("CREATE KEYSPACE " + CASSANDRA_KEYSPACE + " WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }");
            session.execute("USE " + CASSANDRA_KEYSPACE);
            CQLDataLoader dataLoader = new CQLDataLoader(session);
            dataLoader.load(new ClassPathCQLDataSet(CASSANDRA_SCHEMA, "Titus"));
            return session;
        }
    }
}
| 1,462 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation-springboot/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation-springboot/src/main/java/com/netflix/titus/supplementary/relocation/RelocationMain.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import javax.inject.Named;
import com.netflix.titus.api.health.HealthIndicator;
import com.netflix.titus.api.health.HealthIndicators;
import com.netflix.titus.common.jhiccup.JHiccupComponent;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationComponent;
import com.netflix.titus.runtime.clustermembership.connector.ClusterMembershipInMemoryConnectorComponent;
import com.netflix.titus.runtime.clustermembership.endpoint.grpc.ClusterMembershipGrpcEndpointComponent;
import com.netflix.titus.runtime.clustermembership.endpoint.rest.ClusterMembershipRestEndpointComponent;
import com.netflix.titus.runtime.clustermembership.service.ClusterMembershipServiceComponent;
import com.netflix.titus.runtime.connector.common.reactor.GrpcToReactorClientFactoryComponent;
import com.netflix.titus.runtime.connector.common.reactor.GrpcToReactorServerFactoryComponent;
import com.netflix.titus.runtime.connector.eviction.EvictionConnectorComponent;
import com.netflix.titus.runtime.connector.eviction.EvictionDataReplicationComponent;
import com.netflix.titus.runtime.connector.jobmanager.JobManagementDataReplicationComponent;
import com.netflix.titus.runtime.connector.jobmanager.JobManagerConnectorComponent;
import com.netflix.titus.runtime.connector.titusmaster.ConfigurationLeaderResolverComponent;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolveComponent;
import com.netflix.titus.runtime.endpoint.rest.RestAddOnsComponent;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolverComponent;
import com.netflix.titus.supplementary.relocation.endpoint.grpc.TaskRelocationGrpcServerRunner;
import io.grpc.Channel;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import static com.netflix.titus.runtime.connector.titusmaster.ConfigurationLeaderResolverComponent.TITUS_MASTER_CHANNEL;
// Spring Boot entry point for the standalone task relocation service.
// The @Import list wires together the runtime, cluster membership machinery,
// the TitusMaster connectors (jobs, eviction) and this service's gRPC/REST endpoints.
@SpringBootApplication
@Import({
RelocationTitusRuntimeComponent.class,
JHiccupComponent.class,
CallMetadataResolveComponent.class,
ConfigurationLeaderResolverComponent.class,
GrpcToReactorClientFactoryComponent.class,
GrpcToReactorServerFactoryComponent.class,
// Cluster membership service
ClusterMembershipInMemoryConnectorComponent.class,
ClusterMembershipServiceComponent.class,
ClusterMembershipGrpcEndpointComponent.class,
ClusterMembershipRestEndpointComponent.class,
LeaderActivationComponent.class,
// Agent connector
NodeDataResolverComponent.class,
// Job connector
JobManagerConnectorComponent.class,
JobManagementDataReplicationComponent.class,
// Eviction connector
EvictionConnectorComponent.class,
EvictionDataReplicationComponent.class,
RelocationLeaderActivator.class,
TaskRelocationGrpcServerRunner.class,
RestAddOnsComponent.class
})
public class RelocationMain {
// Health indicator always reports healthy; no deeper checks are wired in here.
@Bean
public HealthIndicator getHealthIndicator() {
return HealthIndicators.alwaysHealthy();
}
// Job manager calls are routed over the shared TitusMaster channel.
@Bean
@Named(JobManagerConnectorComponent.JOB_MANAGER_CHANNEL)
public Channel getJobManagerChannel(@Named(TITUS_MASTER_CHANNEL) Channel channel) {
return channel;
}
// Eviction service calls reuse the same TitusMaster channel.
@Bean
@Named(EvictionConnectorComponent.EVICTION_CHANNEL)
public Channel getEvictionChannel(@Named(TITUS_MASTER_CHANNEL) Channel channel) {
return channel;
}
public static void main(String[] args) {
SpringApplication.run(RelocationMain.class, args);
}
}
| 1,463 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation-springboot/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation-springboot/src/main/java/com/netflix/titus/supplementary/relocation/RelocationTitusRuntimeComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.common.framework.fit.FitComponent;
import com.netflix.titus.common.framework.fit.FitFramework;
import com.netflix.titus.common.framework.fit.adapter.GrpcFitInterceptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Provides the shared {@link TitusRuntime} and its Spectator {@link Registry}
 * for the task relocation service.
 */
@Configuration
public class RelocationTitusRuntimeComponent {

    // Single runtime instance shared by all beans; created eagerly with the FIT framework flag set.
    private final TitusRuntime titusRuntime = TitusRuntimes.internal(true);

    @Bean
    public TitusRuntime getTitusRuntime() {
        FitFramework fit = titusRuntime.getFitFramework();
        if (!fit.isActive()) {
            return titusRuntime;
        }
        // Fault injection is on: register the gRPC FIT interceptor component under the root.
        fit.getRootComponent().createChild(GrpcFitInterceptor.COMPONENT);
        return titusRuntime;
    }

    @Bean
    public Registry getRegistry(TitusRuntime titusRuntime) {
        return titusRuntime.getRegistry();
    }
}
| 1,464 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher/TestUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.model.job.JobGenerator;
public class TestUtils {

    /** Builds {@code numTasks} gRPC task objects, each from a freshly generated batch task fixture. */
    static List<Task> generateSampleTasks(int numTasks) {
        return IntStream.range(0, numTasks)
                .boxed()
                .map(index -> GrpcJobManagementModelConverters.toGrpcTask(JobGenerator.oneBatchTask(), new EmptyLogStorageInfo<>()))
                .collect(Collectors.toList());
    }
}
| 1,465 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher/TaskDocumentTest.java | package com.netflix.titus.supplementary.taskspublisher;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
public class TaskDocumentTest {
@Test
public void sanitizeMap() {
Map<String, String> env = new HashMap<>();
String key1 = ".FOO";
String key2 = "foo.bar";
String key3 = "foo..bar";
String key4 = "platform";
String key5 = "foo.bar.";
String key6 = "foo.bar...more";
String key7 = "";
String key8 = " leadingSpaceKey";
String key9 = "trailingSpaceKey ";
env.put(key1, "bar");
env.put(key2, "ok");
env.put(key3, "notOk");
env.put(key4, "titus");
env.put(key5, "notGood");
env.put(key6, "bad");
env.put(key7, "empty value");
env.put(key8, "some value");
env.put(key9, "cmb");
Map<String, String> sanitizeEnvMap = TaskDocument.sanitizeMap(env);
assertThat(sanitizeEnvMap).isNotNull();
assertThat(sanitizeEnvMap.size()).isEqualTo(4);
assertThat(sanitizeEnvMap.containsKey(key1)).isFalse();
assertThat(sanitizeEnvMap.containsKey(key2)).isTrue();
assertThat(sanitizeEnvMap.containsKey(key3)).isFalse();
assertThat(sanitizeEnvMap.containsKey(key4)).isTrue();
assertThat(sanitizeEnvMap.containsKey(key5)).isFalse();
assertThat(sanitizeEnvMap.containsKey(key6)).isFalse();
assertThat(sanitizeEnvMap.containsKey(key7)).isFalse();
assertThat(sanitizeEnvMap.containsKey(key8.trim())).isTrue();
assertThat(sanitizeEnvMap.containsKey(key9.trim())).isTrue();
}
@Test
public void emptyMapCheck() {
Map<String, String> sanitizeEnvMap = TaskDocument.sanitizeMap(null);
assertThat(sanitizeEnvMap).isNotNull();
assertThat(sanitizeEnvMap.size()).isEqualTo(0);
Map<String, String> sanitizedMap2 = TaskDocument.sanitizeMap(Collections.emptyMap());
assertThat(sanitizedMap2).isNotNull();
assertThat(sanitizedMap2.size()).isEqualTo(0);
}
} | 1,466 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher/TaskEventsGeneratorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.elasticsearch.EsClient;
import com.netflix.titus.ext.elasticsearch.model.BulkEsIndexResp;
import com.netflix.titus.ext.elasticsearch.model.BulkEsIndexRespItem;
import com.netflix.titus.ext.elasticsearch.model.EsIndexResp;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.supplementary.taskspublisher.TitusClient.JobOrTaskUpdate;
import com.netflix.titus.supplementary.taskspublisher.config.EsPublisherConfiguration;
import com.netflix.titus.supplementary.taskspublisher.es.EsPublisher;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import org.mockito.stubbing.Answer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
import static org.assertj.core.api.Fail.fail;
import static org.assertj.core.api.Java6Assertions.assertThat;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TaskEventsGeneratorTest {
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
// TitusClient mock: streams numTasks task updates and resolves task/job lookups
// with freshly generated batch job fixtures.
private TitusClient mockTitusClient(int numTasks) {
TitusClient titusClient = mock(TitusClient.class);
when(titusClient.getJobAndTaskUpdates()).thenReturn(Flux.fromStream(TestUtils.generateSampleTasks(numTasks).stream().map(JobOrTaskUpdate::taskUpdate)));
when(titusClient.getTask(anyString())).thenReturn(Mono.just(GrpcJobManagementModelConverters.toGrpcTask(JobGenerator.oneBatchTask(), new EmptyLogStorageInfo<>())));
when(titusClient.getJobById(anyString())).thenReturn(Mono.just(GrpcJobManagementModelConverters.toGrpcJob(JobGenerator.oneBatchJob())));
return titusClient;
}
// ES client mock: every bulk index call succeeds, echoing one "created"
// response item per submitted document.
private EsClient<TaskDocument> mockElasticSearchClient() {
EsClient<TaskDocument> esClient = mock(EsClient.class);
when(esClient.bulkIndexDocuments(anyList(), anyString(), anyString())).thenAnswer((Answer<Mono<BulkEsIndexResp>>) invocation -> {
final List<TaskDocument> documents = invocation.getArgument(0);
final List<BulkEsIndexRespItem> bulkEsIndexRespItemList = documents.stream().map(doc -> {
EsIndexResp esIndexResp = new EsIndexResp(true, "created", doc.getId());
return new BulkEsIndexRespItem(esIndexResp);
}).collect(Collectors.toList());
final BulkEsIndexResp bulkEsIndexResp = new BulkEsIndexResp(bulkEsIndexRespItemList);
return Mono.just(bulkEsIndexResp);
});
return esClient;
}
// Configuration stub with a fixed index name prefix and monthly date suffix pattern.
private EsPublisherConfiguration mockEsPublisherConfiguration() {
EsPublisherConfiguration mockConfig = mock(EsPublisherConfiguration.class);
when(mockConfig.getTaskDocumentEsIndexName()).thenReturn("tasks_");
when(mockConfig.getTaskDocumentEsIndexDateSuffixPattern()).thenReturn("yyyyMM");
return mockConfig;
}
// End-to-end check: after activation the publisher must report at least
// numTasks documents published and zero errors within the waiting window.
@Test
public void checkPublisherState() {
int numTasks = 5;
final TaskEventsGenerator taskEventsGenerator = new TaskEventsGenerator(
mockTitusClient(numTasks),
Collections.emptyMap(), titusRuntime);
try {
EsPublisher esPublisher = new EsPublisher(taskEventsGenerator, mockElasticSearchClient(),
mockEsPublisherConfiguration(), new DefaultRegistry());
esPublisher.activate();
final CountDownLatch latch = new CountDownLatch(1);
// Check the counters once, one second after activation, off the test thread.
Flux.interval(Duration.ofSeconds(1), Schedulers.elastic())
.take(1)
.doOnNext(i -> {
final int numTasksUpdated = esPublisher.getNumTasksPublished();
final int numErrors = esPublisher.getNumErrorsInPublishing();
assertThat(numErrors).isEqualTo(0);
assertThat(numTasksUpdated).isGreaterThanOrEqualTo(numTasks);
latch.countDown();
}).subscribe();
try {
// Generous upper bound; normally the latch releases after ~1s.
latch.await(2, TimeUnit.MINUTES);
} catch (InterruptedException e) {
fail("Timeout in checkPublisherState ", e);
}
} finally {
// Always tear down the generator's internal resources.
taskEventsGenerator.shutdown();
}
}
}
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher/TitusClientImplTest.java | package com.netflix.titus.supplementary.taskspublisher;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceFutureStub;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskId;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.grpc.ManagedChannel;
import io.grpc.Server;
import io.grpc.inprocess.InProcessChannelBuilder;
import io.grpc.inprocess.InProcessServerBuilder;
import io.grpc.stub.StreamObserver;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.assertj.core.api.Java6Assertions.assertThat;
import static org.assertj.core.api.Java6Assertions.fail;
public class TitusClientImplTest {
private static TitusClient titusClient;
private static Server testServer;
// Shared fixtures: five generated batch tasks plus one batch job the mock service serves back.
private static BatchJobTask taskOne = JobGenerator.oneBatchTask();
private static BatchJobTask taskTwo = JobGenerator.oneBatchTask();
private static BatchJobTask taskThree = JobGenerator.oneBatchTask();
private static BatchJobTask taskFour = JobGenerator.oneBatchTask();
private static BatchJobTask taskFive = JobGenerator.oneBatchTask();
private static Job<BatchJobExt> jobOne = JobGenerator.oneBatchJob();
// In-process stand-in for the TitusMaster job management service. Lookups always
// return the canned fixtures regardless of the requested id; observeJobs replays
// the five task fixtures as task updates and then completes.
public static class MockJobManagerService extends JobManagementServiceGrpc.JobManagementServiceImplBase {
@Override
public void findTask(TaskId request, StreamObserver<Task> responseObserver) {
final Task grpcTask = GrpcJobManagementModelConverters.toGrpcTask(taskOne, new EmptyLogStorageInfo<>());
responseObserver.onNext(grpcTask);
responseObserver.onCompleted();
}
@Override
public void findJob(JobId request, StreamObserver<com.netflix.titus.grpc.protogen.Job> responseObserver) {
final com.netflix.titus.grpc.protogen.Job grpcJob = GrpcJobManagementModelConverters.toGrpcJob(jobOne);
responseObserver.onNext(grpcJob);
responseObserver.onCompleted();
}
@Override
public void observeJobs(ObserveJobsQuery request, StreamObserver<JobChangeNotification> responseObserver) {
final List<BatchJobTask> batchJobTasks = Arrays.asList(taskOne, taskTwo, taskThree, taskFour, taskFive);
batchJobTasks.forEach(task -> {
final Task grpcTask = GrpcJobManagementModelConverters.toGrpcTask(task, new EmptyLogStorageInfo<>());
responseObserver.onNext(buildJobChangeNotification(grpcTask));
});
responseObserver.onCompleted();
}
// Wraps a task into the TaskUpdate variant of the change notification envelope.
private JobChangeNotification buildJobChangeNotification(Task task) {
return JobChangeNotification.newBuilder()
.setTaskUpdate(JobChangeNotification.TaskUpdate.newBuilder().setTask(task).build())
.build();
}
}
// Starts an in-process gRPC server hosting the mock service and points the
// client under test at it through async and future stubs.
@Before
public void setup() throws IOException {
final MockJobManagerService mockJobManagerService = new MockJobManagerService();
testServer = InProcessServerBuilder
.forName("testServer")
.directExecutor()
.addService(mockJobManagerService)
.build()
.start();
final ManagedChannel channel = InProcessChannelBuilder
.forName("testServer")
.directExecutor()
.usePlaintext()
.build();
final JobManagementServiceStub jobManagementServiceStub = JobManagementServiceGrpc.newStub(channel);
final JobManagementServiceFutureStub jobManagementServiceFutureStub = JobManagementServiceGrpc.newFutureStub(channel);
titusClient = new TitusClientImpl(jobManagementServiceStub, jobManagementServiceFutureStub, new DefaultRegistry());
}
@After
public void cleanup() {
testServer.shutdownNow();
}
// getTask should resolve to the fixture task with matching task and job ids.
@Test
public void getTaskById() {
final CountDownLatch latch = new CountDownLatch(1);
titusClient.getTask(taskOne.getId()).subscribe(task -> {
assertThat(task.getId()).isEqualTo(taskOne.getId());
assertThat(task.getJobId()).isEqualTo(taskOne.getJobId());
latch.countDown();
});
try {
latch.await(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
fail("getTaskById Timeout ", e);
}
}
// getJobById should resolve to the fixture job.
@Test
public void getJobById() {
final CountDownLatch latch = new CountDownLatch(1);
titusClient.getJobById(jobOne.getId()).subscribe(job -> {
assertThat(job.getId()).isEqualTo(jobOne.getId());
latch.countDown();
});
try {
latch.await(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
fail("getJobById Timeout ", e);
}
}
// The update stream should deliver all five task updates emitted by the mock service.
@Test
public void getTaskUpdates() {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicInteger tasksCount = new AtomicInteger(0);
titusClient.getJobAndTaskUpdates().subscribe(task -> {
if (tasksCount.incrementAndGet() == 5) {
latch.countDown();
}
}, e -> fail("getTaskUpdates exception {}", e));
try {
latch.await(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
fail("getTaskUpdates Timeout ", e);
}
}
}
| 1,468 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/test/java/com/netflix/titus/supplementary/taskspublisher/es/ElasticSearchUtilsTest.java | package com.netflix.titus.supplementary.taskspublisher.es;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.assertj.core.api.Java6Assertions.assertThat;
public class ElasticSearchUtilsTest {
    private static final Logger logger = LoggerFactory.getLogger(ElasticSearchUtilsTest.class);

    /** The current index name must be the prefix followed by the current yyyyMM suffix. */
    @Test
    public void verifyCurrentEsIndexName() {
        SimpleDateFormat suffixFormat = new SimpleDateFormat("yyyyMM");
        String expected = "workloads_" + suffixFormat.format(new Date());
        String actual = ElasticSearchUtils.buildEsIndexNameCurrent("workloads_", suffixFormat);
        assertThat(actual).isNotNull();
        assertThat(actual).isNotEmpty();
        assertThat(actual).isEqualTo(expected);
    }
}
| 1,469 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TaskPublisherLeaderActivator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.util.Collections;
import java.util.List;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.clustermembership.service.ClusterMembershipService;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationConfiguration;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinator;
import com.netflix.titus.supplementary.taskspublisher.es.EsPublisher;
import static com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinators.coordinatorWithLoggingCallback;
import static com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinators.coordinatorWithSystemExitCallback;
/**
 * Gates the {@link EsPublisher} on cluster leadership: the publisher is only
 * activated on the member currently holding leadership.
 */
@Singleton
public class TaskPublisherLeaderActivator {

    private final LeaderActivationCoordinator coordinator;

    @Inject
    public TaskPublisherLeaderActivator(LeaderActivationConfiguration configuration,
                                        EsPublisher esPublisher,
                                        ClusterMembershipService membershipService,
                                        TitusRuntime titusRuntime) {
        // Only the ES publisher is leader-gated in this component.
        List<LeaderActivationListener> services = Collections.singletonList(esPublisher);
        if (configuration.isSystemExitOnLeadershipLost()) {
            // Process exits when leadership is lost, so a replacement instance takes over cleanly.
            this.coordinator = coordinatorWithSystemExitCallback(configuration, services, membershipService, titusRuntime);
        } else {
            this.coordinator = coordinatorWithLoggingCallback(configuration, services, membershipService, titusRuntime);
        }
    }

    @PreDestroy
    public void shutdown() {
        coordinator.shutdown();
    }
}
| 1,470 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TasksPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
/**
 * Outcome counters exposed by a component that publishes task documents
 * to an external store.
 */
public interface TasksPublisher {
/** Returns the number of errors observed while publishing so far. */
int getNumErrorsInPublishing();
/** Returns the number of tasks published successfully so far. */
int getNumTasksPublished();
}
| 1,471 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TitusClient.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import com.google.common.base.Preconditions;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.Task;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
/**
 * Reactive client facade over the Titus job management API, exposing single
 * job/task lookups and a combined job-and-task update stream.
 */
public interface TitusClient {
/** Resolves a single job by its id. */
Mono<Job> getJobById(String jobId);
/** Resolves a single task by its id. */
Mono<Task> getTask(String taskId);
/** Streams job and task update events; each element carries either a job or a task. */
Flux<JobOrTaskUpdate> getJobAndTaskUpdates();
/**
 * Tagged union of a job update or a task update: exactly one of the two
 * fields is non-null, enforced by the static factories below.
 */
class JobOrTaskUpdate {
private final Job job;
private final Task task;
private JobOrTaskUpdate(Job job, Task task) {
this.job = job;
this.task = task;
}
// NOTE(review): the IllegalStateException message "Task container" signals that this
// update holds a task, not a job — terse but apparently intentional; confirm wording.
public Job getJob() {
Preconditions.checkState(job != null, "Task container");
return job;
}
// NOTE(review): mirror of the above — "Job container" means this update holds a job.
public Task getTask() {
Preconditions.checkState(task != null, "Job container");
return task;
}
/** True when this update carries a job. */
public boolean hasJob() {
return job != null;
}
/** True when this update carries a task. */
public boolean hasTask() {
return task != null;
}
/** Wraps a non-null job as an update. */
public static JobOrTaskUpdate jobUpdate(Job job) {
Preconditions.checkNotNull(job, "Null job");
return new JobOrTaskUpdate(job, null);
}
/** Wraps a non-null task as an update. */
public static JobOrTaskUpdate taskUpdate(Task task) {
Preconditions.checkNotNull(task, "Null task");
return new JobOrTaskUpdate(null, task);
}
}
}
| 1,472 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TaskDocument.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.netflix.titus.api.endpoint.v2.rest.representation.TitusJobType;
import com.netflix.titus.api.endpoint.v2.rest.representation.TitusTaskState;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Container;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.ExecutableStatus;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobGroupInfo;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.ext.elasticsearch.EsDoc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_ASG;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_HOST;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_ITYPE;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_REGION;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_AGENT_ZONE;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_NETWORK_INTERFACE_ID;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_NETWORK_INTERFACE_INDEX;
import static com.netflix.titus.api.jobmanager.TaskAttributes.TASK_ATTRIBUTES_TIER;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_FAILED;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_NORMAL;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_SCALED_DOWN;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_TASK_KILLED;
/**
 * Flattened, Elasticsearch-friendly representation of a Titus task together with the relevant
 * fields of its owning job. Instances are created via {@link #fromV3Task} and serialized as-is
 * into the ES task index; the getters exist for the JSON serializer.
 */
public class TaskDocument implements EsDoc {

    private static final Logger logger = LoggerFactory.getLogger(TaskDocument.class);

    // Elasticsearch rejects field names that are empty, start or end with a dot,
    // or contain consecutive dots; such keys are filtered out by sanitizeMap().
    private static final Pattern INVALID_KEY_FORMAT = Pattern.compile("^[.]|[.]{2,}|[.]$|^$");

    private String id;
    private String instanceId;
    private String jobId;
    private String host;
    private String hostInstanceId;
    private String zone;
    private String asg;
    private String instanceType;
    private String region;
    private String message;
    private String submittedAt;
    private String launchedAt;
    private String startingAt;
    private String startedAt;
    private String finishedAt;
    private String state;
    private Map<String, String> jobLabels;
    private ComputedFields computedFields;
    private Map<String, String> titusContext;

    /* Job Spec Fields */
    private String name;
    private String applicationName;
    private String appName;
    private String user;
    private TitusJobType type;
    private Map<String, String> labels;
    private String version;
    private String digest;
    private String entryPoint;
    private Boolean inService;
    private int instances;
    private int instancesMin;
    private int instancesMax;
    private int instancesDesired;
    private double cpu;
    private double memory;
    private double networkMbps;
    private double disk;
    private int gpu;
    private int shm;
    private String ipAddressAllocationId;
    private Map<String, String> env;
    private int retries;
    private boolean restartOnSuccess;
    private Long runtimeLimitSecs;
    private boolean allocateIpAddress;
    private String iamProfile;
    private List<String> securityGroups;
    private List<String> softConstraints;
    private List<String> hardConstraints;
    private String jobGroupStack;
    private String jobGroupDetail;
    private String jobGroupSequence;
    private String capacityGroup;
    private String tier;

    // Network configuration
    private String containerIp;
    private String networkInterfaceId;
    private String networkInterfaceIndex;
    private String networkMode;

    public String getName() {
        return name;
    }

    public String getApplicationName() {
        return applicationName;
    }

    public String getAppName() {
        return appName;
    }

    public String getUser() {
        return user;
    }

    public TitusJobType getType() {
        return type;
    }

    public Map<String, String> getLabels() {
        return labels;
    }

    public String getVersion() {
        return version;
    }

    public String getDigest() {
        return digest;
    }

    public String getEntryPoint() {
        return entryPoint;
    }

    public Boolean getInService() {
        return inService;
    }

    public int getInstances() {
        return instances;
    }

    public int getInstancesMin() {
        return instancesMin;
    }

    public int getInstancesMax() {
        return instancesMax;
    }

    public int getInstancesDesired() {
        return instancesDesired;
    }

    public double getCpu() {
        return cpu;
    }

    public double getMemory() {
        return memory;
    }

    public double getNetworkMbps() {
        return networkMbps;
    }

    public double getDisk() {
        return disk;
    }

    public int getShm() {
        return shm;
    }

    public String getIpAddressAllocationId() {
        return ipAddressAllocationId;
    }

    public int getGpu() {
        return gpu;
    }

    public Map<String, String> getEnv() {
        return env;
    }

    public int getRetries() {
        return retries;
    }

    public boolean isRestartOnSuccess() {
        return restartOnSuccess;
    }

    public Long getRuntimeLimitSecs() {
        return runtimeLimitSecs;
    }

    public boolean isAllocateIpAddress() {
        return allocateIpAddress;
    }

    public String getIamProfile() {
        return iamProfile;
    }

    public List<String> getSecurityGroups() {
        return securityGroups;
    }

    public List<String> getSoftConstraints() {
        return softConstraints;
    }

    public List<String> getHardConstraints() {
        return hardConstraints;
    }

    public String getJobGroupStack() {
        return jobGroupStack;
    }

    public String getJobGroupDetail() {
        return jobGroupDetail;
    }

    public String getJobGroupSequence() {
        return jobGroupSequence;
    }

    public String getCapacityGroup() {
        return capacityGroup;
    }

    public String getSubmittedAt() {
        return submittedAt;
    }

    public String getLaunchedAt() {
        return launchedAt;
    }

    public String getStartingAt() {
        return startingAt;
    }

    public String getStartedAt() {
        return startedAt;
    }

    public String getFinishedAt() {
        return finishedAt;
    }

    public String getMessage() {
        return message;
    }

    public String getId() {
        return id;
    }

    public String getInstanceId() {
        return instanceId;
    }

    public String getJobId() {
        return jobId;
    }

    public String getState() {
        return state;
    }

    public Map<String, String> getJobLabels() {
        return jobLabels;
    }

    public String getHost() {
        return host;
    }

    public String getZone() {
        return zone;
    }

    public String getRegion() {
        return region;
    }

    public ComputedFields getComputedFields() {
        return computedFields;
    }

    public Map<String, String> getTitusContext() {
        return titusContext;
    }

    public String getAsg() {
        return asg;
    }

    public String getInstanceType() {
        return instanceType;
    }

    public String getHostInstanceId() {
        return hostInstanceId;
    }

    public String getContainerIp() {
        return containerIp;
    }

    public String getNetworkInterfaceId() {
        return networkInterfaceId;
    }

    public String getNetworkInterfaceIndex() {
        return networkInterfaceIndex;
    }

    public String getTier() {
        return tier;
    }

    public String getNetworkMode() {
        return networkMode;
    }

    /**
     * Latencies (in milliseconds) between task lifecycle states, derived from the status
     * timestamps in {@link #fromV3Task}. A field is left {@code null} when the task has not
     * reached the corresponding state yet.
     */
    public static class ComputedFields {
        Long msFromSubmittedToLaunched;
        Long msFromLaunchedToStarting;
        Long msToStarting;
        Long msFromStartingToStarted;
        Long msToStarted;
        Long msFromStartedToFinished;
        Long msToFinished;

        public Long getMsFromSubmittedToLaunched() {
            return msFromSubmittedToLaunched;
        }

        public Long getMsFromLaunchedToStarting() {
            return msFromLaunchedToStarting;
        }

        public Long getMsToStarting() {
            return msToStarting;
        }

        public Long getMsFromStartingToStarted() {
            return msFromStartingToStarted;
        }

        public Long getMsToStarted() {
            return msToStarted;
        }

        public Long getMsFromStartedToFinished() {
            return msFromStartedToFinished;
        }

        public Long getMsToFinished() {
            return msToFinished;
        }
    }

    /**
     * Builds a {@link TaskDocument} from a V3 core task and its owning job.
     *
     * @param task       the task to index
     * @param job        the job owning {@code task}; job-level fields (labels, resources, capacity, ...) are flattened in
     * @param dateFormat formatter for the lifecycle timestamps; formatting failures are tolerated (see {@link #doSafeDateFormat})
     * @param context    extra key/value pairs stored verbatim as {@code titusContext}
     */
    public static TaskDocument fromV3Task(Task task, Job job, SimpleDateFormat dateFormat, Map<String, String> context) {
        TaskDocument taskDocument = new TaskDocument();
        JobDescriptor jobDescriptor = job.getJobDescriptor();
        Container container = jobDescriptor.getContainer();
        Image image = container.getImage();
        ContainerResources containerResources = container.getContainerResources();
        JobGroupInfo jobGroupInfo = jobDescriptor.getJobGroupInfo();

        // NOTE(review): both 'name' and 'appName' are populated from the application name,
        // while 'applicationName' holds the image name — presumably for legacy V2 index
        // compatibility; confirm before changing.
        taskDocument.name = jobDescriptor.getApplicationName();
        taskDocument.applicationName = image.getName();
        taskDocument.appName = jobDescriptor.getApplicationName();
        taskDocument.user = jobDescriptor.getOwner().getTeamEmail();
        taskDocument.labels = sanitizeMap(container.getAttributes());
        taskDocument.version = image.getTag();
        taskDocument.digest = image.getDigest();
        taskDocument.entryPoint = StringExt.concatenate(container.getEntryPoint(), " ");
        taskDocument.cpu = containerResources.getCpu();
        taskDocument.memory = containerResources.getMemoryMB();
        taskDocument.networkMbps = containerResources.getNetworkMbps();
        taskDocument.disk = containerResources.getDiskMB();
        taskDocument.gpu = containerResources.getGpu();
        taskDocument.shm = containerResources.getShmMB();
        taskDocument.allocateIpAddress = containerResources.isAllocateIP();
        taskDocument.env = sanitizeMap(container.getEnv());
        taskDocument.iamProfile = container.getSecurityProfile().getIamRole();
        taskDocument.securityGroups = container.getSecurityProfile().getSecurityGroups();
        taskDocument.softConstraints = new ArrayList<>(container.getSoftConstraints().keySet());
        taskDocument.hardConstraints = new ArrayList<>(container.getHardConstraints().keySet());
        taskDocument.capacityGroup = jobDescriptor.getCapacityGroup();
        taskDocument.jobGroupStack = jobGroupInfo.getStack();
        taskDocument.jobGroupDetail = jobGroupInfo.getDetail();
        taskDocument.jobGroupSequence = jobGroupInfo.getSequence();

        // Batch and service jobs carry their sizing/retry settings in different extension types.
        JobDescriptor.JobDescriptorExt jobDescriptorExt = jobDescriptor.getExtensions();
        if (jobDescriptorExt instanceof BatchJobExt) {
            BatchJobExt batchJobExt = (BatchJobExt) jobDescriptorExt;
            taskDocument.runtimeLimitSecs = batchJobExt.getRuntimeLimitMs();
            taskDocument.type = TitusJobType.batch;
            taskDocument.inService = false;
            taskDocument.instances = batchJobExt.getSize();
            taskDocument.instancesMin = batchJobExt.getSize();
            taskDocument.instancesMax = batchJobExt.getSize();
            taskDocument.instancesDesired = batchJobExt.getSize();
            taskDocument.retries = batchJobExt.getRetryPolicy().getRetries();
            taskDocument.restartOnSuccess = false;
        } else if (jobDescriptorExt instanceof ServiceJobExt) {
            ServiceJobExt serviceJobExt = (ServiceJobExt) jobDescriptorExt;
            taskDocument.runtimeLimitSecs = 0L;
            taskDocument.type = TitusJobType.service;
            taskDocument.inService = serviceJobExt.isEnabled();
            Capacity capacity = serviceJobExt.getCapacity();
            taskDocument.instances = capacity.getDesired();
            taskDocument.instancesMin = capacity.getMin();
            taskDocument.instancesMax = capacity.getMax();
            taskDocument.instancesDesired = capacity.getDesired();
            taskDocument.retries = serviceJobExt.getRetryPolicy().getRetries();
            taskDocument.restartOnSuccess = false;
        }

        Map<String, String> taskContext = task.getTaskContext();
        taskDocument.id = task.getId();
        taskDocument.instanceId = task.getId();
        taskDocument.jobId = task.getJobId();
        taskDocument.state = toV2TaskState(task.getStatus()).name();
        taskDocument.jobLabels = sanitizeMap(job.getJobDescriptor().getAttributes());
        taskDocument.host = taskContext.get(TASK_ATTRIBUTES_AGENT_HOST);
        taskDocument.tier = taskContext.getOrDefault(TASK_ATTRIBUTES_TIER, "Unknown");
        taskDocument.computedFields = new ComputedFields();

        // Agent placement attributes are optional in the task context; only set when present
        // so that absent values serialize as missing fields rather than nulls.
        final String region = taskContext.get(TASK_ATTRIBUTES_AGENT_REGION);
        if (region != null) {
            taskDocument.region = region;
        }
        final String zone = taskContext.get(TASK_ATTRIBUTES_AGENT_ZONE);
        if (zone != null) {
            taskDocument.zone = zone;
        }
        final String asg = taskContext.get(TASK_ATTRIBUTES_AGENT_ASG);
        if (asg != null) {
            taskDocument.asg = asg;
        }
        final String instanceType = taskContext.get(TASK_ATTRIBUTES_AGENT_ITYPE);
        if (instanceType != null) {
            taskDocument.instanceType = instanceType;
        }
        final String instanceId = taskContext.get(TASK_ATTRIBUTES_AGENT_INSTANCE_ID);
        if (instanceId != null) {
            taskDocument.hostInstanceId = instanceId;
        }
        final String ipAddressAllocationId = taskContext.get(TASK_ATTRIBUTES_IP_ALLOCATION_ID);
        if (ipAddressAllocationId != null) {
            taskDocument.ipAddressAllocationId = ipAddressAllocationId;
        }

        extractNetworkConfigurationData(taskContext, jobDescriptor, taskDocument);

        // Lifecycle timestamps; 0 means the task never reached that state.
        long acceptedAt = findTaskStatus(task, TaskState.Accepted).map(ExecutableStatus::getTimestamp).orElse(0L);
        long launchedAt = findTaskStatus(task, TaskState.Launched).map(ExecutableStatus::getTimestamp).orElse(0L);
        long startingAt = findTaskStatus(task, TaskState.StartInitiated).map(ExecutableStatus::getTimestamp).orElse(0L);
        long startedAt = findTaskStatus(task, TaskState.Started).map(ExecutableStatus::getTimestamp).orElse(0L);
        long completedAt = findTaskStatus(task, TaskState.Finished).map(ExecutableStatus::getTimestamp).orElse(0L);

        if (acceptedAt > 0) {
            taskDocument.submittedAt = doSafeDateFormat(dateFormat, new Date(acceptedAt));
        }
        if (launchedAt > 0) {
            taskDocument.launchedAt = doSafeDateFormat(dateFormat, new Date(launchedAt));
            taskDocument.computedFields.msFromSubmittedToLaunched = launchedAt - acceptedAt;
        }
        if (startingAt > 0) {
            taskDocument.startingAt = doSafeDateFormat(dateFormat, new Date(startingAt));
            taskDocument.computedFields.msFromLaunchedToStarting = startingAt - launchedAt;
            taskDocument.computedFields.msToStarting = startingAt - acceptedAt;
        }
        if (startedAt > 0) {
            taskDocument.startedAt = doSafeDateFormat(dateFormat, new Date(startedAt));
            taskDocument.computedFields.msFromStartingToStarted = startedAt - startingAt;
            taskDocument.computedFields.msToStarted = startedAt - acceptedAt;
        }
        if (completedAt > 0) {
            taskDocument.finishedAt = doSafeDateFormat(dateFormat, new Date(completedAt));
            taskDocument.computedFields.msFromStartedToFinished = completedAt - startedAt;
            taskDocument.computedFields.msToFinished = completedAt - acceptedAt;
        }
        taskDocument.message = task.getStatus().getReasonMessage();
        taskDocument.titusContext = context;
        return taskDocument;
    }

    /**
     * Formatting may fail for some date values. We do not want to break everything, so for such cases we pass
     * the unformatted value.
     */
    private static String doSafeDateFormat(SimpleDateFormat dateFormat, Date date) {
        try {
            return dateFormat.format(date);
        } catch (Exception e) {
            return "wrong_value_" + date.getTime();
        }
    }

    /**
     * Returns the status entry for the given state, looking first at the current status and then
     * at the status history. Empty if the task never reached {@code taskState}.
     */
    private static Optional<TaskStatus> findTaskStatus(Task task, TaskState taskState) {
        if (task.getStatus().getState() == taskState) {
            return Optional.of(task.getStatus());
        } else {
            return task.getStatusHistory().stream().filter(taskStatus -> taskStatus.getState() == taskState).findFirst();
        }
    }

    /**
     * Maps a V3 task status to the legacy V2 state names that the ES index uses. Finished tasks
     * are further classified by their reason code (FINISHED / FAILED / STOPPED / CRASHED).
     */
    private static TitusTaskState toV2TaskState(TaskStatus taskStatus) {
        switch (taskStatus.getState()) {
            case Accepted:
                return TitusTaskState.QUEUED;
            case Launched:
                return TitusTaskState.DISPATCHED;
            case StartInitiated:
                return TitusTaskState.STARTING;
            case Started:
            case KillInitiated:
                return TitusTaskState.RUNNING;
            case Finished:
                String reasonCode = taskStatus.getReasonCode();
                if (reasonCode.equalsIgnoreCase(REASON_NORMAL)) {
                    return TitusTaskState.FINISHED;
                } else if (reasonCode.equalsIgnoreCase(REASON_FAILED)) {
                    return TitusTaskState.FAILED;
                } else if (reasonCode.equalsIgnoreCase(REASON_TASK_KILLED) || reasonCode.equalsIgnoreCase(REASON_SCALED_DOWN)) {
                    return TitusTaskState.STOPPED;
                } else if (TaskStatus.isSystemError(taskStatus)) {
                    return TitusTaskState.CRASHED;
                }
                return TitusTaskState.FAILED;
            default:
                return TitusTaskState.FAILED;
        }
    }

    /**
     * Drops map entries whose keys Elasticsearch cannot index (see {@link #INVALID_KEY_FORMAT})
     * and trims the surviving keys. Keys that become equal after trimming are merged (last entry
     * wins) instead of throwing: the previous two-argument {@code Collectors.toMap} collector
     * raised an {@link IllegalStateException} on such collisions and failed the whole document.
     */
    @VisibleForTesting
    static Map<String, String> sanitizeMap(Map<String, String> map) {
        if (map == null) {
            return Collections.emptyMap();
        }
        return map.entrySet().stream()
                .filter(entry -> isSafe(entry.getKey()))
                .collect(Collectors.toMap(entry -> entry.getKey().trim(), Map.Entry::getValue, (first, second) -> second));
    }

    private static boolean isSafe(String key) {
        boolean isKeySafeForES = !INVALID_KEY_FORMAT.matcher(key).find();
        if (!isKeySafeForES) {
            logger.info("Removing invalid attribute \"{}\" from ES task document.", key);
        }
        return isKeySafeForES;
    }

    /**
     * Copies network-related attributes from the task context into the document. Missing
     * attributes are normalized to empty strings rather than nulls.
     */
    private static void extractNetworkConfigurationData(Map<String, String> taskContext, JobDescriptor jobDescriptor, TaskDocument taskDocument) {
        taskDocument.networkInterfaceId = Strings.nullToEmpty(taskContext.get(TASK_ATTRIBUTES_NETWORK_INTERFACE_ID));
        taskDocument.containerIp = Strings.nullToEmpty(taskContext.get(TASK_ATTRIBUTES_CONTAINER_IP));
        taskDocument.networkInterfaceIndex = Strings.nullToEmpty(taskContext.get(TASK_ATTRIBUTES_NETWORK_INTERFACE_INDEX));
        if (jobDescriptor.getNetworkConfiguration() != null) {
            taskDocument.networkMode = jobDescriptor.getNetworkConfiguration().getNetworkModeName();
        }
    }
}
| 1,473 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TaskPublisherRetryUtil.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.common.util.rx.RetryHandlerBuilder;
import reactor.core.scheduler.Schedulers;
import reactor.util.retry.Retry;
/**
 * Shared retry settings and factory for the exponential backoff {@link Retry} handlers used by
 * the task publisher streams.
 */
public class TaskPublisherRetryUtil {

    public static final long INITIAL_RETRY_DELAY_MS = 500;
    public static final long MAX_RETRY_DELAY_MS = 2_000;

    // Static utility holder; not meant to be instantiated.
    private TaskPublisherRetryUtil() {
    }

    /**
     * Builds an exponential backoff retry handler running on the elastic scheduler.
     *
     * @param initialRetryDelayMillis delay before the first retry
     * @param maxRetryDelayMillis     upper bound for the backoff delay
     * @param maxRetries              maximum number of retries; a negative value means retry forever
     */
    public static Retry buildRetryHandler(long initialRetryDelayMillis,
                                          long maxRetryDelayMillis, int maxRetries) {
        RetryHandlerBuilder retryHandlerBuilder = RetryHandlerBuilder.retryHandler();
        if (maxRetries < 0) {
            retryHandlerBuilder.withUnlimitedRetries();
        } else {
            retryHandlerBuilder.withRetryCount(maxRetries);
        }
        return retryHandlerBuilder
                .withDelay(initialRetryDelayMillis, maxRetryDelayMillis, TimeUnit.MILLISECONDS)
                .withReactorScheduler(Schedulers.elastic())
                .buildRetryExponentialBackoff();
    }
}
| 1,474 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TitusClientImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import com.github.benmanes.caffeine.cache.AsyncCacheLoader;
import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceFutureStub;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskId;
import com.netflix.titus.supplementary.taskspublisher.es.EsTaskPublisherMetrics;
import io.grpc.Metadata;
import io.grpc.stub.AbstractStub;
import io.grpc.stub.MetadataUtils;
import io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import static com.netflix.titus.runtime.endpoint.metadata.V3HeaderInterceptor.CALLER_ID_KEY;
/**
 * {@link TitusClient} implementation backed by the Titus job management gRPC API. Jobs fetched by
 * id are cached in a bounded async Caffeine cache, which is also warmed by job updates observed on
 * the event stream. API call/update counters are exposed as polled gauges.
 */
public class TitusClientImpl implements TitusClient {

    private static final Logger logger = LoggerFactory.getLogger(TitusClientImpl.class);
    private static final String CLIENT_ID = "tasksPublisher";
    private static final int MAX_CACHE_SIZE = 40000;

    private final JobManagementServiceStub jobManagementService;
    private final JobManagementServiceFutureStub jobManagementServiceFutureStub;
    private final Registry registry;

    // Counters below are registered as PolledMeter gauges in configureMetrics().
    private final AtomicInteger numJobUpdates = new AtomicInteger(0);
    private final AtomicInteger numTaskUpdates = new AtomicInteger(0);
    private final AtomicInteger numSnapshotUpdates = new AtomicInteger(0);
    // NOTE(review): registered as a gauge but never incremented anywhere in this class.
    private final AtomicInteger numMissingJobUpdate = new AtomicInteger(0);
    private final AtomicInteger apiErrors = new AtomicInteger(0);

    private final AsyncLoadingCache<String, Job> jobs;

    public TitusClientImpl(JobManagementServiceStub jobManagementService,
                           JobManagementServiceFutureStub jobManagementServiceFutureStub,
                           Registry registry) {
        this.jobManagementService = jobManagementService;
        this.jobManagementServiceFutureStub = jobManagementServiceFutureStub;
        this.registry = registry;
        jobs = buildCacheForJobs();
        configureMetrics();
    }

    /**
     * Fetches a single task by id. The returned {@link Mono} emits the task, or an error if the
     * gRPC call fails.
     */
    @Override
    public Mono<Task> getTask(String taskId) {
        logger.debug("Getting Task information about taskId {}", taskId);
        return Mono.create(sink -> attachCallerId(jobManagementService, CLIENT_ID)
                .findTask(TaskId.newBuilder().setId(taskId).build(), new StreamObserver<Task>() {
                    @Override
                    public void onNext(Task task) {
                        sink.success(task);
                    }

                    @Override
                    public void onError(Throwable t) {
                        // Pass the throwable to the logger as well, so the stack trace is not lost.
                        logger.error("Error fetching task information for task ID = {}", taskId, t);
                        apiErrors.incrementAndGet();
                        sink.error(t);
                    }

                    @Override
                    public void onCompleted() {
                    }
                }));
    }

    /**
     * Streams job and task updates via {@code observeJobs}. Job updates also refresh the local job
     * cache; snapshot markers are counted but not emitted downstream.
     */
    @Override
    public Flux<JobOrTaskUpdate> getJobAndTaskUpdates() {
        return Flux.create(sink -> attachCallerId(jobManagementService, CLIENT_ID)
                .observeJobs(ObserveJobsQuery.newBuilder().build(), new StreamObserver<JobChangeNotification>() {
                    @Override
                    public void onNext(JobChangeNotification jobChangeNotification) {
                        switch (jobChangeNotification.getNotificationCase()) {
                            case JOBUPDATE:
                                final Job job = jobChangeNotification.getJobUpdate().getJob();
                                // Warm the cache so subsequent getJobById() calls avoid a findJob RPC.
                                jobs.put(job.getId(), CompletableFuture.completedFuture(job));
                                logger.debug("<{}> JobUpdate {}", Thread.currentThread().getName(), jobChangeNotification.getJobUpdate().getJob().getId());
                                sink.next(JobOrTaskUpdate.jobUpdate(job));
                                numJobUpdates.incrementAndGet();
                                break;
                            case TASKUPDATE:
                                logger.debug("<{}> TaskUpdate {}", Thread.currentThread().getName(), jobChangeNotification.getTaskUpdate().getTask().getId());
                                final Task task = jobChangeNotification.getTaskUpdate().getTask();
                                sink.next(JobOrTaskUpdate.taskUpdate(task));
                                numTaskUpdates.incrementAndGet();
                                break;
                            case SNAPSHOTEND:
                                logger.info("<{}> SnapshotEnd {}", Thread.currentThread().getName(), jobChangeNotification);
                                numSnapshotUpdates.incrementAndGet();
                                break;
                            default:
                                logger.error("<{}> Unknown Notification ? {}", Thread.currentThread().getName(), jobChangeNotification.getNotificationCase());
                        }
                    }

                    @Override
                    public void onError(Throwable t) {
                        logger.error("Exception in ObserveJobs :: ", t);
                        apiErrors.incrementAndGet();
                        sink.error(t);
                    }

                    @Override
                    public void onCompleted() {
                        logger.info("STREAM completed ?");
                        sink.complete();
                    }
                }));
    }

    /**
     * Returns the job by id, served from the async cache; a miss triggers a {@code findJob} RPC.
     */
    @Override
    public Mono<Job> getJobById(String jobId) {
        return Mono.fromFuture(jobs.get(jobId));
    }

    // Registers the update/error counters as polled gauges under the ES publisher metric prefix.
    private void configureMetrics() {
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "titusApi.errors"))
                .monitorValue(apiErrors);
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "titusApi.numSnapshotUpdates"))
                .monitorValue(numSnapshotUpdates);
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "titusApi.numTaskUpdates"))
                .monitorValue(numTaskUpdates);
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "titusApi.numJobUpdates"))
                .monitorValue(numJobUpdates);
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "titusApi.numMissingJobUpdate"))
                .monitorValue(numMissingJobUpdate);
    }

    // Attaches the caller-id header so server-side logs/metrics can attribute these calls.
    private <STUB extends AbstractStub<STUB>> STUB attachCallerId(STUB serviceStub, String callerId) {
        Metadata metadata = new Metadata();
        metadata.put(CALLER_ID_KEY, callerId);
        return serviceStub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(metadata));
    }

    // Bounded async cache whose loader resolves a job id via the findJob RPC.
    private AsyncLoadingCache<String, Job> buildCacheForJobs() {
        return Caffeine.newBuilder()
                .maximumSize(MAX_CACHE_SIZE)
                .buildAsync(new AsyncCacheLoader<String, Job>() {
                    @Nonnull
                    @Override
                    public CompletableFuture<Job> asyncLoad(@Nonnull String jobId, @Nonnull Executor executor) {
                        ListenableFuture<Job> jobFuture = jobManagementServiceFutureStub.findJob(JobId.newBuilder().setId(jobId).build());
                        return toCompletableFuture(jobFuture, executor);
                    }
                });
    }

    /**
     * Adapts a Guava {@link ListenableFuture} to a {@link CompletableFuture}, propagating
     * cancellation from the CompletableFuture back to the ListenableFuture.
     */
    private <T> CompletableFuture<T> toCompletableFuture(ListenableFuture<T> listenableFuture, Executor executor) {
        CompletableFuture<T> completableFuture = new CompletableFuture<T>() {
            @Override
            public boolean cancel(boolean mayInterruptIfRunning) {
                listenableFuture.cancel(mayInterruptIfRunning);
                return super.cancel(mayInterruptIfRunning);
            }
        };
        Futures.addCallback(listenableFuture, new FutureCallback<T>() {
            @Override
            public void onSuccess(@Nullable T result) {
                completableFuture.complete(result);
            }

            @Override
            public void onFailure(Throwable t) {
                completableFuture.completeExceptionally(t);
            }
        }, executor);
        return completableFuture;
    }
}
| 1,475 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/TaskEventsGenerator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher;
import java.util.HashMap;
import java.util.Map;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.supplementary.taskspublisher.es.ElasticSearchUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.ConnectableFlux;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
public class TaskEventsGenerator {
private static final int THREAD_POOL_LIMIT = 500;
private final Logger logger = LoggerFactory.getLogger(TaskEventsGenerator.class);
private final Map<String, String> taskDocumentBaseContext;
private final TitusRuntime titusRuntime;
private final TitusClient titusClient;
private final Scheduler scheduler;
private ConnectableFlux<TaskDocument> taskEvents;
public TaskEventsGenerator(TitusClient titusClient,
Map<String, String> taskDocumentBaseContext,
TitusRuntime titusRuntime) {
this.titusClient = titusClient;
this.taskDocumentBaseContext = taskDocumentBaseContext;
this.titusRuntime = titusRuntime;
this.scheduler = Schedulers.newBoundedElastic(THREAD_POOL_LIMIT, Integer.MAX_VALUE, "taskEventsGenerator", 60, true);
buildEventStream();
}
public void shutdown() {
scheduler.dispose();
}
public ConnectableFlux<TaskDocument> getTaskEvents() {
return taskEvents;
}
private void buildEventStream() {
taskEvents = titusClient.getJobAndTaskUpdates()
.publishOn(scheduler)
.flatMap(jobOrTaskUpdate -> jobOrTaskUpdate.hasTask() ? Flux.just(jobOrTaskUpdate.getTask()) : Flux.empty())
.map(task -> {
final Mono<Job> jobById = titusClient.getJobById(task.getJobId());
return Pair.of(task, jobById);
})
.flatMap(taskMonoPair -> {
final Task task = taskMonoPair.getLeft();
return taskMonoPair.getRight()
.flatMap(job -> {
try {
final com.netflix.titus.api.jobmanager.model.job.Job coreJob = GrpcJobManagementModelConverters.toCoreJob(job);
final com.netflix.titus.api.jobmanager.model.job.Task coreTask = GrpcJobManagementModelConverters.toCoreTask(coreJob, task);
return Mono.just(TaskDocument.fromV3Task(coreTask, coreJob, ElasticSearchUtils.DATE_FORMAT, buildTaskContext(task)));
} catch (Exception e) {
// If the mapping fails, we do not want to break the pipeline, and possible cause an infinite number
// of retries, each failing on the same bad job/task record. Instead, we log the error.
titusRuntime.getCodeInvariants().unexpectedError(
String.format("Cannot map Titus job/task to ES TaskDocument: job=%s, task=%s", job, task),
e
);
logger.warn("Cannot map Titus job/task to ES TaskDocument", e);
return Mono.empty();
}
}).flux();
})
.doOnError(error -> logger.error("TitusClient event stream error", error))
.retryWhen(TaskPublisherRetryUtil.buildRetryHandler(TaskPublisherRetryUtil.INITIAL_RETRY_DELAY_MS,
TaskPublisherRetryUtil.MAX_RETRY_DELAY_MS, -1))
.publish();
}
private Map<String, String> buildTaskContext(Task task) {
String stack = "";
if (task.getTaskContextMap().containsKey(JobAttributes.JOB_ATTRIBUTES_CELL)) {
stack = task.getTaskContextMap().get(JobAttributes.JOB_ATTRIBUTES_CELL);
}
final HashMap<String, String> taskContext = new HashMap<>(taskDocumentBaseContext);
taskContext.put("stack", stack);
return taskContext;
}
}
| 1,476 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/package-info.java | /**
 * Components that publish Titus task state to external sinks (currently Elasticsearch).
 * The package streams job and task updates from the Titus gRPC API via
 * {@link com.netflix.titus.supplementary.taskspublisher.TitusClient}, converts them into
 * {@link com.netflix.titus.supplementary.taskspublisher.TaskDocument} instances, and pushes
 * them to the configured Elasticsearch cluster.
 */
package com.netflix.titus.supplementary.taskspublisher; | 1,477 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/config/TasksPublisherConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher.config;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.ext.elasticsearch.DefaultEsClient;
import com.netflix.titus.ext.elasticsearch.DefaultEsWebClientFactory;
import com.netflix.titus.ext.elasticsearch.EsClient;
import com.netflix.titus.ext.elasticsearch.EsClientConfiguration;
import com.netflix.titus.ext.elasticsearch.EsWebClientFactory;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceFutureStub;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import com.netflix.titus.supplementary.taskspublisher.TaskDocument;
import com.netflix.titus.supplementary.taskspublisher.TaskEventsGenerator;
import com.netflix.titus.supplementary.taskspublisher.TitusClient;
import com.netflix.titus.supplementary.taskspublisher.TitusClientImpl;
import com.netflix.titus.supplementary.taskspublisher.es.EsPublisher;
import io.grpc.ManagedChannel;
import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring wiring for the tasks publisher: gRPC stubs against the Titus API, the ES client, and the
 * publisher pipeline. All beans are {@code @ConditionalOnMissingBean} so tests or embedding
 * applications can override them.
 */
@Configuration
public class TasksPublisherConfiguration {

    private static final String GRPC_CLIENT_AGENT = "EsTaskPublisher";
    private static final int GRPC_KEEP_ALIVE_TIME = 5;
    private static final int GRPC_KEEP_ALIVE_TIMEOUT = 10;

    // @Autowired was redundant on these @Value fields; @Value alone drives the injection.
    @Value("${titus.api.host: 'localhost'}")
    private String titusApiHost;

    @Value("${titus.api.port: 7001}")
    private int titusApiPort;

    @Autowired
    private EsPublisherConfiguration esPublisherConfiguration;

    @Autowired
    private EsClientConfiguration esClientConfiguration;

    // Lazily created, shared gRPC channel. getTitusGrpcChannel() is private (not a @Bean method),
    // so calls to it are NOT intercepted by the @Configuration CGLIB proxy; without memoization
    // each stub bean would build its own ManagedChannel that is never shut down.
    private volatile ManagedChannel titusGrpcChannel;

    @Bean
    @ConditionalOnMissingBean
    public JobManagementServiceStub getJobManagementServiceStub() {
        return JobManagementServiceGrpc.newStub(getTitusGrpcChannel());
    }

    @Bean
    @ConditionalOnMissingBean
    public JobManagementServiceFutureStub getJobManagementServiceFutureStub() {
        return JobManagementServiceGrpc.newFutureStub(getTitusGrpcChannel());
    }

    @Bean
    @ConditionalOnMissingBean
    public TitusClient getTitusClient() {
        return new TitusClientImpl(getJobManagementServiceStub(), getJobManagementServiceFutureStub(), new DefaultRegistry());
    }

    @Bean
    @ConditionalOnMissingBean
    public EsClient<TaskDocument> getEsClient() {
        return new DefaultEsClient<>(getEsWebClientFactory());
    }

    @Bean
    @ConditionalOnMissingBean
    public EsWebClientFactory getEsWebClientFactory() {
        return new DefaultEsWebClientFactory(esClientConfiguration);
    }

    @Bean
    @ConditionalOnMissingBean
    public TaskEventsGenerator getTaskEventsGenerator(TitusRuntime titusRuntime) {
        return new TaskEventsGenerator(getTitusClient(), Collections.emptyMap(), titusRuntime);
    }

    @Bean
    @ConditionalOnMissingBean
    public EsPublisher getEsPublisher(TitusRuntime titusRuntime) {
        return new EsPublisher(getTaskEventsGenerator(titusRuntime), getEsClient(), esPublisherConfiguration, titusRuntime.getRegistry());
    }

    /**
     * Returns the single shared plaintext channel to the Titus API, creating it on first use
     * (double-checked locking; @Value fields are only populated after construction, so the
     * channel cannot be built eagerly in a constructor).
     */
    private ManagedChannel getTitusGrpcChannel() {
        ManagedChannel channel = titusGrpcChannel;
        if (channel == null) {
            synchronized (this) {
                channel = titusGrpcChannel;
                if (channel == null) {
                    channel = NettyChannelBuilder.forAddress(titusApiHost, titusApiPort)
                            .defaultLoadBalancingPolicy("round_robin")
                            .keepAliveTime(GRPC_KEEP_ALIVE_TIME, TimeUnit.SECONDS)
                            .keepAliveTimeout(GRPC_KEEP_ALIVE_TIMEOUT, TimeUnit.SECONDS)
                            .userAgent(GRPC_CLIENT_AGENT)
                            .negotiationType(NegotiationType.PLAINTEXT)
                            .build();
                    titusGrpcChannel = channel;
                }
            }
        }
        return channel;
    }
}
| 1,478 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/config/EsPublisherConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher.config;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
/**
 * Property-backed settings for the Elasticsearch task publisher: deployment coordinates
 * (environment, account, region), target index naming, and the publish on/off switch.
 */
@Configuration
public class EsPublisherConfiguration {

    @Value("${netflix.environment}")
    private String env;

    @Value("${netflix.account}")
    private String account;

    @Value("${netflix.region}")
    private String region;

    @Value("${titus.es.taskDocumentEsIndexDateSuffixPattern}")
    private String taskDocumentEsIndexDateSuffixPattern;

    @Value("${titus.es.taskDocumentEsIndexName}")
    private String taskDocumentEsIndexName;

    @Value("${titus.es.publish.enabled}")
    private boolean enabled;

    /** Environment name from the {@code netflix.environment} property. */
    public String getEnv() {
        return env;
    }

    /** Account identifier from the {@code netflix.account} property. */
    public String getAccount() {
        return account;
    }

    /** Region from the {@code netflix.region} property. */
    public String getRegion() {
        return region;
    }

    /** Date pattern appended to the index name to produce a dated ES index. */
    public String getTaskDocumentEsIndexDateSuffixPattern() {
        return taskDocumentEsIndexDateSuffixPattern;
    }

    /** Base name of the ES index that task documents are written to. */
    public String getTaskDocumentEsIndexName() {
        return taskDocumentEsIndexName;
    }

    /** Whether publishing to Elasticsearch is enabled. */
    public boolean isEnabled() {
        return enabled;
    }
}
| 1,479 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/config/package-info.java | /**
* Package by convention in which {@link org.springframework.context.annotation.Configuration} classes
* will reside. It is recommended to keep all of this in a single location for readability, as well as
* ease of classpath-scanning when used in test cases. The {@link org.springframework.boot.autoconfigure.SpringBootApplication}
* annotation will automatically detect these configurations.
* @see <a href="https://docs.spring.io/spring/docs/current/spring-framework-reference/htmlsingle/#beans-java-basic-concepts">Spring @Configuration Reference Docs</a>
* @see <a href="https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#using-boot-structuring-your-code">Spring Boot Project Structure Best Practices</a>
*/
package com.netflix.titus.supplementary.taskspublisher.config; | 1,480 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/es/EsTaskPublisherMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher.es;
/**
 * Metric name constants for the Elasticsearch task publisher. Pure constants holder —
 * not instantiable.
 */
public final class EsTaskPublisherMetrics {
    /** Root prefix for all Titus metrics emitted by this component. */
    public static final String METRIC_ROOT = "titus.";
    /** Prefix for all ES-publishing metrics (errors, counts, timestamps). */
    public static final String METRIC_ES_PUBLISHER = METRIC_ROOT + "tasks.es.publish.";

    // Utility class: prevent instantiation.
    private EsTaskPublisherMetrics() {
    }
}
| 1,481 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/es/ElasticSearchUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher.es;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;
/**
 * Helpers for building Elasticsearch index names. Pure utility class — not instantiable.
 */
public final class ElasticSearchUtils {

    /**
     * Shared ISO-like timestamp formatter pinned to UTC.
     * NOTE(review): {@link SimpleDateFormat} is NOT thread-safe; callers are assumed to use
     * this instance from a single thread — confirm before sharing it across threads.
     */
    public static final SimpleDateFormat DATE_FORMAT;

    static {
        DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
        DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("UTC"));
    }

    // Utility class: prevent instantiation.
    private ElasticSearchUtils() {
    }

    /**
     * Builds the index name for "now": the prefix followed by the current date formatted
     * with the caller-supplied suffix format.
     *
     * @param esIndexPrefix         index name prefix, e.g. {@code "titus_tasks_"}
     * @param indexDateFormatSuffix date format producing the suffix (uses its own time zone)
     * @return concatenated index name
     */
    public static String buildEsIndexNameCurrent(String esIndexPrefix, SimpleDateFormat indexDateFormatSuffix) {
        return String.format("%s%s", esIndexPrefix, indexDateFormatSuffix.format(new Date()));
    }
}
| 1,482 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher/src/main/java/com/netflix/titus/supplementary/taskspublisher/es/EsPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.taskspublisher.es;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import com.netflix.spectator.api.Functions;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.ext.elasticsearch.EsClient;
import com.netflix.titus.supplementary.taskspublisher.TaskDocument;
import com.netflix.titus.supplementary.taskspublisher.TaskEventsGenerator;
import com.netflix.titus.supplementary.taskspublisher.TaskPublisherRetryUtil;
import com.netflix.titus.supplementary.taskspublisher.TasksPublisher;
import com.netflix.titus.supplementary.taskspublisher.config.EsPublisherConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.BufferOverflowStrategy;
import reactor.core.publisher.ConnectableFlux;
/**
 * Publishes task documents to Elasticsearch. On leader activation it subscribes to the
 * connectable task event stream from {@link TaskEventsGenerator}, batches documents and
 * bulk-indexes them into a date-suffixed index. Error/throughput counters are exposed
 * through the Spectator registry.
 */
public class EsPublisher implements TasksPublisher, LeaderActivationListener {
    private static final Logger logger = LoggerFactory.getLogger(EsPublisher.class);

    // Maximum number of concurrent in-flight bulk index requests (flatMap concurrency).
    private static final int MAX_CONCURRENCY = 20;
    // ES record type passed with every bulk index request.
    private static final String ES_RECORD_TYPE = "default";
    // A batch is flushed once it reaches this many documents (or the 5s buffer timeout fires).
    private static final int MAX_BATCH_SIZE = 100;
    // Backpressure buffer capacity; overflow fails the stream (BufferOverflowStrategy.ERROR)
    // and is then handled by the outer infinite retry.
    private static final int MAX_BACKPRESSURE_BUFFER = 1000;

    private final TaskEventsGenerator taskEventsGenerator;
    private final EsClient<TaskDocument> esClient;
    private final EsPublisherConfiguration esPublisherConfiguration;
    private final Registry registry;

    // Counters exported via PolledMeter in configureMetrics().
    private AtomicInteger numErrors = new AtomicInteger(0);
    private AtomicInteger numTasksUpdated = new AtomicInteger(0);
    // Wall-clock time of the last successful bulk response; monitored as an age gauge.
    private AtomicLong lastPublishedTimestamp;

    // Both set in activate() and disposed in deactivate().
    private Disposable subscription;
    private Disposable taskEventsSourceConnection;

    // NOTE(review): SimpleDateFormat is not thread-safe; this instance is used from the
    // reactive pipeline — assumed to be confined to one subscriber thread. Confirm.
    private final SimpleDateFormat indexDateFormat;

    public EsPublisher(TaskEventsGenerator taskEventsGenerator, EsClient<TaskDocument> esClient,
                       EsPublisherConfiguration esPublisherConfiguration, Registry registry) {
        this.taskEventsGenerator = taskEventsGenerator;
        this.esClient = esClient;
        this.esPublisherConfiguration = esPublisherConfiguration;
        this.indexDateFormat = new SimpleDateFormat(esPublisherConfiguration.getTaskDocumentEsIndexDateSuffixPattern());
        this.registry = registry;
        configureMetrics();
    }

    /**
     * Starts consuming task events: buffers up to MAX_BATCH_SIZE documents or 5 seconds,
     * bulk-indexes each batch with up to 3 retries, and retries the whole pipeline
     * indefinitely on error. Finally connects the upstream so events begin flowing.
     */
    @Override
    public void activate() {
        ConnectableFlux<TaskDocument> taskEvents = taskEventsGenerator.getTaskEvents();
        subscription = taskEvents.bufferTimeout(MAX_BATCH_SIZE, Duration.ofSeconds(5))
                .onBackpressureBuffer(MAX_BACKPRESSURE_BUFFER, BufferOverflowStrategy.ERROR)
                .flatMap(taskDocuments ->
                                // Index into the current date-suffixed index; each batch retried up to 3 times.
                                esClient.bulkIndexDocuments(
                                        taskDocuments,
                                        ElasticSearchUtils.buildEsIndexNameCurrent(esPublisherConfiguration.getTaskDocumentEsIndexName(), indexDateFormat),
                                        ES_RECORD_TYPE)
                                        .retryWhen(TaskPublisherRetryUtil.buildRetryHandler(
                                                TaskPublisherRetryUtil.INITIAL_RETRY_DELAY_MS,
                                                TaskPublisherRetryUtil.MAX_RETRY_DELAY_MS, 3)),
                        MAX_CONCURRENCY)
                .doOnError(e -> {
                    logger.error("Error in indexing documents (Retrying) : ", e);
                    numErrors.incrementAndGet();
                })
                // -1 => unlimited retries: the publisher keeps running across transient failures.
                .retryWhen(TaskPublisherRetryUtil.buildRetryHandler(TaskPublisherRetryUtil.INITIAL_RETRY_DELAY_MS,
                        TaskPublisherRetryUtil.MAX_RETRY_DELAY_MS, -1))
                .subscribe(bulkIndexResp -> {
                            logger.info("Received bulk response for {} items", bulkIndexResp.getItems().size());
                            lastPublishedTimestamp.set(registry.clock().wallTime());
                            bulkIndexResp.getItems().forEach(bulkEsIndexRespItem -> {
                                String indexedItemId = bulkEsIndexRespItem.getIndex().getId();
                                logger.info("Index result <{}> for task ID {}", bulkEsIndexRespItem.getIndex().getResult(), indexedItemId);
                                numTasksUpdated.incrementAndGet();
                            });
                        },
                        e -> logger.error("Error in indexing documents ", e));
        // Connect the upstream so the (connectable) source starts emitting.
        taskEventsSourceConnection = taskEvents.connect();
    }

    /** Stops publishing by disposing both the subscription and the upstream connection. */
    @Override
    public void deactivate() {
        ReactorExt.safeDispose(subscription, taskEventsSourceConnection);
    }

    @Override
    public int getNumErrorsInPublishing() {
        return numErrors.get();
    }

    @Override
    public int getNumTasksPublished() {
        return numTasksUpdated.get();
    }

    // Registers gauges for error count, updated-task count, and the age of the last publish.
    private void configureMetrics() {
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "errors"))
                .monitorValue(numErrors);
        PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "numTasksUpdated"))
                .monitorValue(numTasksUpdated);
        lastPublishedTimestamp = PolledMeter.using(registry)
                .withId(registry.createId(EsTaskPublisherMetrics.METRIC_ES_PUBLISHER + "lastPublishedTimestamp"))
                .monitorValue(new AtomicLong(registry.clock().wallTime()), Functions.AGE);
    }
}
| 1,483 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher-springboot/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher-springboot/src/main/java/com/netflix/titus/supplementary/taskspublisher/TasksPublisherMain.java | package com.netflix.titus.supplementary.taskspublisher;
import com.netflix.titus.supplementary.taskspublisher.config.TasksPublisherConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Import;
/**
 * Spring Boot entry point for the tasks publisher. All beans are wired through
 * {@link TasksPublisherConfiguration}.
 */
@SpringBootApplication
@Import({TasksPublisherConfiguration.class})
public class TasksPublisherMain {

    // The previously declared logger field was never used and has been removed.

    public static void main(String[] args) {
        SpringApplication.run(TasksPublisherMain.class, args);
    }
}
| 1,484 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher-springboot/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/tasks-publisher-springboot/src/main/java/com/netflix/titus/supplementary/taskspublisher/package-info.java | /**
* Entry point for this application. Includes a main method for local debugging
* as well as an entrypoint to be invoked by Servlet 3.0+ containers provided by the
* {@link org.springframework.web.WebApplicationInitializer} interface.
* @see <a href="https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#using-boot-using-springbootapplication-annotation">@SpringBootApplication Reference</a>
* @see <a href="https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-embedded-container-context-initializer">Spring Servlet 3.0+ Support via WebApplicationInitializer</a>
*/
package com.netflix.titus.supplementary.taskspublisher; | 1,485 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/TestableNodeDataResolver.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
/**
 * In-memory {@link NodeDataResolver} for tests: nodes are grouped by server group id and
 * can be added or looked up directly by test code.
 */
public class TestableNodeDataResolver implements NodeDataResolver {

    // serverGroupId -> (nodeId -> node)
    private final ConcurrentMap<String, ConcurrentMap<String, TitusNode>> nodesByServerGroups = new ConcurrentHashMap<>();

    /** Returns all known nodes keyed by node id, across every server group. */
    @Override
    public Map<String, TitusNode> resolve() {
        Map<String, TitusNode> all = new HashMap<>();
        nodesByServerGroups.values().forEach(all::putAll);
        return all;
    }

    /** Test data is always considered fresh. */
    @Override
    public long getStalenessMs() {
        return 0;
    }

    /** Nodes of a single server group, or null if the group is unknown. */
    public Map<String, TitusNode> getNodes(String serverGroupId) {
        return nodesByServerGroups.get(serverGroupId);
    }

    /** Adds (or replaces) a node under its server group. */
    public void addNode(TitusNode node) {
        nodesByServerGroups.computeIfAbsent(node.getServerGroupId(), id -> new ConcurrentHashMap<>()).put(node.getId(), node);
    }

    /** Finds a node by id in any server group, or returns null if absent. */
    public TitusNode getNode(String agentId) {
        for (ConcurrentMap<String, TitusNode> nodes : nodesByServerGroups.values()) {
            // Single lookup instead of the original containsKey + get pair.
            TitusNode node = nodes.get(agentId);
            if (node != null) {
                return node;
            }
        }
        return null;
    }
}
| 1,486 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/RelocationConnectorStubs.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.runtime.connector.eviction.EvictionDataReplicator;
import com.netflix.titus.runtime.connector.eviction.EvictionServiceClient;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.JobManagementClient;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.testkit.model.eviction.EvictionComponentStub;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.StaticApplicationContext;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Test fixture wiring stubbed job, eviction and node connectors together so relocation
 * components can be exercised without a running Titus stack.
 */
public class RelocationConnectorStubs {

    private final TitusRuntime titusRuntime;

    // Counter for handing out unique fake IP addresses. Test usage is assumed to be
    // single-threaded; the increment itself is not atomic.
    private volatile int ipCounter;

    private final TestableNodeDataResolver nodeDataResolver = new TestableNodeDataResolver();
    private final JobComponentStub jobComponentStub;
    private final ReadOnlyJobOperations jobOperations;
    private final EvictionComponentStub evictionComponentStub;

    public RelocationConnectorStubs() {
        this(TitusRuntimes.test());
    }

    public RelocationConnectorStubs(TitusRuntime titusRuntime) {
        this.titusRuntime = titusRuntime;
        this.jobComponentStub = new JobComponentStub(titusRuntime);
        this.jobOperations = jobComponentStub.getJobOperations();
        this.evictionComponentStub = new EvictionComponentStub(jobComponentStub, titusRuntime);
    }

    /**
     * Builds a Spring {@link ApplicationContext} pre-populated with the stubbed beans
     * (runtime, resolvers, operations and zero-staleness replicator mocks) that relocation
     * components expect to find.
     */
    public ApplicationContext getApplicationContext() {
        StaticApplicationContext context = new StaticApplicationContext();
        context.getBeanFactory().registerSingleton("titusRuntime", titusRuntime);
        context.getBeanFactory().registerSingleton("nodeDataResolver", nodeDataResolver);
        context.getBeanFactory().registerSingleton("readOnlyJobOperations", jobOperations);
        context.getBeanFactory().registerSingleton("readOnlyEvictionOperations", evictionComponentStub.getEvictionOperations());
        context.getBeanFactory().registerSingleton("evictionServiceClient", evictionComponentStub.getEvictionServiceClient());
        context.getBeanFactory().registerSingleton("jobManagementClient", mock(JobManagementClient.class));
        JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
        when(jobDataReplicator.getStalenessMs()).thenReturn(0L);
        context.getBeanFactory().registerSingleton("jobDataReplicator", jobDataReplicator);
        EvictionDataReplicator evictionDataReplicator = mock(EvictionDataReplicator.class);
        when(evictionDataReplicator.getStalenessMs()).thenReturn(0L);
        context.getBeanFactory().registerSingleton("evictionDataReplicator", evictionDataReplicator);
        context.refresh();
        return context;
    }

    public TitusRuntime getTitusRuntime() {
        return titusRuntime;
    }

    public ReadOnlyJobOperations getJobOperations() {
        return jobComponentStub.getJobOperations();
    }

    public ReadOnlyEvictionOperations getEvictionOperations() {
        return evictionComponentStub.getEvictionOperations();
    }

    public EvictionServiceClient getEvictionServiceClient() {
        return evictionComponentStub.getEvictionServiceClient();
    }

    public NodeDataResolver getNodeDataResolver() {
        return nodeDataResolver;
    }

    /** Registers {@code size} nodes for an instance group whose nodes do not require relocation. */
    public RelocationConnectorStubs addActiveInstanceGroup(String serverGroupId, int size) {
        return addInstanceGroup(serverGroupId, size, false);
    }

    /** Registers {@code size} nodes for an instance group whose nodes all require relocation. */
    public RelocationConnectorStubs addRemovableInstanceGroup(String serverGroupId, int size) {
        return addInstanceGroup(serverGroupId, size, true);
    }

    // Shared implementation of the two instance-group builders above (previously duplicated).
    private RelocationConnectorStubs addInstanceGroup(String serverGroupId, int size, boolean relocationRequired) {
        for (int i = 0; i < size; i++) {
            nodeDataResolver.addNode(TitusNode.newBuilder()
                    .withId(serverGroupId + "#" + i)
                    .withIpAddress(nextIpAddress())
                    .withServerGroupId(serverGroupId)
                    .withRelocationRequired(relocationRequired)
                    .build()
            );
        }
        return this;
    }

    public RelocationConnectorStubs addJob(Job<?> job) {
        jobComponentStub.createJobAndTasks(job);
        return this;
    }

    public void addJobAttribute(String jobId, String attributeName, Object attributeValue) {
        jobComponentStub.addJobAttribute(jobId, attributeName, "" + attributeValue);
    }

    public void addTaskAttribute(String taskId, String attributeName, Object attributeValue) {
        jobComponentStub.addTaskAttribute(taskId, attributeName, "" + attributeValue);
    }

    /** Places the given tasks round-robin over the nodes of an instance group. */
    public RelocationConnectorStubs place(String instanceGroupId, Task... tasks) {
        List<TitusNode> nodes = new ArrayList<>(nodeDataResolver.getNodes(instanceGroupId).values());
        int counter = 0;
        for (Task task : tasks) {
            TitusNode node = nodes.get(counter++ % nodes.size());
            jobComponentStub.place(task.getId(), node.getId(), node.getIpAddress());
        }
        return this;
    }

    /** Places all given tasks onto one specific node. */
    public RelocationConnectorStubs placeOnAgent(String agentId, Task... tasks) {
        TitusNode agent = nodeDataResolver.getNode(agentId);
        for (Task task : tasks) {
            jobComponentStub.place(task.getId(), agent.getId(), agent.getIpAddress());
        }
        return this;
    }

    /** Sets the eviction quota of an existing job; fails fast if the job is unknown. */
    public RelocationConnectorStubs setQuota(String jobId, int quota) {
        Preconditions.checkArgument(jobComponentStub.getJobOperations().getJob(jobId).isPresent());
        evictionComponentStub.setJobQuota(jobId, quota);
        return this;
    }

    /** Marks a known node as requiring relocation; fails fast if the node is unknown. */
    public void markNodeRelocationRequired(String nodeId) {
        TitusNode node = Preconditions.checkNotNull(nodeDataResolver.getNode(nodeId));
        nodeDataResolver.addNode(node.toBuilder().withRelocationRequired(true).build());
    }

    private String nextIpAddress() {
        return "1.1.1." + ipCounter++;
    }
}
| 1,487 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/TestDataFactory.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.RelocationLimitDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.fabric8.kubernetes.api.model.Node;
import io.fabric8.kubernetes.api.model.NodeBuilder;
import io.fabric8.kubernetes.api.model.NodeCondition;
import io.fabric8.kubernetes.api.model.NodeConditionBuilder;
import io.fabric8.kubernetes.api.model.Taint;
import io.fabric8.kubernetes.api.model.TaintBuilder;
import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
import io.fabric8.kubernetes.client.informers.cache.Indexer;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.ofBatchSize;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.NODE_LABEL_MACHINE_GROUP;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Static factory of test fixtures for relocation tests: disruption budgets, batch jobs,
 * stubbed node resolvers, and fabric8 Kubernetes node objects.
 */
public class TestDataFactory {

    public static final String ACTIVE_INSTANCE_GROUP_ID = "active1";
    public static final String REMOVABLE_INSTANCE_GROUP_ID = "removable1";

    // Utility class: prevent instantiation.
    private TestDataFactory() {
    }

    /** Budget with a self-managed policy and the given relocation grace time. */
    public static DisruptionBudget newSelfManagedDisruptionBudget(long relocationTimeMs) {
        return DisruptionBudget.newBuilder()
                .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder()
                        .withRelocationTimeMs(relocationTimeMs)
                        .build()
                )
                .build();
    }

    /** Budget with a fixed relocation-count limit policy. */
    public static DisruptionBudget newRelocationLimitDisruptionBudget(int limit) {
        return DisruptionBudget.newBuilder()
                .withDisruptionBudgetPolicy(RelocationLimitDisruptionBudgetPolicy.newBuilder()
                        .withLimit(limit)
                        .build()
                )
                .build();
    }

    /** Batch job of the given size and id with the supplied disruption budget. */
    public static Job<BatchJobExt> newBatchJob(String jobId, int size, DisruptionBudget disruptionBudget) {
        return JobGenerator.batchJobs(oneTaskBatchJobDescriptor().toBuilder()
                .withDisruptionBudget(disruptionBudget)
                .build()
                .but(ofBatchSize(size))
        ).getValue().but(JobFunctions.withJobId(jobId));
    }

    public static RelocationConnectorStubs activeRemovableSetup() {
        return activeRemovableSetup(TitusRuntimes.test());
    }

    /** Stub setup with one active and one removable instance group of 10 nodes each. */
    public static RelocationConnectorStubs activeRemovableSetup(TitusRuntime titusRuntime) {
        return new RelocationConnectorStubs(titusRuntime)
                .addActiveInstanceGroup(ACTIVE_INSTANCE_GROUP_ID, 10)
                .addRemovableInstanceGroup(REMOVABLE_INSTANCE_GROUP_ID, 10);
    }

    /** Mocked Fabric8 connector whose node informer/indexer serves the given nodes by name. */
    public static Fabric8IOConnector mockFabric8IOConnector(Node... nodes) {
        Fabric8IOConnector fabric8IOConnector = mock(Fabric8IOConnector.class);
        SharedIndexInformer<Node> nodeInformer = mock(SharedIndexInformer.class);
        Indexer<Node> nodeIndexer = mock(Indexer.class);
        List<Node> nodeIndex = new ArrayList<>(Arrays.asList(nodes));
        when(fabric8IOConnector.getNodeInformer()).thenReturn(nodeInformer);
        when(nodeInformer.getIndexer()).thenReturn(nodeIndexer);
        when(nodeIndexer.list()).thenAnswer(invocation -> nodeIndex);
        when(nodeIndexer.getByKey(anyString())).thenAnswer(invocation -> {
            String key = invocation.getArgument(0);
            // Key lookup is by metadata name.
            Optional<Node> nodeOpt = nodeIndex.stream().filter(node -> node.getMetadata() != null &&
                    node.getMetadata().getName() != null && node.getMetadata().getName().equals(key)).findFirst();
            return nodeOpt.orElse(null);
        });
        return fabric8IOConnector;
    }

    /** New node with the given name, labeled as belonging to "serverGroup1". */
    public static Node newNode(String id) {
        return new NodeBuilder()
                .withNewMetadata()
                .withName(id)
                .withLabels(CollectionsExt.asMap(NODE_LABEL_MACHINE_GROUP, "serverGroup1"))
                .endMetadata()
                .withNewSpec()
                .endSpec()
                .withNewStatus()
                .endStatus()
                .build();
    }

    /** Appends a condition to the node's status. */
    public static void addNodeCondition(Node node, String conditionType, String conditionValue) {
        // Copy into a mutable list: the previous code added directly into the
        // Collections.emptyList() fallback, which is immutable and would throw
        // UnsupportedOperationException when the node had no conditions list.
        List<NodeCondition> conditions = new ArrayList<>(
                Evaluators.getOrDefault(node.getStatus().getConditions(), Collections.emptyList()));
        NodeCondition nodeCondition = new NodeConditionBuilder()
                .withType(conditionType)
                .withStatus(conditionValue)
                .withMessage("Msg for " + conditionType)
                .withReason("Reason for " + conditionType)
                .build();
        conditions.add(nodeCondition);
        node.getStatus().setConditions(conditions);
    }

    /** Appends a taint to the node's spec. */
    public static void addNodeTaint(Node node, String key, String value, String effect) {
        // Same immutable-fallback fix as addNodeCondition above.
        List<Taint> taints = new ArrayList<>(
                Evaluators.getOrDefault(node.getSpec().getTaints(), Collections.emptyList()));
        Taint taint = new TaintBuilder().withKey(key).withValue(value).withEffect(effect).build();
        taints.add(taint);
        node.getSpec().setTaints(taints);
    }
}
| 1,488 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/AbstractTaskRelocationTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.runtime.connector.eviction.EvictionServiceClient;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
/**
 * Base class for relocation tests. Exposes the stubbed connectors, test clock and
 * configuration that {@link RelocationConnectorStubs} wires together.
 */
public abstract class AbstractTaskRelocationTest {

    protected final RelocationConnectorStubs relocationConnectorStubs;
    protected final TitusRuntime titusRuntime;
    protected final TestClock clock;

    protected final RelocationConfiguration configuration = Archaius2Ext.newConfiguration(RelocationConfiguration.class);
    protected final Fabric8IOConnector kubeApiFacade = TestDataFactory.mockFabric8IOConnector();

    protected final NodeDataResolver nodeDataResolver;
    protected final ReadOnlyJobOperations jobOperations;
    protected final ReadOnlyEvictionOperations evictionOperations;
    protected final EvictionServiceClient evictionServiceClient;

    protected AbstractTaskRelocationTest(RelocationConnectorStubs relocationConnectorStubs) {
        this.relocationConnectorStubs = relocationConnectorStubs;

        TitusRuntime runtime = relocationConnectorStubs.getTitusRuntime();
        this.titusRuntime = runtime;
        // The stub runtime is built with a test clock, so this cast holds.
        this.clock = (TestClock) runtime.getClock();

        this.nodeDataResolver = relocationConnectorStubs.getNodeDataResolver();
        this.jobOperations = relocationConnectorStubs.getJobOperations();
        this.evictionOperations = relocationConnectorStubs.getEvictionOperations();
        this.evictionServiceClient = relocationConnectorStubs.getEvictionServiceClient();
    }
}
| 1,489 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/descheduler/TaskMigrationDeschedulerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.data.generator.MutableDataGenerator;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.runtime.RelocationAttributes;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.supplementary.relocation.RelocationConnectorStubs;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.model.DeschedulingFailure;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.testkit.model.job.JobGenerator;
import com.netflix.titus.testkit.model.job.JobTestFunctions;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.ofServiceSize;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.withApplicationName;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.withDisruptionBudget;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.withJobId;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.budget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.selfManagedPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.unlimitedRate;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobTestFunctions.toTaskMap;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code TaskMigrationDescheduler}: which tasks are selected for
 * eviction/migration given eviction quotas, relocation attributes on jobs/tasks/nodes,
 * agent fitness, and the system disruption window state.
 */
public class TaskMigrationDeschedulerTest {
private final TitusRuntime titusRuntime = TitusRuntimes.test();
// Test runtime exposes a controllable TestClock; the cast mirrors the other tests in this package.
private final TestClock clock = (TestClock) titusRuntime.getClock();
// Service jobs of size 4 with a self-managed disruption budget (30s relocation delay).
private final MutableDataGenerator<Job<ServiceJobExt>> jobGenerator = new MutableDataGenerator<>(
JobGenerator.serviceJobs(oneTaskServiceJobDescriptor().but(
ofServiceSize(4),
withDisruptionBudget(budget(selfManagedPolicy(30_000), unlimitedRate(), Collections.emptyList()))
))
);
// One active and one removable instance group (10 nodes each), plus three jobs to place on them.
// Note: jobs are drawn from jobGenerator in order, so stub construction order matters here.
private final RelocationConnectorStubs relocationConnectorStubs = new RelocationConnectorStubs()
.addActiveInstanceGroup("active1", 10)
.addRemovableInstanceGroup("removable1", 10)
.addJob(jobGenerator.getValue().but(withJobId("job1")).but(withApplicationName("app1")))
.addJob(jobGenerator.getValue().but(withJobId("job2")))
.addJob(jobGenerator.getValue().but(withJobId("jobToMigrate")));
private final ReadOnlyJobOperations jobOperations = relocationConnectorStubs.getJobOperations();
private final NodeDataResolver nodeDataResolver = relocationConnectorStubs.getNodeDataResolver();
@Before
public void setUp() {
// So it does not start at 0.
clock.advanceTime(Duration.ofDays(1));
}
// A job tagged "relocation required by immediately" with a future deadline must have its
// task reported as an immediate eviction, even on an active (non-removable) instance group.
@Test
public void testImmediateMigrations() {
relocationConnectorStubs.addJobAttribute("jobToMigrate", RelocationAttributes.RELOCATION_REQUIRED_BY_IMMEDIATELY, "" + (clock.wallTime() + 1));
Task task0 = jobOperations.getTasks("jobToMigrate").get(0);
relocationConnectorStubs.place("active1", task0);
Map<String, DeschedulingResult> immediateEvictions = newDescheduler(Collections.emptyMap()).findAllImmediateEvictions();
assertThat(immediateEvictions).hasSize(1).containsKey(task0.getId());
}
// Self-managed tasks with a relocation time far in the future must not be matched yet.
@Test
public void testSelfMigration() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("removable1", job1Task0);
relocationConnectorStubs.setQuota("job1", 1);
TaskRelocationPlan job1Task0Plan = TaskRelocationPlan.newBuilder()
.withTaskId(job1Task0.getId())
// Far-future deadline (Long.MAX_VALUE / 2 avoids overflow in downstream arithmetic).
.withRelocationTime(Long.MAX_VALUE / 2)
.build();
Optional<Pair<TitusNode, List<Task>>> results = newDescheduler(Collections.singletonMap(job1Task0.getId(), job1Task0Plan)).nextBestMatch();
assertThat(results).isEmpty();
}
// Once the self-managed relocation deadline has passed, the task becomes a match.
@Test
public void testSelfMigrationAfterDeadline() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("removable1", job1Task0);
relocationConnectorStubs.setQuota("job1", 1);
TaskRelocationPlan job1Task0Plan = TaskRelocationPlan.newBuilder()
.withTaskId(job1Task0.getId())
// Deadline already in the past relative to the test clock.
.withRelocationTime(clock.wallTime() - 1)
.build();
Optional<Pair<TitusNode, List<Task>>> results = newDescheduler(Collections.singletonMap(job1Task0.getId(), job1Task0Plan)).nextBestMatch();
assertThat(results).isNotEmpty();
}
// The agent with fewer placed tasks (agent2) should be picked first, and the system
// disruption window / app exemption pattern must gate that choice.
@Test
public void testFitness() {
List<TitusNode> removableAgents = nodeDataResolver.resolve().values().stream()
.filter(n -> n.getServerGroupId().equals("removable1"))
.collect(Collectors.toList());
String agent1 = removableAgents.get(0).getId();
String agent2 = removableAgents.get(1).getId();
List<Task> tasksOfJob1 = jobOperations.getTasks("job1");
// Two tasks on agent1, one on agent2 -- agent2 is the cheaper (fitter) eviction target.
relocationConnectorStubs.placeOnAgent(agent1, tasksOfJob1.get(0), tasksOfJob1.get(1));
relocationConnectorStubs.placeOnAgent(agent2, tasksOfJob1.get(2));
relocationConnectorStubs.setQuota("job1", 1);
Optional<Pair<TitusNode, List<Task>>> results = newDescheduler(Collections.emptyMap()).nextBestMatch();
assertThat(results).isPresent();
assertThat(results.get().getLeft().getId()).isEqualTo(agent2);
// System quota exhausted (0) while the disruption window is open: nothing may be matched.
EvictionQuotaTracker evictionQuotaTracker = mock(EvictionQuotaTracker.class);
when(evictionQuotaTracker.getSystemEvictionQuota()).thenReturn(0L);
when(evictionQuotaTracker.isSystemDisruptionWindowOpen()).thenReturn(true);
when(evictionQuotaTracker.getJobEvictionQuota("job1")).thenReturn(1L);
// The lambda implements EvictionConfiguration -- presumably the app-name exemption pattern; confirm against EvictionConfiguration.
TaskMigrationDescheduler taskMigrationDescheduler = newDescheduler(evictionQuotaTracker, () -> "app1*");
Optional<Pair<TitusNode, List<Task>>> results2 = taskMigrationDescheduler.nextBestMatch();
assertThat(results2).isNotPresent();
// With the window closed, the exempted app ("app1*") may be matched again.
when(evictionQuotaTracker.isSystemDisruptionWindowOpen()).thenReturn(false);
Optional<Pair<TitusNode, List<Task>>> results3 = taskMigrationDescheduler.nextBestMatch();
assertThat(results3).isPresent();
assertThat(results3.get().getLeft().getId()).isEqualTo(agent2);
// job1 (app1) not exempt from system disruption window
Optional<Pair<TitusNode, List<Task>>> results4 = newDescheduler(evictionQuotaTracker, () -> "foo*").nextBestMatch();
assertThat(results4).isNotPresent();
}
// With job quota 0 the descheduling failure must mention the job quota as the reason.
@Test
public void testFailures() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("removable1", job1Task0);
relocationConnectorStubs.setQuota("job1", 0);
DeschedulingFailure failure = newDescheduler(Collections.emptyMap()).getDeschedulingFailure(job1Task0);
assertThat(failure.getReasonMessage()).contains("job quota");
}
// A "relocation required by" attribute on the job makes its task a requested migration
// once the clock passes the deadline.
@Test
public void testJobRequiredMigrationBy() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("active1", job1Task0);
relocationConnectorStubs.setQuota("job1", 1);
relocationConnectorStubs.addJobAttribute("job1", RelocationAttributes.RELOCATION_REQUIRED_BY, "" + clock.wallTime());
// Move past the deadline so the attribute takes effect.
clock.advanceTime(Duration.ofSeconds(1));
Map<String, DeschedulingResult> results = newDescheduler(Collections.emptyMap()).findRequestedJobOrTaskMigrations();
assertThat(results).isNotEmpty();
}
// Same as above, but the attribute is set directly on the task.
@Test
public void testTaskRequiredMigration() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("active1", job1Task0);
relocationConnectorStubs.setQuota("job1", 1);
relocationConnectorStubs.addTaskAttribute(job1Task0.getId(), RelocationAttributes.RELOCATION_REQUIRED, "true");
clock.advanceTime(Duration.ofSeconds(1));
Map<String, DeschedulingResult> results = newDescheduler(Collections.emptyMap()).findRequestedJobOrTaskMigrations();
assertThat(results).isNotEmpty();
}
// Apps matching the exemption pattern bypass the exhausted system quota, but only
// while the system disruption window is closed.
@Test
public void testSystemQuotaExemption() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("active1", job1Task0);
relocationConnectorStubs.addTaskAttribute(job1Task0.getId(), RelocationAttributes.RELOCATION_REQUIRED, "true");
EvictionQuotaTracker evictionQuotaTracker = mock(EvictionQuotaTracker.class);
when(evictionQuotaTracker.getSystemEvictionQuota()).thenReturn(0L);
when(evictionQuotaTracker.isSystemDisruptionWindowOpen()).thenReturn(false);
when(evictionQuotaTracker.getJobEvictionQuota("job1")).thenReturn(1L);
// Pattern "app2" does not match job1's app ("app1") -- no exemption, no migration.
Map<String, DeschedulingResult> results = newDescheduler(evictionQuotaTracker, () -> "app2").findRequestedJobOrTaskMigrations();
assertThat(results).isEmpty();
Map<String, DeschedulingResult> results2 = newDescheduler(evictionQuotaTracker, () -> "app1").findRequestedJobOrTaskMigrations();
assertThat(results2).isNotEmpty();
// system window open with quota = 0
Map<String, DeschedulingResult> results3 = newDescheduler(evictionQuotaTracker, () -> "app1").findRequestedJobOrTaskMigrations();
assertThat(results3).isEmpty();
}
// A node explicitly marked as requiring relocation makes its tasks eligible even in an
// active instance group.
@Test
public void testAgentInstanceRequiredMigration() {
Task job1Task0 = jobOperations.getTasks("job1").get(0);
relocationConnectorStubs.place("active1", job1Task0);
relocationConnectorStubs.setQuota("job1", 1);
// Re-read the task so its context contains the agent instance id set by place().
job1Task0 = jobOperations.findTaskById(job1Task0.getId()).get().getRight();
relocationConnectorStubs.markNodeRelocationRequired(job1Task0.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID));
Optional<Pair<TitusNode, List<Task>>> results = newDescheduler(Collections.emptyMap()).nextBestMatch();
assertThat(results).isNotEmpty();
}
/**
 * Builds a descheduler over the stubbed state using a real {@code EvictionQuotaTracker}
 * and the given pre-computed relocation plans.
 */
private TaskMigrationDescheduler newDescheduler(Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans) {
Map<String, Task> tasksById = toTaskMap(jobOperations.getTasks());
//noinspection unchecked
return new TaskMigrationDescheduler(
plannedAheadTaskRelocationPlans,
new EvacuatedAgentsAllocationTracker(nodeDataResolver.resolve(), tasksById),
new EvictionQuotaTracker(relocationConnectorStubs.getEvictionOperations(), JobTestFunctions.toJobMap(jobOperations.getJobs())),
() -> "foo|bar",
jobOperations.getJobs().stream().collect(Collectors.toMap(Job::getId, j -> j)),
tasksById,
titusRuntime);
}
/**
 * Builds a descheduler with caller-supplied (usually mocked) quota tracker and
 * eviction configuration, and no pre-computed plans.
 */
private TaskMigrationDescheduler newDescheduler(EvictionQuotaTracker evictionQuotaTracker, EvictionConfiguration evictionConfiguration) {
Map<String, Task> tasksById = toTaskMap(jobOperations.getTasks());
//noinspection unchecked
return new TaskMigrationDescheduler(
Collections.emptyMap(),
new EvacuatedAgentsAllocationTracker(nodeDataResolver.resolve(), tasksById),
evictionQuotaTracker,
evictionConfiguration,
jobOperations.getJobs().stream().collect(Collectors.toMap(Job::getId, j -> j)),
tasksById,
titusRuntime);
}
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.data.generator.MutableDataGenerator;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.runtime.RelocationAttributes;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.RelocationConnectorStubs;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import com.netflix.titus.supplementary.relocation.connector.KubernetesNodeDataResolver;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.ofServiceSize;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.withDisruptionBudget;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.withJobId;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.budget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.selfManagedPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.unlimitedRate;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code DefaultDeschedulerService}: full descheduling over stubbed
 * instance groups, and per-task relocation plan creation (with and without a
 * self-managed relocation delay).
 */
public class DefaultDeschedulerServiceTest {
private final TitusRuntime titusRuntime = TitusRuntimes.test();
// Test runtime exposes a controllable TestClock; the cast mirrors the other tests in this package.
private final TestClock clock = (TestClock) titusRuntime.getClock();
protected final RelocationConfiguration configuration = Archaius2Ext.newConfiguration(RelocationConfiguration.class);
// Service jobs of size 4 with a self-managed disruption budget and no relocation delay.
private final MutableDataGenerator<Job<ServiceJobExt>> jobGenerator = new MutableDataGenerator<>(
JobGenerator.serviceJobs(oneTaskServiceJobDescriptor().but(
ofServiceSize(4),
withDisruptionBudget(budget(selfManagedPolicy(0), unlimitedRate(), Collections.emptyList()))
))
);
// One active and one removable instance group (10 nodes each) with three test jobs.
private final RelocationConnectorStubs dataGenerator = new RelocationConnectorStubs()
.addActiveInstanceGroup("active1", 10)
.addRemovableInstanceGroup("removable1", 10)
.addJob(jobGenerator.getValue().but(withJobId("job1")))
.addJob(jobGenerator.getValue().but(withJobId("job2")))
.addJob(jobGenerator.getValue().but(withJobId("jobImmediate")));
private final ReadOnlyJobOperations jobOperations = dataGenerator.getJobOperations();
// Service under test, wired entirely from the stubs above.
private final DefaultDeschedulerService deschedulerService = new DefaultDeschedulerService(
dataGenerator.getJobOperations(),
dataGenerator.getEvictionOperations(),
dataGenerator.getNodeDataResolver(),
() -> "foo|bar",
titusRuntime
);
// Tasks on the removable group plus the immediately-tagged job's task (5 total) must all
// be descheduled, each with the reason matching why it was picked.
@Test
public void testAllExpectedJobMigrationsAreFound() {
List<Task> tasksOfJob1 = jobOperations.getTasks("job1");
dataGenerator.place("active1", tasksOfJob1.get(0), tasksOfJob1.get(1));
dataGenerator.place("removable1", tasksOfJob1.get(2), tasksOfJob1.get(3));
dataGenerator.setQuota("job1", 2);
List<Task> tasksOfJob2 = jobOperations.getTasks("job2");
dataGenerator.place("active1", tasksOfJob2.get(0), tasksOfJob2.get(1));
dataGenerator.place("removable1", tasksOfJob2.get(2), tasksOfJob2.get(3));
dataGenerator.setQuota("job2", 2);
// jobImmediate is tagged for immediate relocation with a deadline just in the future.
dataGenerator.addJobAttribute("jobImmediate", RelocationAttributes.RELOCATION_REQUIRED_BY_IMMEDIATELY, "" + (clock.wallTime() + 1));
Task taskImmediate = jobOperations.getTasks("jobImmediate").get(0);
dataGenerator.place("active1", taskImmediate);
List<DeschedulingResult> results = deschedulerService.deschedule(Collections.emptyMap());
// 2 tasks of job1 + 2 tasks of job2 on removable1, plus the immediate task on active1.
assertThat(results).hasSize(5);
for (DeschedulingResult result : results) {
boolean isImmediateJobMigration = result.getTask().getId().equals(taskImmediate.getId());
if (isImmediateJobMigration) {
assertThat(result.getAgentInstance().getServerGroupId()).isEqualTo("active1");
} else {
assertThat(result.getAgentInstance().getServerGroupId()).isEqualTo("removable1");
}
TaskRelocationPlan plan = result.getTaskRelocationPlan();
if (plan.getTaskId().startsWith("jobImmediate")) {
assertThat(plan.getReason()).isEqualTo(TaskRelocationReason.TaskMigration);
} else {
assertThat(plan.getReason()).isEqualTo(TaskRelocationReason.AgentEvacuation);
}
if (isImmediateJobMigration) {
assertThat(plan.getReasonMessage()).containsSequence("Job marked for immediate eviction");
} else {
assertThat(plan.getReasonMessage()).isEqualTo("Enough quota to migrate the task (no migration delay configured)");
}
}
}
@Test
public void verifySelfManagedRelocationPlanWithDelay() {
verifyRelocationPlan(10_000, "Agent instance tagged for eviction");
}
@Test
public void verifyRelocationPlanWithNoDelay() {
verifyRelocationPlan(0, "Enough quota to migrate the task (no migration delay configured)");
}
/**
 * Asserts that a task on a node requiring relocation gets a plan whose relocation time is
 * the decision time plus {@code relocationDelay} and whose reason matches {@code reasonMessage}.
 */
private void verifyRelocationPlan(long relocationDelay, String reasonMessage) {
ReadOnlyJobOperations jobOperations = mock(ReadOnlyJobOperations.class);
DefaultDeschedulerService dds = new DefaultDeschedulerService(
jobOperations,
mock(ReadOnlyEvictionOperations.class),
new KubernetesNodeDataResolver(configuration, TestDataFactory.mockFabric8IOConnector(), node -> true),
() -> "foo|bar",
titusRuntime
);
Job<ServiceJobExt> job = JobGenerator.serviceJobs(
oneTaskServiceJobDescriptor()
.but(ofServiceSize(2),
withDisruptionBudget(budget(selfManagedPolicy(relocationDelay), unlimitedRate(), Collections.emptyList()))))
.getValue();
ServiceJobTask task = JobGenerator.serviceTasks(job).getValue();
when(jobOperations.getJob(job.getId())).thenReturn(Optional.of(job));
TitusNode node = TitusNode.newBuilder()
.withId("node1")
.withServerGroupId("asg1")
.withRelocationRequired(true).withBadCondition(false).build();
// Advance test clock
long clockAdvancedMs = 5_000;
TestClock testClock = (TestClock) titusRuntime.getClock();
testClock.advanceTime(Duration.ofMillis(clockAdvancedMs));
Optional<TaskRelocationPlan> relocationPlanForTask = dds.getRelocationPlanForTask(node, task, Collections.emptyMap());
assertThat(relocationPlanForTask).isPresent();
assertThat(relocationPlanForTask.get().getTaskId()).isEqualTo(task.getId());
// relocation time is expected to be decision clock time + retentionTimeMs
assertThat(relocationPlanForTask.get().getRelocationTime()).isEqualTo(relocationDelay + clockAdvancedMs);
assertThat(relocationPlanForTask.get().getDecisionTime()).isEqualTo(clockAdvancedMs);
assertThat(relocationPlanForTask.get().getReasonMessage()).isEqualTo(reasonMessage);
}
}
package com.netflix.titus.supplementary.relocation.util;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code RelocationUtil#buildTasksFromNodesAndJobsFilter}.
 */
public class RelocationUtilTest {

    /** Builds a minimal node on the fixed server group used throughout this test. */
    private TitusNode nodeWithId(String nodeId) {
        return TitusNode.newBuilder()
                .withServerGroupId("serverGroup1")
                .withId(nodeId)
                .build();
    }

    /** Creates one task of the given job, pinned to the given agent instance id. */
    private BatchJobTask taskOnNode(Job<BatchJobExt> job, String nodeId) {
        return JobGenerator.batchTasks(job).getValue().toBuilder()
                .addToTaskContext(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID, nodeId)
                .build();
    }

    @Test
    public void buildTasksFromNodesAndJobsFilter() {
        String firstNodeId = "node1";
        String secondNodeId = "node2";
        String thirdNodeId = "node3";

        Job<BatchJobExt> job1 = JobGenerator.oneBatchJob();
        Job<BatchJobExt> job2 = JobGenerator.oneBatchJob();
        Job<BatchJobExt> job3 = JobGenerator.oneBatchJob();

        // One task per job, each placed on a distinct node.
        BatchJobTask task1 = taskOnNode(job1, firstNodeId);
        BatchJobTask task2 = taskOnNode(job2, secondNodeId);
        BatchJobTask task3 = taskOnNode(job3, thirdNodeId);

        ReadOnlyJobOperations jobOperations = mock(ReadOnlyJobOperations.class);
        when(jobOperations.getJobs()).thenReturn(Arrays.asList(job1, job2, job3));
        when(jobOperations.getTasks(job1.getId())).thenReturn(Collections.singletonList(task1));
        when(jobOperations.getTasks(job2.getId())).thenReturn(Collections.singletonList(task2));
        when(jobOperations.getTasks(job3.getId())).thenReturn(Collections.singletonList(task3));

        Map<String, TitusNode> nodes = new HashMap<>();
        for (String nodeId : Arrays.asList(firstNodeId, secondNodeId, thirdNodeId)) {
            nodes.put(nodeId, nodeWithId(nodeId));
        }

        // Only job1 and job3 are in the filter, so only their two tasks should be reported.
        Set<String> jobIds = new HashSet<>(Arrays.asList(job1.getId(), job3.getId()));

        List<String> taskIdsOnBadNodes = RelocationUtil.buildTasksFromNodesAndJobsFilter(nodes, jobIds, jobOperations);
        assertThat(taskIdsOnBadNodes).hasSize(2);
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.util;
import java.util.Collections;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.runtime.RelocationAttributes;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.appendJobDescriptorAttribute;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.appendTaskAttribute;
import static com.netflix.titus.supplementary.relocation.util.RelocationPredicates.checkIfMustBeRelocatedImmediately;
import static com.netflix.titus.supplementary.relocation.util.RelocationPredicates.checkIfNeedsRelocationPlan;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests for {@code RelocationPredicates}: a plain "required" attribute on the job, the task
 * or the node should yield a relocation plan, while the "immediately" variants should
 * request instant relocation instead (and never both at once).
 */
public class RelocationPredicatesTest {

    // Batch job with a self-managed disruption budget (1s relocation delay).
    private static final Job<?> BASE_JOB = JobFunctions.changeDisruptionBudget(
            JobGenerator.oneBatchJob(), DisruptionBudgetGenerator.budget(
            DisruptionBudgetGenerator.selfManagedPolicy(1_000),
            DisruptionBudgetGenerator.unlimitedRate(),
            Collections.emptyList()
    ));

    private static final Task BASE_TASK = JobGenerator.oneBatchTask().toBuilder().withJobId(BASE_JOB.getId()).build();

    // Healthy node with no relocation flags set.
    private static final TitusNode HEALTHY_NODE = TitusNode.newBuilder()
            .withServerGroupId("serverGroup1")
            .withId("node1")
            .build();

    @Test
    public void testJobRelocationRequiredByPredicates() {
        Job<?> taggedJob = appendJobDescriptorAttribute(BASE_JOB,
                RelocationAttributes.RELOCATION_REQUIRED_BY, BASE_TASK.getStatus().getTimestamp() + 1
        );
        // A deadline-style attribute yields a plan, not an immediate relocation.
        assertThat(checkIfNeedsRelocationPlan(taggedJob, BASE_TASK, HEALTHY_NODE)).isPresent();
        assertThat(checkIfMustBeRelocatedImmediately(taggedJob, BASE_TASK, HEALTHY_NODE)).isEmpty();
    }

    @Test
    public void testJobRelocationRequiredByImmediatePredicates() {
        Job<?> taggedJob = appendJobDescriptorAttribute(BASE_JOB,
                RelocationAttributes.RELOCATION_REQUIRED_BY_IMMEDIATELY, BASE_TASK.getStatus().getTimestamp() + 1
        );
        // The "immediately" variant skips the plan and demands instant relocation.
        assertThat(checkIfNeedsRelocationPlan(taggedJob, BASE_TASK, HEALTHY_NODE)).isEmpty();
        assertThat(checkIfMustBeRelocatedImmediately(taggedJob, BASE_TASK, HEALTHY_NODE)).isPresent();
    }

    @Test
    public void testTaskRelocationRequiredPredicates() {
        Task taggedTask = appendTaskAttribute(BASE_TASK, RelocationAttributes.RELOCATION_REQUIRED, "true");
        assertThat(checkIfNeedsRelocationPlan(BASE_JOB, taggedTask, HEALTHY_NODE)).isPresent();
        assertThat(checkIfMustBeRelocatedImmediately(BASE_JOB, taggedTask, HEALTHY_NODE)).isEmpty();
    }

    @Test
    public void testTaskRelocationRequiredImmediatelyPredicates() {
        Task taggedTask = appendTaskAttribute(BASE_TASK, RelocationAttributes.RELOCATION_REQUIRED_IMMEDIATELY, "true");
        assertThat(checkIfNeedsRelocationPlan(BASE_JOB, taggedTask, HEALTHY_NODE)).isEmpty();
        assertThat(checkIfMustBeRelocatedImmediately(BASE_JOB, taggedTask, HEALTHY_NODE)).isPresent();
    }

    @Test
    public void testInstanceRelocationRequiredPredicates() {
        TitusNode nodeMarkedForRelocation = HEALTHY_NODE.toBuilder().withRelocationRequired(true).build();
        assertThat(checkIfNeedsRelocationPlan(BASE_JOB, BASE_TASK, nodeMarkedForRelocation)).isPresent();
        assertThat(checkIfMustBeRelocatedImmediately(BASE_JOB, BASE_TASK, nodeMarkedForRelocation)).isEmpty();
    }

    @Test
    public void testInstanceRelocationRequiredImmediatelyPredicates() {
        TitusNode nodeMarkedForImmediateRelocation = HEALTHY_NODE.toBuilder().withRelocationRequiredImmediately(true).build();
        assertThat(checkIfNeedsRelocationPlan(BASE_JOB, BASE_TASK, nodeMarkedForImmediateRelocation)).isEmpty();
        assertThat(checkIfMustBeRelocatedImmediately(BASE_JOB, BASE_TASK, nodeMarkedForImmediateRelocation)).isPresent();
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.integration;
import com.netflix.titus.common.environment.MyMutableEnvironment;
import com.netflix.titus.supplementary.relocation.RelocationConnectorStubs;
import org.junit.rules.ExternalResource;
import org.springframework.mock.env.MockEnvironment;
/**
 * JUnit {@code ExternalResource} rule that starts a {@link TaskRelocationSandbox} before the
 * test (or test class) runs and shuts it down afterwards on a best-effort basis.
 */
public class TaskRelocationResourceSandbox extends ExternalResource {

    private final RelocationConnectorStubs relocationConnectorStubs;
    private final MockEnvironment environment;

    private TaskRelocationSandbox taskRelocationSandbox;

    /**
     * @param relocationConnectorStubs stubbed external connectors backing the sandbox
     * @param environment              mutable property source used to configure the sandbox
     */
    public TaskRelocationResourceSandbox(RelocationConnectorStubs relocationConnectorStubs, MockEnvironment environment) {
        this.relocationConnectorStubs = relocationConnectorStubs;
        this.environment = environment;
    }

    @Override
    protected void before() {
        this.taskRelocationSandbox = new TaskRelocationSandbox(relocationConnectorStubs, environment);
    }

    /**
     * Returns the sandbox started by {@link #before()}, or null if the rule has not run yet.
     */
    public TaskRelocationSandbox getTaskRelocationSandbox() {
        return taskRelocationSandbox;
    }

    @Override
    protected void after() {
        if (taskRelocationSandbox != null) {
            try {
                taskRelocationSandbox.shutdown();
            } catch (Exception ignored) {
                // Best-effort cleanup: a failed shutdown must not fail the test run.
            }
        }
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.integration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.grpc.protogen.TaskRelocationServiceGrpc;
import com.netflix.titus.grpc.protogen.TaskRelocationServiceGrpc.TaskRelocationServiceBlockingStub;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationComponent;
import com.netflix.titus.runtime.clustermembership.connector.ClusterMembershipInMemoryConnectorComponent;
import com.netflix.titus.runtime.clustermembership.endpoint.grpc.ClusterMembershipGrpcEndpointComponent;
import com.netflix.titus.runtime.clustermembership.service.ClusterMembershipServiceComponent;
import com.netflix.titus.runtime.connector.common.reactor.GrpcToReactorServerFactoryComponent;
import com.netflix.titus.runtime.endpoint.common.grpc.assistant.GrpcCallAssistantComponent;
import com.netflix.titus.runtime.endpoint.common.grpc.assistant.GrpcCallAssistantConfiguration;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolveComponent;
import com.netflix.titus.runtime.endpoint.resolver.HostCallerIdResolver;
import com.netflix.titus.runtime.endpoint.resolver.NoOpHostCallerIdResolver;
import com.netflix.titus.runtime.health.AlwaysHealthyComponent;
import com.netflix.titus.supplementary.relocation.RelocationConnectorStubs;
import com.netflix.titus.supplementary.relocation.RelocationLeaderActivator;
import com.netflix.titus.supplementary.relocation.descheduler.DeschedulerComponent;
import com.netflix.titus.supplementary.relocation.endpoint.grpc.TaskRelocationGrpcComponent;
import com.netflix.titus.supplementary.relocation.endpoint.grpc.TaskRelocationGrpcServerRunner;
import com.netflix.titus.supplementary.relocation.endpoint.rest.TaskRelocationExceptionHandler;
import com.netflix.titus.supplementary.relocation.endpoint.rest.TaskRelocationSpringResource;
import com.netflix.titus.supplementary.relocation.store.memory.InMemoryRelocationStoreComponent;
import com.netflix.titus.supplementary.relocation.workflow.TaskRelocationWorkflowComponent;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.mock.env.MockEnvironment;
import static com.jayway.awaitility.Awaitility.await;
/**
* Task relocation server runner, with stubbed external connectors. Used by the task relocation service integration tests.
*/
public class TaskRelocationSandbox {

    // Maximum time to wait for the relocation GRPC endpoint to start answering after the container is started.
    private static final long ACTIVATION_TIMEOUT_MS = 30_000;

    // Spring context hosting all task relocation service components; parented by the connector stub context.
    private final AnnotationConfigApplicationContext container;

    // Tracks every channel handed out by getGrpcChannel() so shutdown() can close them all.
    private final BlockingQueue<ManagedChannel> channels = new LinkedBlockingQueue<>();

    public TaskRelocationSandbox(RelocationConnectorStubs relocationConnectorStubs, MockEnvironment config) {
        // Port 0 makes the GRPC server bind an ephemeral port; short intervals keep integration tests fast.
        config.setProperty("titus.relocation.endpoint.port", "0");
        config.setProperty("titus.relocation.relocationScheduleIntervalMs", "100");
        config.setProperty("titus.relocation.deschedulingIntervalMs", "100");
        config.setProperty("titus.relocation.relocationTimeoutMs", "60000");
        config.setProperty("titus.relocation.dataStalenessThresholdMs", "30000");
        this.container = new AnnotationConfigApplicationContext();
        container.getEnvironment().merge(config);
        // The stub context supplies the external connector beans (jobs, nodes, etc.).
        container.setParent(relocationConnectorStubs.getApplicationContext());
        // Register all service components before refresh(); refresh() must precede start().
        container.register(AlwaysHealthyComponent.class);
        container.register(ClusterMembershipInMemoryConnectorComponent.class);
        container.register(ClusterMembershipServiceComponent.class);
        container.register(ClusterMembershipGrpcEndpointComponent.class);
        container.register(LeaderActivationComponent.class);
        container.registerBean(HostCallerIdResolver.class, NoOpHostCallerIdResolver::getInstance);
        container.register(GrpcCallAssistantComponent.class);
        container.registerBean(GrpcCallAssistantConfiguration.class, () -> Archaius2Ext.newConfiguration(GrpcCallAssistantConfiguration.class));
        container.register(CallMetadataResolveComponent.class);
        container.register(GrpcToReactorServerFactoryComponent.class);
        container.register(InMemoryRelocationStoreComponent.class);
        container.register(DeschedulerComponent.class);
        container.register(TaskRelocationWorkflowComponent.class);
        container.register(TaskRelocationGrpcComponent.class);
        container.register(TaskRelocationGrpcServerRunner.class);
        container.register(TaskRelocationSpringResource.class);
        container.register(TaskRelocationExceptionHandler.class);
        container.register(RelocationLeaderActivator.class);
        container.refresh();
        container.start();
        // Block until the relocation GRPC service answers a request without throwing
        // (i.e. the service is fully activated), or fail after ACTIVATION_TIMEOUT_MS.
        TaskRelocationServiceBlockingStub stub = TaskRelocationServiceGrpc.newBlockingStub(getGrpcChannel());
        await().timeout(ACTIVATION_TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() ->
                !ExceptionExt.doCatch(() -> stub.getCurrentTaskRelocationPlans(TaskRelocationQuery.getDefaultInstance())).isPresent()
        );
    }

    /**
     * Shuts down all GRPC channels created by this sandbox and closes the Spring container.
     */
    public void shutdown() {
        for (ManagedChannel channel : channels) {
            channel.shutdownNow();
        }
        container.close();
    }

    /**
     * Creates a new plaintext channel to the sandbox GRPC server. Each returned channel is
     * tracked internally and closed by {@link #shutdown()}.
     */
    public ManagedChannel getGrpcChannel() {
        int port = container.getBean(TaskRelocationGrpcServerRunner.class).getServer().getPort();
        ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", port)
                .usePlaintext()
                .build();
        channels.add(channel);
        return channel;
    }
}
| 1,495 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/integration/TaskRelocationIntegrationTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.integration;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.grpc.protogen.RelocationEvent;
import com.netflix.titus.grpc.protogen.RelocationEvent.EventCase;
import com.netflix.titus.grpc.protogen.RelocationTaskId;
import com.netflix.titus.grpc.protogen.TaskRelocationExecution;
import com.netflix.titus.grpc.protogen.TaskRelocationPlan;
import com.netflix.titus.grpc.protogen.TaskRelocationPlans;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.grpc.protogen.TaskRelocationServiceGrpc;
import com.netflix.titus.grpc.protogen.TaskRelocationStatus;
import com.netflix.titus.supplementary.relocation.RelocationConnectorStubs;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import io.grpc.Status;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.springframework.mock.env.MockEnvironment;
import static com.netflix.titus.common.util.CollectionsExt.last;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.newSelfManagedDisruptionBudget;
import static org.assertj.core.api.Assertions.assertThat;
@Category(value = IntegrationTest.class)
public class TaskRelocationIntegrationTest {

    /** Relocation delay configured in the self-managed disruption budget of every test job. */
    private static final int RELOCATION_TIME_MS = 100;

    private final MockEnvironment environment = new MockEnvironment();
    private final TitusRuntime titusRuntime = TitusRuntimes.internal(environment);
    private final Clock clock = titusRuntime.getClock();

    // Stubbed external connectors; activeRemovableSetup provides an active and a removable instance group.
    private final RelocationConnectorStubs relocationConnectorStubs = TestDataFactory.activeRemovableSetup(titusRuntime);
    private final ReadOnlyJobOperations jobOperations = relocationConnectorStubs.getJobOperations();

    @Rule
    public final TaskRelocationResourceSandbox serverResource = new TaskRelocationResourceSandbox(relocationConnectorStubs, environment);

    private TaskRelocationSandbox sandbox;

    private TaskRelocationServiceGrpc.TaskRelocationServiceStub client;

    @Before
    public void setUp() {
        this.sandbox = serverResource.getTaskRelocationSandbox();
        this.client = TaskRelocationServiceGrpc.newStub(sandbox.getGrpcChannel());
    }

    @Test(timeout = 60_000)
    public void testPlannedRelocation() throws Exception {
        Task task = createAndPlaceOneTaskJob(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID);
        relocationConnectorStubs.setQuota(task.getJobId(), 1);

        // Get the plan
        TaskRelocationPlan plan = doTry(() -> findRelocationPlan(task.getId()));
        assertThat(plan.getTaskId()).isEqualTo(task.getId());
        assertThat(plan.getReasonCode()).isEqualTo(TaskRelocationReason.SelfManagedMigration.name());
        assertThat(plan.getRelocationTime()).isLessThanOrEqualTo(clock.wallTime() + RELOCATION_TIME_MS);

        // Wait for the relocation
        TaskRelocationStatus status = doTry(() -> findRelocationStatus(task.getId()));
        assertThat(status.getState()).isEqualTo(TaskRelocationStatus.TaskRelocationState.Success);
        assertThat(status.getStatusCode()).isEqualTo(com.netflix.titus.api.relocation.model.TaskRelocationStatus.STATUS_CODE_TERMINATED);
        assertThat(status.getStatusMessage()).isNotEmpty();
    }

    @Test(timeout = 60_000)
    public void testEvents() throws InterruptedException {
        TestStreamObserver<RelocationEvent> events = new TestStreamObserver<>();
        client.observeRelocationEvents(TaskRelocationQuery.getDefaultInstance(), events);

        // First event of a new subscription is always the snapshot-end marker.
        RelocationEvent firstEvent = events.takeNext(30, TimeUnit.SECONDS);
        assertThat(firstEvent).isNotNull();
        assertThat(firstEvent.getEventCase()).isEqualTo(EventCase.SNAPSHOTEND);

        Task task = createAndPlaceOneTaskJob(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID);
        relocationConnectorStubs.setQuota(task.getJobId(), 1);

        // Plan created for the placed task, then removed once the relocation completes.
        RelocationEvent secondEvent = events.takeNext(30, TimeUnit.SECONDS);
        assertThat(secondEvent).isNotNull();
        assertThat(secondEvent.getEventCase()).isEqualTo(EventCase.TASKRELOCATIONPLANUPDATEEVENT);

        RelocationEvent thirdEvent = events.takeNext(30, TimeUnit.SECONDS);
        assertThat(thirdEvent).isNotNull();
        assertThat(thirdEvent.getEventCase()).isEqualTo(EventCase.TASKRELOCATIONPLANREMOVEEVENT);
    }

    /**
     * Creates a one-task batch job with a self-managed disruption budget, and places its task
     * on the given instance group.
     */
    private Task createAndPlaceOneTaskJob(String instanceGroup) {
        Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, newSelfManagedDisruptionBudget(RELOCATION_TIME_MS));
        relocationConnectorStubs.addJob(job);
        Task task = jobOperations.getTasks().get(0);
        relocationConnectorStubs.place(instanceGroup, task);
        return task;
    }

    /**
     * Looks up the relocation plan for the given task, first among the pending plans, and if not
     * found there, among the already executed relocations. Returns {@link Optional#empty()} when
     * the workflow is not ready yet or the task is unknown.
     */
    private Optional<TaskRelocationPlan> findRelocationPlan(String taskId) {
        TaskRelocationPlans plans;
        try {
            TestStreamObserver<TaskRelocationPlans> events = new TestStreamObserver<>();
            client.getCurrentTaskRelocationPlans(TaskRelocationQuery.getDefaultInstance(), events);
            plans = events.getLast();
        } catch (Exception e) {
            // getMessage() may be null (e.g. for wrapped gRPC errors), so guard before matching on it.
            String message = e.getMessage();
            if (message != null && message.contains("Relocation workflow not ready yet")) {
                return Optional.empty();
            }
            throw new RuntimeException(e);
        }
        Optional<TaskRelocationPlan> taskPlan = plans.getPlansList().stream().filter(p -> p.getTaskId().equals(taskId)).findFirst();
        if (taskPlan.isPresent()) {
            return taskPlan;
        }

        // Check if already processed
        try {
            TestStreamObserver<TaskRelocationExecution> events = new TestStreamObserver<>();
            client.getTaskRelocationResult(RelocationTaskId.newBuilder().setId(taskId).build(), events);
            return Optional.of(events.getLast().getTaskRelocationPlan());
        } catch (Exception e) {
            Status status = Status.fromThrowable(e);
            if (status.getCode() == Status.Code.NOT_FOUND) {
                return Optional.empty();
            }
            throw new RuntimeException(e);
        }
    }

    /**
     * Returns the status of the latest relocation attempt for the given task, or
     * {@link Optional#empty()} when no relocation result is recorded yet.
     */
    private Optional<TaskRelocationStatus> findRelocationStatus(String taskId) {
        try {
            TestStreamObserver<TaskRelocationExecution> events = new TestStreamObserver<>();
            client.getTaskRelocationResult(RelocationTaskId.newBuilder().setId(taskId).build(), events);
            return Optional.of(last(events.getLast().getRelocationAttemptsList()));
        } catch (Exception e) {
            Status status = Status.fromThrowable(e);
            if (status.getCode() == Status.Code.NOT_FOUND) {
                return Optional.empty();
            }
            throw new RuntimeException(e);
        }
    }

    /**
     * Polls the supplier every 10ms until it yields a value, failing with {@link TimeoutException}
     * after 5 seconds.
     */
    private <T> T doTry(Supplier<Optional<T>> valueSupplier) throws Exception {
        long deadline = System.currentTimeMillis() + 5_000;
        while (deadline > System.currentTimeMillis()) {
            Optional<T> result = valueSupplier.get();
            if (result.isPresent()) {
                return result.get();
            }
            Thread.sleep(10);
        }
        throw new TimeoutException();
    }
}
| 1,496 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/endpoint/grpc/TaskRelocationPlanPredicateTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.grpc;
import java.util.function.Predicate;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class TaskRelocationPlanPredicateTest {
private static final TaskRelocationPlan REFERENCE_PLAN = TaskRelocationPlan.newBuilder()
.withTaskId("task1")
.withReason(TaskRelocationReason.TaskMigration)
.withReasonMessage("reason message")
.withDecisionTime(100)
.withRelocationTime(123)
.build();
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
private final JobComponentStub jobComponentStub = new JobComponentStub(titusRuntime);
private final Job<BatchJobExt> job = JobGenerator.oneBatchJob();
private final Task task = jobComponentStub.createJobAndTasks(job).getRight().get(0);
private final TaskRelocationPlan plan = REFERENCE_PLAN.toBuilder().withTaskId(task.getId()).build();
@Test
public void testJobIdsFilter() {
assertThat(newPredicte("jobIds", "").test(plan)).isTrue();
assertThat(newPredicte("jobIds", "jobX," + job.getId()).test(plan)).isTrue();
assertThat(newPredicte("jobIds", "jobX,jobY").test(plan)).isFalse();
}
@Test
public void testTaskIdsFilter() {
assertThat(newPredicte("taskIds", "").test(plan)).isTrue();
assertThat(newPredicte("taskIds", "taskX," + task.getId()).test(plan)).isTrue();
assertThat(newPredicte("taskIds", "taskX,taskY").test(plan)).isFalse();
}
@Test
public void testApplicationFilter() {
assertThat(newPredicte("applicationName", "").test(plan)).isTrue();
assertThat(newPredicte("applicationName", job.getJobDescriptor().getApplicationName()).test(plan)).isTrue();
assertThat(newPredicte("applicationName", "some" + task.getId()).test(plan)).isFalse();
}
@Test
public void testCapacityGroupFilter() {
assertThat(newPredicte("capacityGroup", "").test(plan)).isTrue();
assertThat(newPredicte("capacityGroup", job.getJobDescriptor().getCapacityGroup()).test(plan)).isTrue();
assertThat(newPredicte("capacityGroup", "some" + task.getId()).test(plan)).isFalse();
}
@Test
public void testMixed() {
Predicate<TaskRelocationPlan> predicate = newPredicte(
"jobIds", job.getId(),
"taskIds", task.getId(),
"applicationName", job.getJobDescriptor().getApplicationName(),
"capacityGroup", job.getJobDescriptor().getCapacityGroup()
);
assertThat(predicate.test(plan)).isTrue();
assertThat(predicate.test(plan.toBuilder().withTaskId("taskX").build())).isFalse();
}
private Predicate<TaskRelocationPlan> newPredicte(String... criteria) {
return new TaskRelocationPlanPredicate(
jobComponentStub.getJobOperations(),
TaskRelocationQuery.newBuilder()
.putAllFilteringCriteria(CollectionsExt.asMap(criteria))
.build()
);
}
} | 1,497 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow/DefaultNodeConditionControllerTest.java | package com.netflix.titus.supplementary.relocation.workflow;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.framework.scheduler.ExecutionContext;
import com.netflix.titus.common.framework.scheduler.model.ExecutionId;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.JobManagementClient;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class DefaultNodeConditionControllerTest {
enum NodeIds {
NODE_1,
NODE_2,
NODE_3
}
@Test
public void noTerminationsOnDataStaleness() {
TitusRuntime titusRuntime = mock(TitusRuntime.class);
when(titusRuntime.getRegistry()).thenReturn(new DefaultRegistry());
RelocationConfiguration configuration = mock(RelocationConfiguration.class);
when(configuration.getBadNodeConditionPattern()).thenReturn(".*Problem");
when(configuration.isTaskTerminationOnBadNodeConditionEnabled()).thenReturn(true);
when(configuration.getDataStalenessThresholdMs()).thenReturn(8000L);
NodeDataResolver nodeDataResolver = mock(NodeDataResolver.class);
when(nodeDataResolver.getStalenessMs()).thenReturn(5L);
JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
when(jobDataReplicator.getStalenessMs()).thenReturn(10L);
ReadOnlyJobOperations readOnlyJobOperations = mock(ReadOnlyJobOperations.class);
JobManagementClient jobManagementClient = mock(JobManagementClient.class);
Set<String> terminatedTaskIds = new HashSet<>();
when(jobManagementClient.killTask(anyString(), anyBoolean(), any())).thenAnswer(invocation -> {
String taskIdToBeTerminated = invocation.getArgument(0);
terminatedTaskIds.add(taskIdToBeTerminated);
return Mono.empty();
});
DefaultNodeConditionController nodeConditionCtrl = new DefaultNodeConditionController(configuration, nodeDataResolver, jobDataReplicator,
readOnlyJobOperations, jobManagementClient, titusRuntime);
ExecutionContext executionContext = ExecutionContext.newBuilder().withIteration(ExecutionId.initial()).build();
StepVerifier.create(nodeConditionCtrl.handleNodesWithBadCondition(executionContext))
.verifyComplete();
// No tasks terminated
assertThat(terminatedTaskIds).isEmpty();
}
@Test
public void checkTasksTerminatedDueToBadNodeConditions() {
// Mock jobs, tasks & nodes
Map<String, TitusNode> nodeMap = buildNodes();
List<Job<BatchJobExt>> jobs = getJobs(true);
Map<String, List<Task>> tasksByJobIdMap = buildTasksForJobAndNodeAssignment(new ArrayList<>(nodeMap.values()), jobs);
TitusRuntime titusRuntime = mock(TitusRuntime.class);
when(titusRuntime.getRegistry()).thenReturn(new DefaultRegistry());
RelocationConfiguration configuration = mock(RelocationConfiguration.class);
when(configuration.getBadNodeConditionPattern()).thenReturn(".*Failure");
when(configuration.isTaskTerminationOnBadNodeConditionEnabled()).thenReturn(true);
NodeDataResolver nodeDataResolver = mock(NodeDataResolver.class);
when(nodeDataResolver.resolve()).thenReturn(nodeMap);
JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
when(jobDataReplicator.getStalenessMs()).thenReturn(0L);
ReadOnlyJobOperations readOnlyJobOperations = mock(ReadOnlyJobOperations.class);
when(readOnlyJobOperations.getJobs()).thenReturn(new ArrayList<>(jobs));
tasksByJobIdMap.forEach((key, value) -> when(readOnlyJobOperations.getTasks(key)).thenReturn(value));
JobManagementClient jobManagementClient = mock(JobManagementClient.class);
Set<String> terminatedTaskIds = new HashSet<>();
when(jobManagementClient.killTask(anyString(), anyBoolean(), any())).thenAnswer(invocation -> {
String taskIdToBeTerminated = invocation.getArgument(0);
terminatedTaskIds.add(taskIdToBeTerminated);
return Mono.empty();
});
DefaultNodeConditionController nodeConditionCtrl = new DefaultNodeConditionController(configuration, nodeDataResolver, jobDataReplicator,
readOnlyJobOperations, jobManagementClient, titusRuntime);
ExecutionContext executionContext = ExecutionContext.newBuilder().withIteration(ExecutionId.initial()).build();
StepVerifier.create(nodeConditionCtrl.handleNodesWithBadCondition(executionContext)).verifyComplete();
assertThat(terminatedTaskIds).isNotEmpty();
assertThat(terminatedTaskIds.size()).isEqualTo(2);
verifyTerminatedTasksOnBadNodes(terminatedTaskIds, tasksByJobIdMap, nodeMap);
}
@Test
public void badNodeConditionsIgnoredForJobsNotOptingIn() {
Map<String, TitusNode> nodeMap = buildNodes();
List<Job<BatchJobExt>> jobs = getJobs(false);
Map<String, List<Task>> stringListMap = buildTasksForJobAndNodeAssignment(new ArrayList<>(nodeMap.values()), jobs);
TitusRuntime titusRuntime = mock(TitusRuntime.class);
when(titusRuntime.getRegistry()).thenReturn(new DefaultRegistry());
RelocationConfiguration configuration = mock(RelocationConfiguration.class);
when(configuration.getBadNodeConditionPattern()).thenReturn(".*Failure");
when(configuration.isTaskTerminationOnBadNodeConditionEnabled()).thenReturn(true);
NodeDataResolver nodeDataResolver = mock(NodeDataResolver.class);
when(nodeDataResolver.resolve()).thenReturn(nodeMap);
JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
when(jobDataReplicator.getStalenessMs()).thenReturn(0L);
// Job attribute "terminateContainerOnBadAgent" = False
ReadOnlyJobOperations readOnlyJobOperations = mock(ReadOnlyJobOperations.class);
when(readOnlyJobOperations.getJobs()).thenReturn(new ArrayList<>(jobs));
stringListMap.forEach((key, value) -> when(readOnlyJobOperations.getTasks(key)).thenReturn(value));
JobManagementClient jobManagementClient = mock(JobManagementClient.class);
Set<String> terminatedTaskIds = new HashSet<>();
when(jobManagementClient.killTask(anyString(), anyBoolean(), any())).thenAnswer(invocation -> {
String taskIdToBeTerminated = invocation.getArgument(0);
terminatedTaskIds.add(taskIdToBeTerminated);
return Mono.empty();
});
DefaultNodeConditionController nodeConditionController = new DefaultNodeConditionController(configuration, nodeDataResolver, jobDataReplicator,
readOnlyJobOperations, jobManagementClient, titusRuntime);
ExecutionContext executionContext = ExecutionContext.newBuilder().withIteration(ExecutionId.initial()).build();
StepVerifier.create(nodeConditionController.handleNodesWithBadCondition(executionContext))
.verifyComplete();
// no tasks should be terminated for jobs
assertThat(terminatedTaskIds).isEmpty();
}
private void verifyTerminatedTasksOnBadNodes(Set<String> terminatedTaskIds,
Map<String, List<Task>> tasksByJobIdMap, Map<String, TitusNode> nodeMap) {
List<Task> allTasks = tasksByJobIdMap.values().stream().flatMap(Collection::stream).collect(Collectors.toList());
List<String> badNodeIds = nodeMap.values().stream().filter(TitusNode::isInBadCondition).map(TitusNode::getId).collect(Collectors.toList());
Set<String> taskIdsOnBadNodes = allTasks.stream()
.filter(task -> task.getTaskContext().containsKey(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID) &&
badNodeIds.contains(task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID)))
.map(Task::getId)
.collect(Collectors.toSet());
assertThat(taskIdsOnBadNodes).containsAll(terminatedTaskIds);
}
private List<Job<BatchJobExt>> getJobs(boolean terminateOnBadAgent) {
Job<BatchJobExt> job1 = JobGenerator.batchJobsOfSize(2).getValue();
Map<String, String> job2Attributes = new HashMap<>();
job2Attributes.put(JobAttributes.JOB_PARAMETER_TERMINATE_ON_BAD_AGENT, Boolean.toString(terminateOnBadAgent));
Job<BatchJobExt> job2 = JobGenerator.batchJobsOfSizeAndAttributes(2, job2Attributes).getValue();
return Arrays.asList(job1, job2);
}
private List<Task> buildJobTasks(Job<BatchJobExt> batchJob, List<TitusNode> nodes) {
List<Task> tasksForJob = new ArrayList<>();
List<BatchJobTask> batchTasks = JobGenerator.batchTasks(batchJob).getValues(nodes.size());
for (int i = 0; i < batchTasks.size(); i++) {
tasksForJob.add(batchTasks.get(i).toBuilder()
.addToTaskContext(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID, nodes.get(i).getId())
.build());
}
return tasksForJob;
}
private Map<String, List<Task>> buildTasksForJobAndNodeAssignment(List<TitusNode> nodes, List<Job<BatchJobExt>> jobs) {
Map<String, List<Task>> tasksByJobIdMap = new HashMap<>(2);
jobs.forEach(job -> tasksByJobIdMap.put(job.getId(), buildJobTasks(job, nodes)));
return tasksByJobIdMap;
}
private Map<String, TitusNode> buildNodes() {
Map<String, TitusNode> nodeMap = new HashMap<>(3);
nodeMap.put(NodeIds.NODE_1.name(), buildNode(NodeIds.NODE_1.name(), true));
nodeMap.put(NodeIds.NODE_2.name(), buildNode(NodeIds.NODE_2.name(), true));
nodeMap.put(NodeIds.NODE_3.name(), buildNode(NodeIds.NODE_3.name(), false));
return nodeMap;
}
private TitusNode buildNode(String id, boolean isBadCondition) {
return TitusNode.newBuilder()
.withServerGroupId("serverGroup1")
.withId(id)
.withBadCondition(isBadCondition)
.build();
}
} | 1,498 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow/step/MustBeRelocatedTaskStoreUpdateStepTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.supplementary.relocation.AbstractTaskRelocationTest;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import org.junit.Test;
import reactor.core.publisher.Mono;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.newSelfManagedDisruptionBudget;
import static com.netflix.titus.testkit.model.relocation.TaskRelocationPlanGenerator.oneMigrationPlan;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests that {@code MustBeRelocatedTaskStoreUpdateStep} writes relocation plan changes to the
 * store once, and skips redundant store updates when called again with unchanged data.
 */
public class MustBeRelocatedTaskStoreUpdateStepTest extends AbstractTaskRelocationTest {

    private final RelocationConfiguration configuration = Archaius2Ext.newConfiguration(RelocationConfiguration.class);

    // Mocked store so the test can verify exactly which plans get written or removed.
    private final TaskRelocationStore store = mock(TaskRelocationStore.class);

    private final RelocationTransactionLogger transactionLog = new RelocationTransactionLogger(jobOperations);

    public MustBeRelocatedTaskStoreUpdateStepTest() {
        super(TestDataFactory.activeRemovableSetup());
    }

    @Test
    public void testCreateOrUpdate() {
        // Store starts empty, so the first persist call must create the plan.
        when(store.getAllTaskRelocationPlans()).thenReturn(Mono.just(Collections.emptyMap()));
        MustBeRelocatedTaskStoreUpdateStep step = new MustBeRelocatedTaskStoreUpdateStep(configuration, store, transactionLog, titusRuntime);
        Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, newSelfManagedDisruptionBudget(1_000));
        relocationConnectorStubs.addJob(job);
        relocationConnectorStubs.place(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID, jobOperations.getTasks().get(0));
        when(store.createOrUpdateTaskRelocationPlans(anyList())).thenReturn(Mono.just(Collections.singletonMap("task1", Optional.empty())));
        TaskRelocationPlan taskRelocationPlan = oneMigrationPlan();
        Map<String, TaskRelocationPlan> storeResult = step.persistChangesInStore(
                Collections.singletonMap(taskRelocationPlan.getTaskId(), taskRelocationPlan)
        );
        assertThat(storeResult).hasSize(1);
        verify(store, times(1)).createOrUpdateTaskRelocationPlans(Collections.singletonList(taskRelocationPlan));
        // Now try again with the same data, and make sure no store update is executed
        Map<String, TaskRelocationPlan> storeResultRepeated = step.persistChangesInStore(
                Collections.singletonMap(taskRelocationPlan.getTaskId(), taskRelocationPlan)
        );
        assertThat(storeResultRepeated).hasSize(1);
        verify(store, times(1)).createOrUpdateTaskRelocationPlans(Collections.singletonList(taskRelocationPlan));
    }

    @Test
    public void testRemove() {
        // Store starts with one plan; persisting an empty plan set must remove it.
        TaskRelocationPlan taskRelocationPlan = oneMigrationPlan();
        when(store.getAllTaskRelocationPlans()).thenReturn(Mono.just(Collections.singletonMap(taskRelocationPlan.getTaskId(), taskRelocationPlan)));
        MustBeRelocatedTaskStoreUpdateStep step = new MustBeRelocatedTaskStoreUpdateStep(configuration, store, transactionLog, titusRuntime);
        when(store.removeTaskRelocationPlans(Collections.singleton(taskRelocationPlan.getTaskId()))).thenReturn(Mono.just(Collections.singletonMap("task1", Optional.empty())));
        step.persistChangesInStore(Collections.emptyMap());
        verify(store, times(1)).removeTaskRelocationPlans(Collections.singleton(taskRelocationPlan.getTaskId()));
        // Now try again with the same data, and make sure no store update is executed
        step.persistChangesInStore(Collections.emptyMap());
        verify(store, times(1)).removeTaskRelocationPlans(Collections.singleton(taskRelocationPlan.getTaskId()));
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.