index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow/step/TaskEvictionResultStoreStepTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.Collections;
import java.util.Optional;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.supplementary.relocation.AbstractTaskRelocationTest;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.testkit.model.relocation.TaskRelocationPlanGenerator;
import org.junit.Before;
import org.junit.Test;
import reactor.core.publisher.Mono;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TaskEvictionResultStoreStepTest extends AbstractTaskRelocationTest {
private final RelocationConfiguration configuration = Archaius2Ext.newConfiguration(RelocationConfiguration.class);
private final RelocationTransactionLogger transactionLog = new RelocationTransactionLogger(jobOperations);
private TaskRelocationResultStore store = mock(TaskRelocationResultStore.class);
private TaskEvictionResultStoreStep step;
public TaskEvictionResultStoreStepTest() {
super(TestDataFactory.activeRemovableSetup());
}
@Before
public void setUp() {
this.step = new TaskEvictionResultStoreStep(configuration, store, transactionLog, titusRuntime);
}
@Test
public void testSuccessfulStore() {
TaskRelocationStatus relocationStatus = TaskRelocationPlanGenerator.oneSuccessfulRelocation();
when(store.createTaskRelocationStatuses(anyList())).thenReturn(
Mono.just(Collections.singletonMap(relocationStatus.getTaskId(), Optional.empty()))
);
step.storeTaskEvictionResults(Collections.singletonMap(relocationStatus.getTaskId(), relocationStatus));
verify(store, times(1)).createTaskRelocationStatuses(anyList());
}
@Test
public void testStoreFailure() {
when(store.createTaskRelocationStatuses(anyList())).thenReturn(Mono.error(new RuntimeException("Simulated store error")));
TaskRelocationStatus relocationStatus = TaskRelocationPlanGenerator.oneSuccessfulRelocation();
step.storeTaskEvictionResults(Collections.singletonMap(relocationStatus.getTaskId(), relocationStatus));
verify(store, times(1)).createTaskRelocationStatuses(anyList());
}
} | 1,500 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow/step/MustBeRelocatedSelfManagedTaskCollectorStepTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.supplementary.relocation.AbstractTaskRelocationTest;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import org.junit.Test;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.newSelfManagedDisruptionBudget;
import static org.assertj.core.api.Assertions.assertThat;
public class MustBeRelocatedSelfManagedTaskCollectorStepTest extends AbstractTaskRelocationTest {
private final MustBeRelocatedSelfManagedTaskCollectorStep step;
public MustBeRelocatedSelfManagedTaskCollectorStepTest() {
super(TestDataFactory.activeRemovableSetup());
this.step = new MustBeRelocatedSelfManagedTaskCollectorStep(nodeDataResolver, jobOperations, titusRuntime);
}
@Test
public void testMigrationOfTasksWithPolicy() {
Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, newSelfManagedDisruptionBudget(1_000));
relocationConnectorStubs.addJob(job);
relocationConnectorStubs.place(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID, jobOperations.getTasks().get(0));
Map<String, TaskRelocationPlan> result = step.collectTasksThatMustBeRelocated();
assertThat(result).hasSize(1);
}
@Test
public void testTaskWithNoDisruptionBudgetIsNotMigrated() {
Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, JobFunctions.getNoDisruptionBudgetMarker());
relocationConnectorStubs.addJob(job);
relocationConnectorStubs.place(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID, jobOperations.getTasks().get(0));
Map<String, TaskRelocationPlan> result = step.collectTasksThatMustBeRelocated();
assertThat(result).isEmpty();
}
@Test
public void testOriginalPlanIsReturnedOnEachInvocation() {
Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, newSelfManagedDisruptionBudget(1_000));
relocationConnectorStubs.addJob(job);
Task task = jobOperations.getTasks().get(0);
relocationConnectorStubs.place(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID, task);
Map<String, TaskRelocationPlan> firstResult = step.collectTasksThatMustBeRelocated();
assertThat(firstResult).hasSize(1);
TaskRelocationPlan first = firstResult.get(task.getId());
((TestClock) titusRuntime.getClock()).advanceTime(1, TimeUnit.SECONDS);
Map<String, TaskRelocationPlan> secondResult = step.collectTasksThatMustBeRelocated();
assertThat(secondResult).hasSize(1);
TaskRelocationPlan second = secondResult.get(task.getId());
assertThat(first).isEqualTo(second);
}
} | 1,501 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/workflow/step/TaskEvictionStepTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.Collections;
import java.util.Map;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.supplementary.relocation.AbstractTaskRelocationTest;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import org.junit.Before;
import org.junit.Test;
import reactor.core.scheduler.Schedulers;
import static com.netflix.titus.testkit.model.relocation.TaskRelocationPlanGenerator.oneMigrationPlan;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.newSelfManagedDisruptionBudget;
import static org.assertj.core.api.Assertions.assertThat;
public class TaskEvictionStepTest extends AbstractTaskRelocationTest {
private final RelocationTransactionLogger transactionLog = new RelocationTransactionLogger(jobOperations);
private TaskEvictionStep step;
public TaskEvictionStepTest() {
super(TestDataFactory.activeRemovableSetup());
}
@Before
public void setUp() {
this.step = new TaskEvictionStep(evictionServiceClient, titusRuntime, transactionLog, Schedulers.parallel());
}
@Test
public void testSuccessfulEviction() {
Job<BatchJobExt> job = TestDataFactory.newBatchJob("job1", 1, newSelfManagedDisruptionBudget(1_000));
relocationConnectorStubs.addJob(job);
relocationConnectorStubs.setQuota("job1", 1);
Task task = jobOperations.getTasks().get(0);
relocationConnectorStubs.place(TestDataFactory.REMOVABLE_INSTANCE_GROUP_ID, task);
TaskRelocationPlan taskRelocationPlan = oneMigrationPlan().toBuilder().withTaskId(task.getId()).build();
Map<String, TaskRelocationStatus> result = step.evict(Collections.singletonMap(task.getId(), taskRelocationPlan));
assertThat(result).hasSize(1);
TaskRelocationStatus relocationStatus = result.get(task.getId());
assertThat(relocationStatus.getTaskId()).isEqualTo(task.getId());
assertThat(relocationStatus.getStatusCode()).isEqualTo(TaskRelocationStatus.STATUS_CODE_TERMINATED);
assertThat(relocationStatus.getTaskRelocationPlan()).isEqualTo(taskRelocationPlan);
}
@Test
public void testFailedEviction() {
TaskRelocationPlan taskRelocationPlan = oneMigrationPlan().toBuilder().withTaskId("nonExistingTaskId").build();
Map<String, TaskRelocationStatus> result = step.evict(Collections.singletonMap("nonExistingTaskId", taskRelocationPlan));
assertThat(result).hasSize(1);
TaskRelocationStatus relocationStatus = result.get("nonExistingTaskId");
assertThat(relocationStatus.getTaskId()).isEqualTo("nonExistingTaskId");
assertThat(relocationStatus.getStatusCode()).isEqualTo(TaskRelocationStatus.STATUS_EVICTION_ERROR);
assertThat(relocationStatus.getTaskRelocationPlan()).isEqualTo(taskRelocationPlan);
}
} | 1,502 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/connector/TitusNodePredicatesTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.time.OffsetDateTime;
import java.util.Arrays;
import java.util.Collections;
import java.util.regex.Pattern;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOUtil;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import io.fabric8.kubernetes.api.model.Node;
import io.fabric8.kubernetes.api.model.NodeBuilder;
import io.fabric8.kubernetes.api.model.NodeCondition;
import io.fabric8.kubernetes.api.model.NodeSpec;
import io.fabric8.kubernetes.api.model.NodeStatus;
import io.fabric8.kubernetes.api.model.Taint;
import io.fabric8.kubernetes.api.model.TaintBuilder;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class TitusNodePredicatesTest {
@Test
public void testIsOwnedByScheduler() {
Node node = new NodeBuilder(false)
.editOrNewSpec()
.addToTaints(new TaintBuilder()
.withKey(KubeConstants.TAINT_SCHEDULER)
.withValue("fenzo")
.build()
)
.endSpec()
.build();
assertThat(NodePredicates.isOwnedByScheduler("fenzo", node)).isTrue();
assertThat(NodePredicates.isOwnedByScheduler("kubeScheduler", node)).isFalse();
}
@Test
public void nodeConditionTransitionThreshold() {
NodeCondition nodeCondition1 = new NodeCondition();
nodeCondition1.setLastTransitionTime(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusMinutes(10)));
boolean isTransitionRecent = NodePredicates.isNodeConditionTransitionedRecently(nodeCondition1, 300);
assertThat(isTransitionRecent).isFalse();
NodeCondition nodeCondition2 = new NodeCondition();
nodeCondition2.setLastTransitionTime(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusSeconds(100)));
boolean isTransitionRecent2 = NodePredicates.isNodeConditionTransitionedRecently(nodeCondition2, 300);
assertThat(isTransitionRecent2).isTrue();
}
@Test
public void checkBadNodeCondition() {
Node node = new Node();
NodeCondition condition1 = new NodeCondition();
condition1.setLastTransitionTime(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusMinutes(10)));
condition1.setType("CorruptedMemoryFailure");
condition1.setMessage("There isn't that much corrupt memory");
condition1.setReason("CorruptedMemoryIsUnderThreshold");
condition1.setStatus("true");
NodeCondition condition2 = new NodeCondition();
condition2.setLastTransitionTime(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusMinutes(10)));
condition2.setType("EniCarrierProblem");
condition2.setMessage("Enis are working");
condition2.setReason("EnisAreWorking");
condition2.setStatus("False");
NodeStatus v1NodeStatus = new NodeStatus();
v1NodeStatus.setConditions(Arrays.asList(condition1, condition2));
node.setStatus(v1NodeStatus);
Pattern pattern = Pattern.compile(".*MemoryFailure");
boolean isBadCondition = NodePredicates.hasBadCondition(node, pattern::matcher, 300);
assertThat(isBadCondition).isTrue();
condition1.setStatus("False");
isBadCondition = NodePredicates.hasBadCondition(node, pattern::matcher, 300);
assertThat(isBadCondition).isFalse();
}
@Test
public void checkBadTaint() {
Node node = new Node();
NodeSpec nodeSpec = new NodeSpec();
Taint taint1 = new Taint();
taint1.setEffect("NoSchedule");
taint1.setKey("node.titus.netflix.com/tier");
taint1.setValue("Critical");
taint1.setTimeAdded(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusMinutes(20)));
Taint taint2 = new Taint();
taint2.setEffect("NoSchedule");
taint2.setKey("node.kubernetes.io/unreachable");
taint2.setTimeAdded(Fabric8IOUtil.formatTimestamp(OffsetDateTime.now().minusMinutes(10)));
nodeSpec.setTaints(Arrays.asList(taint1, taint2));
node.setSpec(nodeSpec);
Pattern pattern = Pattern.compile(".*unreachable");
boolean isBadTaint = NodePredicates.hasBadTaint(node, pattern::matcher, 300);
assertThat(isBadTaint).isTrue();
nodeSpec.setTaints(Collections.singletonList(taint1));
isBadTaint = NodePredicates.hasBadTaint(node, pattern::matcher, 300);
assertThat(isBadTaint).isFalse();
}
} | 1,503 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/test/java/com/netflix/titus/supplementary/relocation/connector/KubernetesNodeDataResolverTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.util.Map;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.runtime.RelocationAttributes;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.TestDataFactory;
import io.fabric8.kubernetes.api.model.Node;
import org.junit.Test;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.TAINT_EFFECT_NO_EXECUTE;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.addNodeCondition;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.addNodeTaint;
import static com.netflix.titus.supplementary.relocation.TestDataFactory.newNode;
import static org.assertj.core.api.Assertions.assertThat;
public class KubernetesNodeDataResolverTest {
private final RelocationConfiguration configuration = Archaius2Ext.newConfiguration(RelocationConfiguration.class,
"titus.relocation.nodeRelocationRequiredTaints", "required.*",
"titus.relocation.nodeRelocationRequiredImmediatelyTaints", "immediately.*",
"titus.relocation.badNodeConditionPattern", ".*MemoryFailure"
);
@Test
public void testResolver() {
String node1Name = "node1";
String node2Name = "node2";
Node node1 = newNode(node1Name);
Node node2 = newNode(node2Name);
Fabric8IOConnector fabric8IOConnector = TestDataFactory.mockFabric8IOConnector(node1, node2);
KubernetesNodeDataResolver resolver = new KubernetesNodeDataResolver(configuration, fabric8IOConnector, node -> true);
Map<String, TitusNode> resolved = resolver.resolve();
assertThat(resolved).hasSize(2);
// Nothing is flagged yet
assertThat(resolver.resolve().get(node1Name).isRelocationRequired()).isFalse();
assertThat(resolver.resolve().get(node1Name).isRelocationRequiredImmediately()).isFalse();
assertThat(resolver.resolve().get(node1Name).isRelocationNotAllowed()).isFalse();
assertThat(resolver.resolve().get(node1Name).isInBadCondition()).isFalse();
// Tag one as removable
addNodeTaint(node1, "required.titus.com/decommissioning", "true", TAINT_EFFECT_NO_EXECUTE);
node1.getMetadata().getLabels().put(RelocationAttributes.RELOCATION_REQUIRED, "true");
assertThat(resolver.resolve().get(node1Name).isRelocationRequired()).isTrue();
assertThat(resolver.resolve().get(node1Name).isRelocationRequiredImmediately()).isFalse();
assertThat(resolver.resolve().get(node1Name).isRelocationNotAllowed()).isFalse();
// Now removable immediately
addNodeTaint(node1, "immediately.titus.com/decommissioning", "true", TAINT_EFFECT_NO_EXECUTE);
assertThat(resolver.resolve().get(node1Name).isRelocationRequiredImmediately()).isTrue();
assertThat(resolver.resolve().get(node1Name).isRelocationNotAllowed()).isFalse();
// bad memory condition = True
addNodeCondition(node2, "CorrectableMemoryFailure", "True");
assertThat(resolver.resolve().get(node2Name)).isNotNull();
assertThat(resolver.resolve().get(node2Name).isInBadCondition()).isTrue();
// bad memory condition = False
addNodeCondition(node1, "CorrectableMemoryFailure", "False");
assertThat(resolver.resolve().get(node1Name)).isNotNull();
assertThat(resolver.resolve().get(node1Name).isInBadCondition()).isFalse();
}
} | 1,504 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/RelocationConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
@Configuration(prefix = "titus.relocation")
public interface RelocationConfiguration {
/**
* Interval at which the relocation workflow is triggered. This interval should be reasonably short, so the
* relocation plans are up to date.
*/
@DefaultValue("30000")
long getRelocationScheduleIntervalMs();
/**
* Interval at which descheduling, and task eviction is executed. This interval must be aligned with
* {@link #getRelocationScheduleIntervalMs()} interval, and should be a multiplication of the latter.
*/
@DefaultValue("120000")
long getDeschedulingIntervalMs();
@DefaultValue("300000")
long getRelocationTimeoutMs();
@DefaultValue("30000")
long getDataStalenessThresholdMs();
@DefaultValue("90000")
long getRdsTimeoutMs();
@DefaultValue(".*")
String getNodeRelocationRequiredTaints();
@DefaultValue("NONE")
String getNodeRelocationRequiredImmediatelyTaints();
/**
* Pattern identifying bad node conditions
*/
@DefaultValue("UncorrectableMemoryFailure")
String getBadNodeConditionPattern();
/**
* Pattern identifying bad node taints
*/
@DefaultValue(".*unreachable")
String getBadTaintsPattern();
@DefaultValue("false")
boolean isTaskTerminationOnBadNodeConditionEnabled();
/**
* Interval at which the node condition controller is triggered.
*/
@DefaultValue("60000")
long getNodeConditionControlLoopIntervalMs();
/**
* Node condition control loop timeout in Millis.
*/
@DefaultValue("300000")
long getNodeConditionControlLoopTimeoutMs();
/**
* It represents the last N seconds threshold for which the latest node condition is sustained
* It helps us avoid picking up node conditions that are reached for a very short duration
*/
@DefaultValue("300")
int getNodeConditionTransitionTimeThresholdSeconds();
/**
* It represents the last N seconds threshold for which the latest taint is sustained
* It helps us avoid picking up taint state that is sustained for a very short duration
*/
@DefaultValue("300")
int getNodeTaintTransitionTimeThresholdSeconds();
}
| 1,505 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/RelocationLeaderActivator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
import java.util.Arrays;
import java.util.List;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.clustermembership.service.ClusterMembershipService;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationConfiguration;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinator;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationStatus;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStoreActivator;
import com.netflix.titus.supplementary.relocation.workflow.NodeConditionController;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowExecutor;
import static com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinators.coordinatorWithLoggingCallback;
import static com.netflix.titus.runtime.clustermembership.activation.LeaderActivationCoordinators.coordinatorWithSystemExitCallback;
@Singleton
public class RelocationLeaderActivator implements LeaderActivationStatus {
private final LeaderActivationCoordinator coordinator;
@Inject
public RelocationLeaderActivator(LeaderActivationConfiguration configuration,
TaskRelocationStoreActivator relocationStoreActivator,
RelocationWorkflowExecutor workflowExecutor,
NodeConditionController nodeConditionController,
ClusterMembershipService membershipService,
TitusRuntime titusRuntime) {
List<LeaderActivationListener> services = Arrays.asList(relocationStoreActivator, workflowExecutor, nodeConditionController);
this.coordinator = configuration.isSystemExitOnLeadershipLost()
? coordinatorWithSystemExitCallback(configuration, services, membershipService, titusRuntime)
: coordinatorWithLoggingCallback(configuration, services, membershipService, titusRuntime);
}
@PreDestroy
public void shutdown() {
coordinator.shutdown();
}
@Override
public boolean isActivatedLeader() {
return coordinator.isActivatedLeader();
}
}
| 1,506 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/RelocationMetrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation;
public class RelocationMetrics {
public static String METRIC_ROOT = "titus.relocation.";
}
| 1,507 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/DeschedulerService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.util.List;
import java.util.Map;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
public interface DeschedulerService {
List<DeschedulingResult> deschedule(Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans);
}
| 1,508 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/EvictionQuotaTracker.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.util.HashMap;
import java.util.Map;
import com.netflix.titus.api.eviction.model.EvictionQuota;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.model.reference.Reference;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.runtime.connector.eviction.EvictionRejectionReasons;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class EvictionQuotaTracker {
private static final Logger logger = LoggerFactory.getLogger(EvictionConfiguration.class);
private final Map<String, Long> jobEvictionQuotas = new HashMap<>();
private long systemEvictionQuota;
private boolean systemDisruptionWindowOpen = true;
EvictionQuotaTracker(ReadOnlyEvictionOperations evictionOperations, Map<String, Job<?>> jobs) {
EvictionQuota systemEvictionQuotaObj = evictionOperations.getEvictionQuota(Reference.system());
this.systemEvictionQuota = systemEvictionQuotaObj.getQuota();
if (systemEvictionQuota == 0) {
String evictionQuotaMessage = systemEvictionQuotaObj.getMessage();
if (evictionQuotaMessage.equals(EvictionRejectionReasons.SYSTEM_WINDOW_CLOSED.getReasonMessage())) {
systemDisruptionWindowOpen = false;
}
}
logger.debug("System Eviction Quota {}. System disruption window open ? {}", systemEvictionQuota, systemDisruptionWindowOpen);
jobs.forEach((id, job) -> {
long jobEvictionQuota = evictionOperations.findEvictionQuota(Reference.job(id)).map(EvictionQuota::getQuota).orElse(0L);
logger.debug("Job {} eviction quota {}", id, jobEvictionQuota);
jobEvictionQuotas.put(id, jobEvictionQuota);
}
);
}
long getSystemEvictionQuota() {
return systemEvictionQuota;
}
boolean isSystemDisruptionWindowOpen() {
return systemDisruptionWindowOpen;
}
long getJobEvictionQuota(String jobId) {
return jobEvictionQuotas.getOrDefault(jobId, 0L);
}
void consumeQuota(String jobId, boolean isJobExemptFromSystemWindow) {
if (systemEvictionQuota <= 0) {
if (systemDisruptionWindowOpen || !isJobExemptFromSystemWindow) {
throw DeschedulerException.noQuotaLeft("System quota is empty");
}
}
if (!jobEvictionQuotas.containsKey(jobId)) {
throw DeschedulerException.noQuotaLeft("Attempt to use quota for unknown job: jobId=%s", jobId);
}
long jobQuota = jobEvictionQuotas.get(jobId);
if (jobQuota <= 0) {
throw DeschedulerException.noQuotaLeft("Job quota is empty: jobId=%s", jobId);
}
systemEvictionQuota = systemEvictionQuota - 1;
jobEvictionQuotas.put(jobId, jobQuota - 1);
}
/**
 * An alternative version to {@link #consumeQuota(String, boolean)} which does not throw an exception if there is not
 * enough quota to relocate a task of a given job. This is used when the immediate relocation is required.
 * Each quota is decremented only while it is still positive; exhausted quotas are left untouched.
 */
void consumeQuotaNoError(String jobId) {
    if (systemEvictionQuota > 0) {
        systemEvictionQuota--;
    }
    // Decrement the job quota in place, but never below zero; unknown jobs are ignored.
    jobEvictionQuotas.computeIfPresent(jobId, (id, remaining) -> remaining > 0 ? remaining - 1 : remaining);
}
}
| 1,509 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/DefaultDeschedulerService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.model.DeschedulingFailure;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.supplementary.relocation.util.RelocationPredicates;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.hasDisruptionBudget;
/**
 * WARN This is a simple implementation focused on a single task migration use case.
 *
 * Produces {@link DeschedulingResult}s by combining three sources, in priority order:
 * (1) tasks that must be relocated immediately, (2) tasks whose job/task is explicitly
 * tagged for relocation, and (3) tasks on evacuated agents chosen by a fitness heuristic.
 * Tasks on evacuated agents that could not be descheduled are reported with a failure.
 */
@Singleton
public class DefaultDeschedulerService implements DeschedulerService {

    private final ReadOnlyJobOperations jobOperations;
    private final ReadOnlyEvictionOperations evictionOperations;
    private final NodeDataResolver nodeDataResolver;
    private final TitusRuntime titusRuntime;
    private final EvictionConfiguration evictionConfiguration;
    private final Clock clock;

    @Inject
    public DefaultDeschedulerService(ReadOnlyJobOperations jobOperations,
                                    ReadOnlyEvictionOperations evictionOperations,
                                    NodeDataResolver nodeDataResolver,
                                    EvictionConfiguration evictionConfiguration,
                                    TitusRuntime titusRuntime) {
        this.jobOperations = jobOperations;
        this.evictionOperations = evictionOperations;
        this.nodeDataResolver = nodeDataResolver;
        this.evictionConfiguration = evictionConfiguration;
        this.clock = titusRuntime.getClock();
        this.titusRuntime = titusRuntime;
    }

    /**
     * Runs one descheduling pass over the current jobs/tasks/nodes snapshot.
     *
     * @param plannedAheadTaskRelocationPlans previously persisted relocation plans, keyed by task ID
     * @return one result per task selected for relocation (or reported as blocked)
     */
    @Override
    public List<DeschedulingResult> deschedule(Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans) {
        // Snapshot jobs and tasks once so all trackers below see a consistent view.
        List<Pair<Job, List<Task>>> allJobsAndTasks = jobOperations.getJobsAndTasks();
        Map<String, Job<?>> jobs = allJobsAndTasks.stream().map(Pair::getLeft).collect(Collectors.toMap(Job::getId, j -> j));
        Map<String, Task> tasksById = allJobsAndTasks.stream()
                .flatMap(p -> p.getRight().stream())
                .collect(Collectors.toMap(Task::getId, t -> t));

        // Stateful trackers: both are mutated as evictions are selected during this pass.
        EvacuatedAgentsAllocationTracker evacuatedAgentsAllocationTracker = new EvacuatedAgentsAllocationTracker(nodeDataResolver.resolve(), tasksById);
        EvictionQuotaTracker evictionQuotaTracker = new EvictionQuotaTracker(evictionOperations, jobs);

        TaskMigrationDescheduler taskMigrationDescheduler = new TaskMigrationDescheduler(
                plannedAheadTaskRelocationPlans,
                evacuatedAgentsAllocationTracker,
                evictionQuotaTracker,
                evictionConfiguration,
                jobs, tasksById,
                titusRuntime
        );

        // Highest priority first: immediate evictions, then explicitly requested migrations.
        Map<String, DeschedulingResult> requestedImmediateEvictions = taskMigrationDescheduler.findAllImmediateEvictions();
        Map<String, DeschedulingResult> requestedEvictions = taskMigrationDescheduler.findRequestedJobOrTaskMigrations();
        Map<String, DeschedulingResult> allRequestedEvictions = CollectionsExt.merge(requestedImmediateEvictions, requestedEvictions);

        Map<String, DeschedulingResult> regularEvictions = new HashMap<>();
        Optional<Pair<TitusNode, List<Task>>> bestMatch;
        // Repeatedly pick the best-fitting evacuated agent until no agent yields any evictable task.
        while ((bestMatch = taskMigrationDescheduler.nextBestMatch()).isPresent()) {
            TitusNode agent = bestMatch.get().getLeft();
            List<Task> tasks = bestMatch.get().getRight();
            tasks.forEach(task -> {
                // Tasks already covered by the requested evictions keep their original result.
                if (!allRequestedEvictions.containsKey(task.getId())) {
                    Optional<TaskRelocationPlan> relocationPlanForTask = getRelocationPlanForTask(agent, task, plannedAheadTaskRelocationPlans);
                    relocationPlanForTask.ifPresent(rp -> regularEvictions.put(
                            task.getId(),
                            DeschedulingResult.newBuilder()
                                    .withTask(task)
                                    .withAgentInstance(agent)
                                    .withTaskRelocationPlan(rp)
                                    .build()
                    ));
                }
            });
        }

        // Find eviction which could not be scheduled now.
        for (Task task : tasksById.values()) {
            if (allRequestedEvictions.containsKey(task.getId()) || regularEvictions.containsKey(task.getId())) {
                continue;
            }
            if (evacuatedAgentsAllocationTracker.isEvacuated(task)) {
                // Report why the task was left behind, so operators can see blocked migrations.
                DeschedulingFailure failure = taskMigrationDescheduler.getDeschedulingFailure(task);
                TaskRelocationPlan relocationPlan = plannedAheadTaskRelocationPlans.get(task.getId());
                if (relocationPlan == null) {
                    relocationPlan = newNotDelayedRelocationPlan(task, false);
                }
                TitusNode agent = evacuatedAgentsAllocationTracker.getRemovableAgent(task);
                regularEvictions.put(
                        task.getId(),
                        DeschedulingResult.newBuilder()
                                .withTask(task)
                                .withAgentInstance(agent)
                                .withTaskRelocationPlan(relocationPlan)
                                .withFailure(failure)
                                .build()
                );
            }
        }

        return CollectionsExt.merge(new ArrayList<>(allRequestedEvictions.values()), new ArrayList<>(regularEvictions.values()));
    }

    /**
     * Builds an ad-hoc relocation plan for a task that has no migration delay configured.
     * Decision and relocation time are both "now", i.e. the task is immediately movable.
     *
     * @param approved whether enough quota was available to actually migrate the task
     */
    private TaskRelocationPlan newNotDelayedRelocationPlan(Task task, boolean approved) {
        long now = clock.wallTime();
        String reasonMessage = approved
                ? "Enough quota to migrate the task (no migration delay configured)"
                : "Not enough quota to migrate the task (but no migration delay configured)";
        return TaskRelocationPlan.newBuilder()
                .withTaskId(task.getId())
                .withReason(TaskRelocationReason.AgentEvacuation)
                .withReasonMessage(reasonMessage)
                .withDecisionTime(now)
                .withRelocationTime(now)
                .build();
    }

    /**
     * Resolves the relocation plan to attach to a descheduled task: the planned-ahead plan when
     * one exists, otherwise a freshly built self-managed plan (for self-managed jobs with a
     * disruption budget) or an immediate "not delayed" plan.
     */
    @VisibleForTesting
    Optional<TaskRelocationPlan> getRelocationPlanForTask(TitusNode agent, Task task,
                                                         Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans) {
        // AtomicReference is used only as a mutable holder for the lambda below, not for concurrency.
        AtomicReference<Optional<TaskRelocationPlan>> result = new AtomicReference<>(Optional.empty());
        TaskRelocationPlan plannedAheadTaskRelocationPlan = plannedAheadTaskRelocationPlans.get(task.getId());
        if (plannedAheadTaskRelocationPlan == null) {
            // recheck if a self managed plan is needed
            jobOperations.getJob(task.getJobId()).ifPresent(job ->
                    RelocationPredicates.checkIfNeedsRelocationPlan(job, task, agent).ifPresent(reason -> {
                        if (RelocationPredicates.isSelfManaged(job) && hasDisruptionBudget(job)) {
                            result.set(Optional.of(RelocationUtil.buildSelfManagedRelocationPlan(job, task, reason, clock.wallTime())));
                        }
                    }));
            if (!result.get().isPresent()) {
                result.set(Optional.of(newNotDelayedRelocationPlan(task, true)));
            }
        } else {
            result.set(Optional.of(plannedAheadTaskRelocationPlan));
        }
        return result.get();
    }
}
| 1,510 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/TaskMigrationDescheduler.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.RegExpExt;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.model.DeschedulingFailure;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.supplementary.relocation.util.RelocationPredicates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Core selection logic for one descheduling pass. Decides which tasks to evict by combining
 * immediate-relocation markers, explicit job/task relocation requests, and a per-agent fitness
 * score for evacuated agents, while charging every selection against the shared
 * {@link EvictionQuotaTracker}.
 *
 * Not thread-safe: instances are built per pass and mutate the trackers they were given.
 */
class TaskMigrationDescheduler {

    private static final Logger logger = LoggerFactory.getLogger(TaskMigrationDescheduler.class);

    private static final double FITNESS_NONE = 0.0;
    private static final double FITNESS_PERFECT = 1.0;

    private static final Pair<Double, List<Task>> FITNESS_RESULT_NONE = Pair.of(FITNESS_NONE, Collections.emptyList());

    private static final int MAX_EXPECTED_AGENT_CPUS = 64;

    /**
     * A factor used to lower a fitness score for agents that cannot be fully evacuated. Total factor is a multiplication
     * of tasks left and this value. We set it to 1/64, as 64 is the maximum number of processors we may have per agent
     * instance. If actual number of CPUs is higher than 64, it is ok. We will just not distinguish agents which are left
     * with more than 64 tasks on them.
     */
    private static final double TASK_ON_AGENT_PENALTY = 1.0 / MAX_EXPECTED_AGENT_CPUS;

    private final Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans;

    private final EvacuatedAgentsAllocationTracker evacuatedAgentsAllocationTracker;
    private final EvictionQuotaTracker evictionQuotaTracker;
    private final Map<String, Job<?>> jobsById;
    private final Map<String, Task> tasksById;
    private final Clock clock;
    // Builds a fresh Matcher per application name from the dynamic exemption regexp property.
    private final Function<String, Matcher> appsExemptFromSystemDisruptionWindowMatcherFactory;

    TaskMigrationDescheduler(Map<String, TaskRelocationPlan> plannedAheadTaskRelocationPlans,
                             EvacuatedAgentsAllocationTracker evacuatedAgentsAllocationTracker,
                             EvictionQuotaTracker evictionQuotaTracker,
                             EvictionConfiguration evictionConfiguration,
                             Map<String, Job<?>> jobsById,
                             Map<String, Task> tasksById,
                             TitusRuntime titusRuntime) {
        this.plannedAheadTaskRelocationPlans = plannedAheadTaskRelocationPlans;
        this.evacuatedAgentsAllocationTracker = evacuatedAgentsAllocationTracker;
        this.evictionQuotaTracker = evictionQuotaTracker;
        this.jobsById = jobsById;
        this.tasksById = tasksById;
        this.appsExemptFromSystemDisruptionWindowMatcherFactory = RegExpExt.dynamicMatcher(evictionConfiguration::getAppsExemptFromSystemDisruptionWindow,
                "titus.eviction.appsExemptFromSystemDisruptionWindow", Pattern.DOTALL, logger);
        this.clock = titusRuntime.getClock();
    }

    /**
     * Finds all tasks marked for immediate relocation (instance/task/job level) and charges their
     * quota without failing when quotas are exhausted — immediate evictions always proceed.
     */
    Map<String, DeschedulingResult> findAllImmediateEvictions() {
        long now = clock.wallTime();
        Map<String, DeschedulingResult> result = new HashMap<>();
        tasksById.values().forEach(task -> {
            Job<?> job = jobsById.get(task.getJobId());
            TitusNode instance = evacuatedAgentsAllocationTracker.getAgent(task);
            // Skip tasks with no resolvable job or agent instance.
            if (job != null && instance != null) {
                RelocationPredicates.checkIfMustBeRelocatedImmediately(job, task, instance).ifPresent(reason -> {
                    // Best-effort quota accounting; never blocks an immediate eviction.
                    evictionQuotaTracker.consumeQuotaNoError(job.getId());
                    result.put(task.getId(), newDeschedulingResultForRequestedRelocation(now, task, instance, reason.getRight()));
                });
            }
        });
        return result;
    }

    /**
     * Finds tasks whose job or task is explicitly tagged for relocation, honoring both system and
     * job quotas (unlike {@link #findAllImmediateEvictions()}).
     */
    Map<String, DeschedulingResult> findRequestedJobOrTaskMigrations() {
        long now = clock.wallTime();
        Map<String, DeschedulingResult> result = new HashMap<>();
        tasksById.values().forEach(task -> {
            Job<?> job = jobsById.get(task.getJobId());
            TitusNode instance = evacuatedAgentsAllocationTracker.getAgent(task);
            if (job != null && instance != null) {
                RelocationPredicates.checkIfRelocationRequired(job, task).ifPresent(reason -> {
                    if (isSystemEvictionQuotaAvailable(job) && canTerminate(task)) {
                        long quota = evictionQuotaTracker.getJobEvictionQuota(task.getJobId());
                        if (quota > 0) {
                            evictionQuotaTracker.consumeQuota(task.getJobId(), isJobExemptFromSystemDisruptionWindow(job));
                            result.put(task.getId(), newDeschedulingResultForRequestedRelocation(now, task, instance, reason.getRight()));
                        }
                    }
                });
            }
        });
        return result;
    }

    /**
     * Picks the evacuated agent with the highest fitness score, marks its chosen tasks as
     * descheduled, charges their quotas, and returns the (agent, tasks) pair. Returns empty when
     * no agent has any evictable task left; callers loop until then.
     */
    Optional<Pair<TitusNode, List<Task>>> nextBestMatch() {
        return evacuatedAgentsAllocationTracker.getRemovableAgentsById().values().stream()
                .map(i -> Pair.of(i, computeFitness(i)))
                .filter(p -> p.getRight().getLeft() > 0)
                .max(Comparator.comparingDouble(p -> p.getRight().getLeft()))
                .map(p -> {
                    TitusNode agent = p.getLeft();
                    List<Task> tasks = p.getRight().getRight();
                    tasks.forEach(task -> {
                        evacuatedAgentsAllocationTracker.descheduled(task);
                        Job<?> job = jobsById.get(task.getJobId());
                        if (job != null) {
                            evictionQuotaTracker.consumeQuota(task.getJobId(), isJobExemptFromSystemDisruptionWindow(job));
                        } else {
                            // Should not happen (computeFitness resolved the job); charge quota conservatively.
                            logger.warn("Missing job for ID = {}", task.getJobId());
                            evictionQuotaTracker.consumeQuota(task.getJobId(), false);
                        }
                    });
                    return Pair.of(agent, tasks);
                });
    }

    /**
     * Produces a human-readable reason why the given task could not be descheduled in this pass.
     */
    DeschedulingFailure getDeschedulingFailure(Task task) {
        Job<?> job = jobsById.get(task.getJobId());

        String message;
        if (job == null) {
            message = "No job record found";
        } else {
            TitusNode instance = evacuatedAgentsAllocationTracker.getAgent(task);
            Optional<String> blockedOpt = instance != null
                    ? RelocationPredicates.checkIfRelocationBlocked(job, task, instance)
                    : Optional.empty();
            if (blockedOpt.isPresent()) {
                message = blockedOpt.get();
            } else if (!canTerminate(task)) {
                message = "Migration deadline not reached yet";
            } else if (evictionQuotaTracker.getJobEvictionQuota(job.getId()) <= 0) {
                message = "Not enough job quota";
            } else {
                message = "Unknown";
            }
        }

        return DeschedulingFailure.newBuilder().withReasonMessage(message).build();
    }

    // Builds a result whose plan allows relocation immediately (decision time == relocation time == now).
    private DeschedulingResult newDeschedulingResultForRequestedRelocation(long now, Task task, TitusNode instance, String reason) {
        TaskRelocationPlan plan = TaskRelocationPlan.newBuilder()
                .withTaskId(task.getId())
                .withReason(TaskRelocationPlan.TaskRelocationReason.TaskMigration)
                .withReasonMessage(reason)
                .withDecisionTime(now)
                .withRelocationTime(now)
                .build();
        return DeschedulingResult.newBuilder()
                .withTask(task)
                .withAgentInstance(instance)
                .withTaskRelocationPlan(plan)
                .build();
    }

    /**
     * Scores an agent by how completely it could be evacuated right now, and returns the tasks that
     * would be evicted. Score is 1.0 for a full evacuation, reduced by TASK_ON_AGENT_PENALTY per
     * task left behind (floored at 0.01 so a partial evacuation still ranks above "nothing").
     */
    private Pair<Double, List<Task>> computeFitness(TitusNode agent) {
        List<Task> tasks = evacuatedAgentsAllocationTracker.getTasksOnAgent(agent.getId());
        if (tasks.isEmpty()) {
            return FITNESS_RESULT_NONE;
        }

        boolean systemWindowOpen = evictionQuotaTracker.isSystemDisruptionWindowOpen();
        long availableTerminationLimit;
        if (systemWindowOpen) {
            // Cap selections by the remaining system quota.
            availableTerminationLimit = Math.min(tasks.size(), evictionQuotaTracker.getSystemEvictionQuota());
            if (availableTerminationLimit <= 0) {
                return FITNESS_RESULT_NONE;
            }
        } else {
            // system window is closed, we'll need to inspect all eligible jobs during closed window
            availableTerminationLimit = tasks.size();
        }

        Map<String, List<Task>> chosen = new HashMap<>();
        List<Task> chosenList = new ArrayList<>();
        for (Task task : tasks) {
            if (canTerminate(task)) {
                String jobId = task.getJobId();
                Job<?> job = jobsById.get(jobId);
                // if window is closed, then only pick up jobs that are exempt
                boolean continueWithJobQuotaCheck = systemWindowOpen || isJobExemptFromSystemDisruptionWindow(job);
                if (continueWithJobQuotaCheck) {
                    // applying job eviction quota
                    long quota = evictionQuotaTracker.getJobEvictionQuota(jobId);
                    long used = chosen.getOrDefault(jobId, Collections.emptyList()).size();
                    if ((quota - used) > 0) {
                        chosen.computeIfAbsent(jobId, jid -> new ArrayList<>()).add(task);
                        chosenList.add(task);
                        if (availableTerminationLimit <= chosenList.size()) {
                            break;
                        }
                    }
                }
            }
        }
        if (chosenList.size() == 0) {
            return FITNESS_RESULT_NONE;
        }

        int leftOnAgent = tasks.size() - chosenList.size();
        double fitness = Math.max(FITNESS_PERFECT - leftOnAgent * TASK_ON_AGENT_PENALTY, 0.01);

        return Pair.of(fitness, chosenList);
    }

    /**
     * A task may be terminated when its job is known and either no relocation plan constrains it
     * or the plan's relocation time has been reached.
     */
    private boolean canTerminate(Task task) {
        Job<?> job = jobsById.get(task.getJobId());
        if (job == null) {
            return false;
        }

        TaskRelocationPlan relocationPlan = plannedAheadTaskRelocationPlans.get(task.getId());

        // If no relocation plan is found, this means the disruption budget policy does not limit us here.
        if (relocationPlan == null) {
            return true;
        }

        return relocationPlan.getRelocationTime() <= clock.wallTime();
    }

    /**
     * System quota is "available" for a job when quota remains, or when the quota is empty only
     * because the system disruption window is closed AND the job's application is exempt from it.
     */
    private boolean isSystemEvictionQuotaAvailable(Job<?> job) {
        boolean skipSystemWindowCheck = isJobExemptFromSystemDisruptionWindow(job);
        if (evictionQuotaTracker.getSystemEvictionQuota() <= 0) {
            return !evictionQuotaTracker.isSystemDisruptionWindowOpen() && skipSystemWindowCheck;
        }
        return true;
    }

    // Matches the job's application name against the dynamic exemption regexp.
    private boolean isJobExemptFromSystemDisruptionWindow(Job<?> job) {
        return appsExemptFromSystemDisruptionWindowMatcherFactory.apply(job.getJobDescriptor().getApplicationName()).matches();
    }
}
| 1,511 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/DeschedulerException.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
/**
 * Exception thrown by the descheduler when a relocation cannot proceed, carrying a machine
 * readable {@link ErrorCode} next to the human readable message.
 */
public class DeschedulerException extends RuntimeException {

    /** Classifies the failure so callers can branch without parsing the message. */
    public enum ErrorCode {
        NoQuotaLeft
    }

    private final ErrorCode errorCode;

    private DeschedulerException(ErrorCode errorCode, String message) {
        super(message);
        this.errorCode = errorCode;
    }

    /** @return the failure classification attached at creation time */
    public ErrorCode getErrorCode() {
        return errorCode;
    }

    /**
     * Creates a {@link ErrorCode#NoQuotaLeft} exception. The message is treated as a
     * {@link String#format(String, Object...)} template only when arguments are supplied,
     * so literal messages containing '%' remain safe.
     */
    public static DeschedulerException noQuotaLeft(String message, Object... args) {
        String detail;
        if (args.length > 0) {
            detail = String.format(message, args);
        } else {
            detail = message;
        }
        return new DeschedulerException(ErrorCode.NoQuotaLeft, detail);
    }
}
| 1,512 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/DeschedulerComponent.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import com.netflix.titus.api.eviction.service.ReadOnlyEvictionOperations;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring configuration that wires the {@link DeschedulerService} bean from its read-only
 * job/eviction views, the node resolver, and the eviction configuration.
 */
@Configuration
public class DeschedulerComponent {

    // All collaborators are injected by Spring from beans defined elsewhere in the application context.
    @Bean
    public DeschedulerService getDeschedulerService(ReadOnlyJobOperations jobOperations,
                                                    ReadOnlyEvictionOperations evictionOperations,
                                                    NodeDataResolver nodeDataResolver,
                                                    EvictionConfiguration evictionConfiguration,
                                                    TitusRuntime titusRuntime) {
        return new DefaultDeschedulerService(jobOperations, evictionOperations, nodeDataResolver, evictionConfiguration, titusRuntime);
    }
}
| 1,513 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/descheduler/EvacuatedAgentsAllocationTracker.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.descheduler;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
import static com.netflix.titus.common.util.CollectionsExt.copyAndRemove;
import static com.netflix.titus.common.util.CollectionsExt.transformValues;
/**
 * Tracks which agent nodes are being evacuated and which tasks still sit on them. Nodes are
 * considered removable when their server group requires relocation (and the node does not forbid
 * it) or the node itself requires relocation. Tasks descheduled during a pass are remembered and
 * excluded from subsequent {@link #getTasksOnAgent(String)} calls. Not thread-safe.
 */
class EvacuatedAgentsAllocationTracker {

    private final Map<String, TitusNode> removableAgentsById;
    private final Map<String, Pair<TitusNode, List<Task>>> removableAgentsAndTasksByAgentId;
    private final Set<String> descheduledTasks = new HashSet<>();
    private final Map<String, TitusNode> agentsByTaskId;
    private final Map<String, TitusNode> removableAgentsByTaskId = new HashMap<>();

    EvacuatedAgentsAllocationTracker(Map<String, TitusNode> nodesById, Map<String, Task> tasksById) {
        this.agentsByTaskId = RelocationUtil.buildTasksToInstanceMap(nodesById, tasksById);

        // Select the nodes that are candidates for evacuation.
        this.removableAgentsById = new HashMap<>();
        for (Map.Entry<String, TitusNode> entry : nodesById.entrySet()) {
            TitusNode node = entry.getValue();
            boolean serverGroupEvacuation = node.isServerGroupRelocationRequired() && !node.isRelocationNotAllowed();
            if (serverGroupEvacuation || node.isRelocationRequired()) {
                removableAgentsById.put(entry.getKey(), node);
            }
        }

        // Pair each removable node with the tasks currently placed on it, and index those tasks back
        // to their node for the reverse lookups below.
        this.removableAgentsAndTasksByAgentId = transformValues(
                removableAgentsById,
                node -> Pair.of(node, RelocationUtil.findTasksOnInstance(node, tasksById.values()))
        );
        removableAgentsAndTasksByAgentId.values().forEach(nodeAndTasks ->
                nodeAndTasks.getRight().forEach(task -> removableAgentsByTaskId.put(task.getId(), nodeAndTasks.getLeft()))
        );
    }

    /** @return all nodes selected for evacuation, keyed by node ID */
    Map<String, TitusNode> getRemovableAgentsById() {
        return removableAgentsById;
    }

    /** Records that the given task has been selected for eviction during this pass. */
    void descheduled(Task task) {
        descheduledTasks.add(task.getId());
    }

    /**
     * @return the tasks still on the given removable node, excluding ones already descheduled
     * @throws NullPointerException when the node is not a removable agent
     */
    List<Task> getTasksOnAgent(String instanceId) {
        Pair<TitusNode, List<Task>> nodeAndTasks = Preconditions.checkNotNull(
                removableAgentsAndTasksByAgentId.get(instanceId),
                "Agent instance not found: instanceId=%s", instanceId
        );
        return copyAndRemove(nodeAndTasks.getRight(), t -> descheduledTasks.contains(t.getId()));
    }

    /** @return true when the task is placed on a node selected for evacuation */
    boolean isEvacuated(Task task) {
        return removableAgentsByTaskId.containsKey(task.getId());
    }

    /** @return the removable node hosting the task, or null when it is not on one */
    TitusNode getRemovableAgent(Task task) {
        return removableAgentsByTaskId.get(task.getId());
    }

    /** @return the node hosting the task (removable or not), or null when unknown */
    TitusNode getAgent(Task task) {
        return agentsByTaskId.get(task.getId());
    }
}
| 1,514 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/util/RelocationUtil.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.util;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.util.DateTimeExt;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
/**
 * Static helpers shared by the relocation workflow: task/node index building, placement
 * predicates, and relocation plan formatting/construction.
 */
public final class RelocationUtil {

    // Fix: utility class was instantiable; a private constructor enforces static-only use.
    private RelocationUtil() {
    }

    /** Builds an index of all tasks of all jobs, keyed by task ID. */
    public static Map<String, Task> buildTaskByIdMap(ReadOnlyJobOperations jobOperations) {
        Map<String, Task> result = new HashMap<>();
        jobOperations.getJobs().forEach(job -> jobOperations.getTasks(job.getId()).forEach(task -> result.put(task.getId(), task)));
        return result;
    }

    /**
     * Maps each task ID to the node it is placed on, using the agent instance ID recorded in the
     * task context. Tasks without a placement, or placed on unknown nodes, are omitted.
     */
    public static Map<String, TitusNode> buildTasksToInstanceMap(Map<String, TitusNode> nodesById, Map<String, Task> taskByIdMap) {
        Map<String, TitusNode> result = new HashMap<>();
        taskByIdMap.values().forEach(task -> {
            String instanceId = task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID);
            if (instanceId != null) {
                TitusNode instance = nodesById.get(instanceId);
                if (instance != null) {
                    result.put(task.getId(), instance);
                }
            }
        });
        return result;
    }

    /** Convenience overload that builds the task index from {@code jobOperations} first. */
    public static Map<String, TitusNode> buildTasksToInstanceMap(Map<String, TitusNode> nodesById,
                                                                 ReadOnlyJobOperations jobOperations) {
        return buildTasksToInstanceMap(nodesById, buildTaskByIdMap(jobOperations));
    }

    /**
     * Returns the IDs of tasks that are both placed on one of the given nodes and belong to one of
     * the given jobs.
     */
    public static List<String> buildTasksFromNodesAndJobsFilter(Map<String, TitusNode> nodesById, Set<String> jobIds,
                                                                ReadOnlyJobOperations jobOperations) {
        Map<String, Task> tasksById = buildTaskByIdMap(jobOperations);
        Set<String> taskIdsOnNodes = buildTasksToInstanceMap(nodesById, tasksById).keySet();
        return taskIdsOnNodes.stream().filter(taskId -> {
            // Single lookup instead of containsKey + get (same semantics; map holds no null values).
            Task task = tasksById.get(taskId);
            return task != null && jobIds.contains(task.getJobId());
        }).collect(Collectors.toList());
    }

    /** Returns the subset of {@code tasks} that are assigned to an agent and placed on {@code instance}. */
    public static List<Task> findTasksOnInstance(TitusNode instance, Collection<Task> tasks) {
        return tasks.stream()
                .filter(task -> isAssignedToAgent(task) && isOnInstance(instance, task))
                .collect(Collectors.toList());
    }

    /** A task counts as assigned once it left Accepted and has not yet Finished. */
    public static boolean isAssignedToAgent(Task task) {
        TaskState state = task.getStatus().getState();
        return state != TaskState.Accepted && state != TaskState.Finished;
    }

    /** True when the task's recorded agent instance ID equals the given node's ID. */
    public static boolean isOnInstance(TitusNode instance, Task task) {
        String taskAgentId = task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID);
        return taskAgentId != null && taskAgentId.equals(instance.getId());
    }

    /** Formats a relocation plan for logging, with the relocation time rendered in UTC. */
    public static String doFormat(TaskRelocationPlan plan) {
        return String.format("{reason=%s, reasonMessage='%s', relocationAfter=%s}", plan.getReason(), plan.getReasonMessage(), DateTimeExt.toUtcDateTimeString(plan.getRelocationTime()));
    }

    /**
     * Builds a relocation plan for a self-managed job, deferring the relocation time by the
     * job's configured self-managed relocation delay.
     */
    public static TaskRelocationPlan buildSelfManagedRelocationPlan(Job<?> job, Task task, String reason, long timeNow) {
        SelfManagedDisruptionBudgetPolicy selfManaged = (SelfManagedDisruptionBudgetPolicy) job.getJobDescriptor().getDisruptionBudget().getDisruptionBudgetPolicy();
        return TaskRelocationPlan.newBuilder()
                .withTaskId(task.getId())
                .withReason(TaskRelocationPlan.TaskRelocationReason.TaskMigration)
                .withReasonMessage(reason)
                .withDecisionTime(timeNow)
                .withRelocationTime(timeNow + selfManaged.getRelocationTimeMs())
                .build();
    }
}
| 1,515 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/util/RelocationPredicates.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.util;
import java.util.Map;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.common.util.DateTimeExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.runtime.RelocationAttributes;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.hasDisruptionBudget;
public class RelocationPredicates {
// Identifies which level of the hierarchy requested a relocation, from broadest to narrowest.
public enum RelocationTrigger {
    Instance,
    InstanceGroup,
    Job,
    Task
}
/**
 * Decides whether a self-managed task needs a persisted relocation plan, returning the reason
 * when it does. Returns empty for non-self-managed jobs, when relocation is explicitly
 * forbidden, or when the relocation is immediate (no point persisting a plan that is executed
 * in the same pass). Checks are ordered: forbid > immediate > task tag > job tag > instance >
 * instance group.
 */
public static Optional<String> checkIfNeedsRelocationPlan(Job<?> job, Task task, TitusNode instance) {
    if (!hasDisruptionBudget(job) || !isSelfManaged(job)) {
        return Optional.empty();
    }
    if (isRelocationNotAllowed(job) || isRelocationNotAllowed(task) || instance.isRelocationNotAllowed()) {
        return Optional.empty();
    }

    // As the relocation must be done immediately, there is no point in persisting the plan. The task will be
    // evicted in this iteration.
    if (instance.isRelocationRequiredImmediately() || isRelocationRequiredImmediately(task) || isRelocationRequiredByImmediately(job, task)) {
        return Optional.empty();
    }

    if (isRelocationRequired(task)) {
        return Optional.of("Task tagged for relocation");
    }
    if (isRelocationRequiredBy(job, task)) {
        // The job-level tag only applies to tasks created at or before the tag's timestamp.
        long jobTimestamp = getJobTimestamp(job, RelocationAttributes.RELOCATION_REQUIRED_BY);
        long taskTimestamp = getTaskCreateTimestamp(task);
        if (jobTimestamp >= taskTimestamp) {
            return Optional.of("Job tagged for relocation for tasks created before " + DateTimeExt.toUtcDateTimeString(jobTimestamp));
        }
    }
    if (instance.isRelocationRequired()) {
        return Optional.of("Agent instance tagged for eviction");
    }
    if (instance.isServerGroupRelocationRequired()) {
        return Optional.of("Agent instance group tagged for eviction");
    }
    return Optional.empty();
}
public static Optional<Pair<RelocationTrigger, String>> checkIfMustBeRelocatedImmediately(Job<?> job, Task task, TitusNode instance) {
if (instance.isRelocationRequiredImmediately()) {
return Optional.of(Pair.of(RelocationTrigger.Instance, "Agent instance tagged for immediate eviction"));
}
if (isRelocationRequiredImmediately(task)) {
return Optional.of(Pair.of(RelocationTrigger.Task, "Task marked for immediate eviction"));
}
if (isRelocationRequiredByImmediately(job, task)) {
return Optional.of(Pair.of(RelocationTrigger.Job, "Job marked for immediate eviction"));
}
return Optional.empty();
}
public static Optional<Pair<RelocationTrigger, String>> checkIfRelocationRequired(Job<?> job, Task task) {
if (isRelocationRequired(task)) {
return Optional.of(Pair.of(RelocationTrigger.Task, "Task marked for eviction"));
}
if (isRelocationRequiredBy(job, task)) {
long timestamp = getJobTimestamp(job, RelocationAttributes.RELOCATION_REQUIRED_BY);
return Optional.of(Pair.of(RelocationTrigger.Job, String.format("Job tasks created before %s marked for eviction", DateTimeExt.toUtcDateTimeString(timestamp))));
}
return Optional.empty();
}
public static Optional<Pair<RelocationTrigger, String>> checkIfRelocationRequired(Job<?> job, Task task, TitusNode instance) {
if (instance.isRelocationRequired()) {
return Optional.of(Pair.of(RelocationTrigger.Instance, "Agent tagged for eviction"));
}
return checkIfRelocationRequired(job, task);
}
public static Optional<String> checkIfRelocationBlocked(Job<?> job, Task task, TitusNode instance) {
if (isRelocationNotAllowed(task)) {
return Optional.of("Task marked as not evictable");
}
if (isRelocationNotAllowed(job)) {
return Optional.of("Job marked as not evictable");
}
if (instance.isRelocationNotAllowed()) {
return Optional.of("Agent marked as not evictable");
}
return Optional.empty();
}
private static boolean isRelocationRequired(Task task) {
return checkRelocationAttribute(task.getAttributes());
}
private static boolean isRelocationRequiredImmediately(Task task) {
return task.getAttributes()
.getOrDefault(RelocationAttributes.RELOCATION_REQUIRED_IMMEDIATELY, "false")
.equalsIgnoreCase("true");
}
private static boolean isRelocationRequiredByImmediately(Job<?> job, Task task) {
return getJobTimestamp(job, RelocationAttributes.RELOCATION_REQUIRED_BY_IMMEDIATELY) >= getTaskCreateTimestamp(task);
}
private static boolean isRelocationRequiredBy(Job<?> job, Task task) {
return getJobTimestamp(job, RelocationAttributes.RELOCATION_REQUIRED_BY) >= getTaskCreateTimestamp(task);
}
private static boolean isRelocationNotAllowed(Job<?> job) {
return job.getJobDescriptor().getAttributes()
.getOrDefault(RelocationAttributes.RELOCATION_NOT_ALLOWED, "false")
.equalsIgnoreCase("true");
}
private static boolean isRelocationNotAllowed(Task task) {
return task.getAttributes()
.getOrDefault(RelocationAttributes.RELOCATION_NOT_ALLOWED, "false")
.equalsIgnoreCase("true");
}
private static boolean checkRelocationAttribute(Map<String, String> attributes) {
return attributes.getOrDefault(RelocationAttributes.RELOCATION_REQUIRED, "false").equalsIgnoreCase("true");
}
private static long getJobTimestamp(Job<?> job, String key) {
try {
return Long.parseLong(job.getJobDescriptor().getAttributes().getOrDefault(key, "-1"));
} catch (NumberFormatException e) {
return 0;
}
}
private static long getTaskCreateTimestamp(Task task) {
return JobFunctions.findTaskStatus(task, TaskState.Accepted).orElse(task.getStatus()).getTimestamp();
}
public static boolean isSelfManaged(Job<?> job) {
DisruptionBudgetPolicy disruptionBudgetPolicy = job.getJobDescriptor().getDisruptionBudget().getDisruptionBudgetPolicy();
return disruptionBudgetPolicy instanceof SelfManagedDisruptionBudgetPolicy;
}
}
| 1,516 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/TaskRelocationPlanPredicate.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.TaskRelocationPlans;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowExecutor;
import static com.netflix.titus.common.util.Evaluators.acceptNotNull;
import static com.netflix.titus.runtime.relocation.endpoint.RelocationGrpcModelConverters.toGrpcTaskRelocationPlans;
/**
 * Filters {@link TaskRelocationPlan}s using the criteria carried by a {@link TaskRelocationQuery}.
 * Supported criteria: job ids, task ids, application name, capacity group. All supplied criteria
 * must match (logical AND); an empty criteria map accepts every plan.
 */
public class TaskRelocationPlanPredicate implements Predicate<TaskRelocationPlan> {

    public static final String FILTER_JOB_IDS = "jobIds";
    public static final String FILTER_TASK_IDS = "taskIds";
    public static final String FILTER_APPLICATION_NAME = "applicationName";
    public static final String FILTER_CAPACITY_GROUP = "capacityGroup";

    private final ReadOnlyJobOperations jobOperations;
    private final Predicate<TaskRelocationPlan> predicate;

    @VisibleForTesting
    public TaskRelocationPlanPredicate(ReadOnlyJobOperations jobOperations, TaskRelocationQuery request) {
        this.jobOperations = jobOperations;
        Map<String, String> criteria = request.getFilteringCriteriaMap();
        if (criteria.isEmpty()) {
            this.predicate = relocationPlan -> true;
        } else {
            List<Predicate<TaskRelocationPlan>> predicates = new ArrayList<>();
            acceptNotNull(criteria.get(FILTER_JOB_IDS), value -> newJobIdsPredicate(value).ifPresent(predicates::add));
            acceptNotNull(criteria.get(FILTER_TASK_IDS), value -> newTaskIdsPredicate(value).ifPresent(predicates::add));
            acceptNotNull(criteria.get(FILTER_APPLICATION_NAME), value -> newApplicationNamePredicate(value).ifPresent(predicates::add));
            acceptNotNull(criteria.get(FILTER_CAPACITY_GROUP), value -> newCapacityGroupPredicate(value).ifPresent(predicates::add));
            if (predicates.isEmpty()) {
                this.predicate = relocationPlan -> true;
            } else {
                // All criteria must match (logical AND).
                this.predicate = relocationPlan -> {
                    for (Predicate<TaskRelocationPlan> p : predicates) {
                        if (!p.test(relocationPlan)) {
                            return false;
                        }
                    }
                    return true;
                };
            }
        }
    }

    @Override
    public boolean test(TaskRelocationPlan relocationPlan) {
        return predicate.test(relocationPlan);
    }

    private Optional<Predicate<TaskRelocationPlan>> newJobIdsPredicate(String jobIds) {
        Set<String> ids = StringExt.splitByCommaIntoSet(jobIds);
        if (ids.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(newJobPredicate(job -> ids.contains(job.getId())));
    }

    private Optional<Predicate<TaskRelocationPlan>> newTaskIdsPredicate(String taskIds) {
        // Consistent with newJobIdsPredicate: split directly into a set instead of list + manual HashSet.
        Set<String> ids = StringExt.splitByCommaIntoSet(taskIds);
        if (ids.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(taskRelocationPlan -> ids.contains(taskRelocationPlan.getTaskId()));
    }

    private Optional<Predicate<TaskRelocationPlan>> newApplicationNamePredicate(String applicationName) {
        if (applicationName.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(newJobPredicate(job -> applicationName.equalsIgnoreCase(job.getJobDescriptor().getApplicationName())));
    }

    private Optional<Predicate<TaskRelocationPlan>> newCapacityGroupPredicate(String capacityGroup) {
        if (capacityGroup.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(newJobPredicate(job -> capacityGroup.equalsIgnoreCase(job.getJobDescriptor().getCapacityGroup())));
    }

    /**
     * Adapts a job-level predicate to a plan-level one by resolving the plan's task id to its job.
     * Plans whose task is no longer known to the job service are rejected.
     */
    private Predicate<TaskRelocationPlan> newJobPredicate(Predicate<Job<?>> jobPredicate) {
        return taskRelocationPlan -> {
            Optional<Pair<Job<?>, Task>> jobTaskOpt = jobOperations.findTaskById(taskRelocationPlan.getTaskId());
            if (!jobTaskOpt.isPresent()) {
                return false;
            }
            return jobPredicate.test(jobTaskOpt.get().getLeft());
        };
    }

    /**
     * Filters the workflow executor's planned relocations with the request criteria and converts
     * the result to its GRPC representation.
     */
    public static TaskRelocationPlans buildProtobufQueryResult(ReadOnlyJobOperations jobOperations,
                                                               RelocationWorkflowExecutor relocationWorkflowExecutor,
                                                               TaskRelocationQuery request) {
        Predicate<TaskRelocationPlan> filter = new TaskRelocationPlanPredicate(jobOperations, request);
        List<TaskRelocationPlan> corePlans = new ArrayList<>(relocationWorkflowExecutor.getPlannedRelocations().values());
        List<TaskRelocationPlan> filtered = corePlans.stream().filter(filter).collect(Collectors.toList());
        return toGrpcTaskRelocationPlans(filtered);
    }
}
| 1,517 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/grpc/ReactorTaskRelocationGrpcService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.grpc;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.RelocationEvent;
import com.netflix.titus.grpc.protogen.RelocationTaskId;
import com.netflix.titus.grpc.protogen.TaskRelocationExecution;
import com.netflix.titus.grpc.protogen.TaskRelocationExecutions;
import com.netflix.titus.grpc.protogen.TaskRelocationPlans;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.runtime.relocation.endpoint.RelocationGrpcModelConverters;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowExecutor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import static com.netflix.titus.runtime.relocation.endpoint.RelocationGrpcModelConverters.toGrpcRelocationEvent;
import static com.netflix.titus.runtime.relocation.endpoint.RelocationGrpcModelConverters.toGrpcTaskRelocationExecutions;
import static com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate.buildProtobufQueryResult;
@Singleton
public class ReactorTaskRelocationGrpcService {

    private final ReadOnlyJobOperations jobOperations;
    private final RelocationWorkflowExecutor relocationWorkflowExecutor;
    private final TaskRelocationResultStore archiveStore;

    @Inject
    public ReactorTaskRelocationGrpcService(ReadOnlyJobOperations jobOperations,
                                            RelocationWorkflowExecutor relocationWorkflowExecutor,
                                            TaskRelocationResultStore archiveStore) {
        this.jobOperations = jobOperations;
        this.relocationWorkflowExecutor = relocationWorkflowExecutor;
        this.archiveStore = archiveStore;
    }

    /**
     * Returns the currently planned task relocations, filtered by the query criteria.
     * <p>
     * TODO Pagination once the core pagination model with cursor is available.
     */
    public Mono<TaskRelocationPlans> getCurrentTaskRelocationPlans(TaskRelocationQuery request) {
        return Mono.just(buildProtobufQueryResult(jobOperations, relocationWorkflowExecutor, request));
    }

    /**
     * Returns the results of the last eviction iteration.
     * <p>
     * TODO Implement filtering.
     */
    public Mono<TaskRelocationExecutions> getLatestTaskRelocationResults(TaskRelocationQuery request) {
        List<TaskRelocationStatus> coreResults = new ArrayList<>(relocationWorkflowExecutor.getLastEvictionResults().values());
        TaskRelocationExecutions grpcResults = toGrpcTaskRelocationExecutions(coreResults);
        return Mono.just(grpcResults);
    }

    /**
     * Returns the relocation history of one task (archived records plus the latest in-memory
     * status), or a NOT_FOUND error when nothing is known about the task.
     */
    public Mono<TaskRelocationExecution> getTaskRelocationResult(RelocationTaskId request) {
        String taskId = request.getId();
        TaskRelocationStatus latest = relocationWorkflowExecutor.getLastEvictionResults().get(taskId);
        return archiveStore.getTaskRelocationStatusList(taskId).flatMap(
                archived -> {
                    if (latest == null && archived.isEmpty()) {
                        return Mono.error(new StatusRuntimeException(Status.NOT_FOUND));
                    }
                    return Mono.just(RelocationGrpcModelConverters.toGrpcTaskRelocationExecution(combine(latest, archived)));
                });
    }

    /**
     * Streams relocation workflow events, dropping events that have no GRPC representation.
     */
    public Flux<RelocationEvent> observeRelocationEvents(TaskRelocationQuery request) {
        return relocationWorkflowExecutor.events().flatMap(event -> toGrpcRelocationEvent(event).map(Flux::just).orElse(Flux.empty()));
    }

    /**
     * Merges the latest in-memory eviction status with the archived history, avoiding a duplicate
     * entry when the latest status is already the last archived record.
     */
    private static List<TaskRelocationStatus> combine(TaskRelocationStatus latest, List<TaskRelocationStatus> archived) {
        if (latest == null) {
            return archived;
        }
        if (archived.isEmpty()) {
            return Collections.singletonList(latest);
        }
        return CollectionsExt.last(archived).equals(latest)
                ? archived
                : CollectionsExt.copyAndAdd(archived, latest);
    }
}
| 1,518 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/grpc/TaskRelocationGrpcComponent.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.grpc;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcEndpointConfiguration;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowExecutor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring configuration wiring the GRPC endpoint of the task relocation service.
 */
@Configuration
public class TaskRelocationGrpcComponent {

    /**
     * GRPC endpoint configuration bound to the "titus.relocation.endpoint" property prefix
     * of the runtime's Archaius2 environment.
     */
    @Bean
    public GrpcEndpointConfiguration getGrpcEndpointConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(GrpcEndpointConfiguration.class, "titus.relocation.endpoint", titusRuntime.getMyEnvironment());
    }

    /**
     * Reactor-based GRPC service exposing relocation plans and execution results.
     */
    @Bean
    public ReactorTaskRelocationGrpcService getTaskRelocationGrpcService(ReadOnlyJobOperations jobOperations,
                                                                         RelocationWorkflowExecutor workflowExecutor,
                                                                         TaskRelocationResultStore archiveStore) {
        return new ReactorTaskRelocationGrpcService(jobOperations, workflowExecutor, archiveStore);
    }
}
| 1,519 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/grpc/TaskRelocationGrpcServerRunner.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.grpc;
import java.time.Duration;
import java.util.Collections;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.grpc.reactor.GrpcToReactorServerFactory;
import com.netflix.titus.grpc.protogen.TaskRelocationServiceGrpc;
import com.netflix.titus.runtime.clustermembership.activation.LeaderActivationStatus;
import com.netflix.titus.runtime.clustermembership.endpoint.grpc.ClusterMembershipGrpcExceptionMapper;
import com.netflix.titus.runtime.clustermembership.endpoint.grpc.GrpcClusterMembershipService;
import com.netflix.titus.runtime.clustermembership.endpoint.grpc.GrpcLeaderServerInterceptor;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcEndpointConfiguration;
import com.netflix.titus.runtime.endpoint.common.grpc.TitusGrpcServer;
/**
 * Builds and starts the GRPC server that hosts the cluster membership service and the task
 * relocation service. The server is started in the constructor and stopped via {@link #shutdown()}.
 */
@Singleton
public class TaskRelocationGrpcServerRunner {

    private final TitusGrpcServer server;

    @Inject
    public TaskRelocationGrpcServerRunner(GrpcEndpointConfiguration configuration,
                                          LeaderActivationStatus leaderActivationStatus,
                                          GrpcClusterMembershipService grpcClusterMembershipService,
                                          ReactorTaskRelocationGrpcService reactorTaskRelocationGrpcService,
                                          GrpcToReactorServerFactory reactorServerFactory,
                                          TitusRuntime titusRuntime) {
        // The apply(...) hook lets subclasses customize the builder before the common wiring is added.
        this.server = apply(TitusGrpcServer.newBuilder(configuration.getPort(), titusRuntime))
                .withCallMetadataInterceptor()
                .withInterceptor(GrpcLeaderServerInterceptor.clusterMembershipAllowed(leaderActivationStatus))
                .withShutdownTime(Duration.ofMillis(configuration.getShutdownTimeoutMs()))
                // Cluster membership service
                .withServerConfigurer(builder -> builder.addService(grpcClusterMembershipService))
                .withExceptionMapper(ClusterMembershipGrpcExceptionMapper.getInstance())
                // Relocation service
                .withService(
                        reactorServerFactory.apply(
                                TaskRelocationServiceGrpc.getServiceDescriptor(),
                                reactorTaskRelocationGrpcService
                        ),
                        Collections.emptyList()
                )
                .build();
        server.start();
    }

    @PreDestroy
    public void shutdown() {
        server.shutdown();
    }

    public TitusGrpcServer getServer() {
        return server;
    }

    /**
     * Extension point for subclasses to customize the server builder; the default is a no-op.
     */
    protected TitusGrpcServer.Builder apply(TitusGrpcServer.Builder serverBuilder) {
        return serverBuilder;
    }
}
| 1,520 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/rest/TaskRelocationSpringResource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.rest;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.grpc.protogen.TaskRelocationExecution;
import com.netflix.titus.grpc.protogen.TaskRelocationExecutions;
import com.netflix.titus.grpc.protogen.TaskRelocationPlans;
import com.netflix.titus.grpc.protogen.TaskRelocationQuery;
import com.netflix.titus.runtime.relocation.endpoint.RelocationGrpcModelConverters;
import com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowExecutor;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import static com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate.FILTER_APPLICATION_NAME;
import static com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate.FILTER_CAPACITY_GROUP;
import static com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate.FILTER_JOB_IDS;
import static com.netflix.titus.supplementary.relocation.endpoint.TaskRelocationPlanPredicate.FILTER_TASK_IDS;
/**
 * REST facade over the relocation workflow: exposes planned relocations and eviction execution
 * results, mirroring the GRPC endpoint.
 */
@RestController
@RequestMapping(path = "/api/v3/relocation")
public class TaskRelocationSpringResource {

    private final ReadOnlyJobOperations jobOperations;
    private final RelocationWorkflowExecutor relocationWorkflowExecutor;
    private final TaskRelocationResultStore archiveStore;

    @Inject
    public TaskRelocationSpringResource(ReadOnlyJobOperations jobOperations,
                                        RelocationWorkflowExecutor relocationWorkflowExecutor,
                                        TaskRelocationResultStore archiveStore) {
        this.jobOperations = jobOperations;
        this.relocationWorkflowExecutor = relocationWorkflowExecutor;
        this.archiveStore = archiveStore;
    }

    /**
     * Returns the currently planned relocations, optionally filtered by job ids, task ids,
     * application name and/or capacity group. Absent query parameters are ignored.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/plans", produces = "application/json")
    public TaskRelocationPlans getCurrentTaskRelocationPlans(@RequestParam(name = FILTER_JOB_IDS, required = false) String jobIds,
                                                             @RequestParam(name = FILTER_TASK_IDS, required = false) String taskIds,
                                                             @RequestParam(name = FILTER_APPLICATION_NAME, required = false) String applicationName,
                                                             @RequestParam(name = FILTER_CAPACITY_GROUP, required = false) String capacityGroup) {
        TaskRelocationQuery.Builder requestBuilder = TaskRelocationQuery.newBuilder();
        Evaluators.acceptNotNull(jobIds, value -> requestBuilder.putFilteringCriteria(FILTER_JOB_IDS, value));
        Evaluators.acceptNotNull(taskIds, value -> requestBuilder.putFilteringCriteria(FILTER_TASK_IDS, value));
        Evaluators.acceptNotNull(applicationName, value -> requestBuilder.putFilteringCriteria(FILTER_APPLICATION_NAME, value));
        Evaluators.acceptNotNull(capacityGroup, value -> requestBuilder.putFilteringCriteria(FILTER_CAPACITY_GROUP, value));
        return TaskRelocationPlanPredicate.buildProtobufQueryResult(jobOperations, relocationWorkflowExecutor, requestBuilder.build());
    }

    /**
     * Returns the relocation plan of one task, or 404 when the task has no plan.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/plans/{taskId}", produces = "application/json")
    public com.netflix.titus.grpc.protogen.TaskRelocationPlan getTaskRelocationPlan(@PathVariable("taskId") String taskId) {
        TaskRelocationPlan plan = relocationWorkflowExecutor.getPlannedRelocations().get(taskId);
        if (plan != null) {
            return RelocationGrpcModelConverters.toGrpcTaskRelocationPlan(plan);
        }
        throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).build());
    }

    /**
     * Returns the results of the last eviction iteration.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/executions", produces = "application/json")
    public TaskRelocationExecutions getTaskRelocationResults() {
        List<TaskRelocationStatus> coreResults = new ArrayList<>(relocationWorkflowExecutor.getLastEvictionResults().values());
        return RelocationGrpcModelConverters.toGrpcTaskRelocationExecutions(coreResults);
    }

    /**
     * Returns the relocation history of one task (archived records plus the latest in-memory
     * status), or 404 when nothing is known about the task.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/executions/{taskId}", produces = "application/json")
    public TaskRelocationExecution getTaskRelocationResult(@PathVariable("taskId") String taskId) {
        TaskRelocationStatus latest = relocationWorkflowExecutor.getLastEvictionResults().get(taskId);
        List<TaskRelocationStatus> archived = archiveStore.getTaskRelocationStatusList(taskId).block();
        if (latest == null && archived.isEmpty()) {
            throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).build());
        }
        return RelocationGrpcModelConverters.toGrpcTaskRelocationExecution(combine(latest, archived));
    }

    /**
     * Merges the latest in-memory eviction status with the archived history, avoiding a duplicate
     * entry when the latest status is already the last archived record.
     */
    private static List<TaskRelocationStatus> combine(TaskRelocationStatus latest, List<TaskRelocationStatus> archived) {
        if (latest == null) {
            return archived;
        }
        if (archived.isEmpty()) {
            return Collections.singletonList(latest);
        }
        return CollectionsExt.last(archived).equals(latest)
                ? archived
                : CollectionsExt.copyAndAdd(archived, latest);
    }
}
| 1,521 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/endpoint/rest/TaskRelocationExceptionHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.endpoint.rest;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowException;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.context.request.WebRequest;
@ControllerAdvice
public class TaskRelocationExceptionHandler {

    /**
     * Translates a {@link RelocationWorkflowException} into an HTTP response: NotReady maps to
     * 503 with a fixed body, while StoreError (and any other error code) maps to 500 with the
     * exception message as the body.
     */
    @ExceptionHandler(value = {RelocationWorkflowException.class})
    @Order(Ordered.HIGHEST_PRECEDENCE)
    protected ResponseEntity<Object> handleRelocationWorkflowException(RelocationWorkflowException ex, WebRequest request) {
        final HttpStatus status;
        final Object body;
        switch (ex.getErrorCode()) {
            case NotReady:
                status = HttpStatus.SERVICE_UNAVAILABLE;
                body = "Not ready yet";
                break;
            case StoreError:
            default:
                status = HttpStatus.INTERNAL_SERVER_ERROR;
                body = ex.getMessage();
                break;
        }
        return ResponseEntity.status(status).body(body);
    }
}
| 1,522 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/NodeConditionController.java | package com.netflix.titus.supplementary.relocation.workflow;
import com.netflix.titus.api.common.LeaderActivationListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
/**
 * Marker interface for the node-condition controller (see the implementation in this package for
 * behavior). Extends {@link LeaderActivationListener}, so implementations participate in the
 * leader activation lifecycle; no additional methods are declared here.
 */
public interface NodeConditionController extends LeaderActivationListener {
}
| 1,523 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/NodeConditionCtrlMetrics.java | package com.netflix.titus.supplementary.relocation.workflow;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.supplementary.relocation.RelocationMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Spectator gauges reporting the state of the node condition controller: data staleness and the
 * number of terminated tasks.
 */
public class NodeConditionCtrlMetrics {

    // final added: this is a constant metric-name prefix and must not be reassigned.
    public static final String NODE_CONDITION_METRICS_PREFIX = RelocationMetrics.METRIC_ROOT + "nodeConditionCtrl.";

    private final Gauge stalenessStatusGauge;
    private final Gauge stalenessTimeGauge;
    private final Gauge numTasksTerminated;

    public NodeConditionCtrlMetrics(Registry registry) {
        this.stalenessStatusGauge = registry.gauge(NODE_CONDITION_METRICS_PREFIX + "stalenessStatus");
        this.stalenessTimeGauge = registry.gauge(NODE_CONDITION_METRICS_PREFIX + "stalenessMs");
        this.numTasksTerminated = registry.gauge(NODE_CONDITION_METRICS_PREFIX + "numTasksTerminated");
    }

    /**
     * Reports the staleness status (gauge value 1 when stale, 0 otherwise) and the staleness
     * duration in milliseconds.
     */
    void setStaleness(boolean stalenessStatus, long stalenessMs) {
        stalenessStatusGauge.set(stalenessStatus ? 1 : 0);
        stalenessTimeGauge.set(stalenessMs);
    }

    /**
     * Reports the number of terminated tasks.
     */
    void setTasksTerminated(int tasksTerminated) {
        numTasksTerminated.set(tasksTerminated);
    }
}
| 1,524 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/TaskRelocationWorkflowComponent.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.runtime.connector.eviction.EvictionConfiguration;
import com.netflix.titus.runtime.connector.eviction.EvictionDataReplicator;
import com.netflix.titus.runtime.connector.eviction.EvictionServiceClient;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.JobManagementClient;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.descheduler.DeschedulerService;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
/**
 * Spring configuration wiring the task relocation workflow: configuration bindings,
 * the relocation workflow executor, and the node-condition controller.
 */
@Component
public class TaskRelocationWorkflowComponent {

    /**
     * Relocation configuration bound to Archaius2 properties from the runtime environment.
     */
    @Bean
    public RelocationConfiguration getRelocationConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(RelocationConfiguration.class, titusRuntime.getMyEnvironment());
    }

    /**
     * Eviction configuration bound to Archaius2 properties from the runtime environment.
     */
    @Bean
    public EvictionConfiguration getEvictionConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(EvictionConfiguration.class, titusRuntime.getMyEnvironment());
    }

    /**
     * The main relocation workflow executor. Note that it blocks during construction until the
     * replicated caches (nodes, jobs, evictions) are fresh enough to work with.
     */
    @Bean
    public RelocationWorkflowExecutor getRelocationWorkflowExecutor(RelocationConfiguration configuration,
                                                                    NodeDataResolver nodeDataResolver,
                                                                    JobDataReplicator jobDataReplicator,
                                                                    ReadOnlyJobOperations jobOperations,
                                                                    EvictionDataReplicator evictionDataReplicator,
                                                                    EvictionServiceClient evictionServiceClient,
                                                                    DeschedulerService deschedulerService,
                                                                    TaskRelocationStore activeStore,
                                                                    TaskRelocationResultStore archiveStore,
                                                                    TitusRuntime titusRuntime) {
        return new DefaultRelocationWorkflowExecutor(
                configuration,
                nodeDataResolver,
                jobDataReplicator,
                jobOperations,
                evictionDataReplicator,
                evictionServiceClient,
                deschedulerService,
                activeStore,
                archiveStore,
                titusRuntime
        );
    }

    /**
     * Controller that terminates opted-in tasks running on nodes reported to be in bad condition.
     */
    @Bean
    public NodeConditionController getNodeConditionCtrl(RelocationConfiguration configuration,
                                                        NodeDataResolver nodeDataResolver,
                                                        JobDataReplicator jobDataReplicator,
                                                        ReadOnlyJobOperations readOnlyJobOperations,
                                                        JobManagementClient jobManagementClient,
                                                        TitusRuntime titusRuntime) {
        return new DefaultNodeConditionController(configuration, nodeDataResolver,
                jobDataReplicator, readOnlyJobOperations, jobManagementClient, titusRuntime);
    }
}
| 1,525 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/DeschedulingResultLogger.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.common.util.limiter.Limiters;
import com.netflix.titus.common.util.limiter.tokenbucket.TokenBucket;
import com.netflix.titus.common.util.time.Clocks;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.supplementary.relocation.util.RelocationUtil.doFormat;
/**
 * Pretty-prints descheduling results, grouped by agent and split into evictable and
 * not-evictable tasks. Output volume is capped by a token bucket so that frequent
 * workflow iterations do not flood the log.
 */
class DeschedulingResultLogger {

    private static final Logger logger = LoggerFactory.getLogger("DeschedulerLogger");

    // Token bucket refill interval; with capacity 1 this is the minimum gap between log bursts.
    private static final long LOGGING_INTERVAL_MS = 100;

    // Fixed-interval token bucket (capacity 1, one token per refill) used for rate limiting.
    private final TokenBucket loggingTokenBucket = Limiters.createFixedIntervalTokenBucket(
            DeschedulingResultLogger.class.getSimpleName(),
            1,
            1,
            1,
            LOGGING_INTERVAL_MS,
            TimeUnit.MILLISECONDS,
            Clocks.system()
    );

    /**
     * Logs the given descheduling result keyed by task id, unless rate limited.
     */
    void doLog(Map<String, DeschedulingResult> deschedulingResult) {
        if (!loggingTokenBucket.tryTake()) {
            return;
        }
        if (deschedulingResult.isEmpty()) {
            logger.info("Descheduler result: empty");
            return;
        }

        // Group results by agent instance id, separating evictable from not-evictable tasks.
        Map<String, List<DeschedulingResult>> byAgentEvictable = new HashMap<>();
        Map<String, List<DeschedulingResult>> byAgentNotEvictable = new HashMap<>();
        deschedulingResult.values().forEach(d -> {
            if (d.canEvict()) {
                byAgentEvictable.computeIfAbsent(d.getAgentInstance().getId(), i -> new ArrayList<>()).add(d);
            } else {
                byAgentNotEvictable.computeIfAbsent(d.getAgentInstance().getId(), i -> new ArrayList<>()).add(d);
            }
        });

        long toEvictCount = deschedulingResult.values().stream().filter(DeschedulingResult::canEvict).count();
        long failureCount = deschedulingResult.size() - toEvictCount;

        logger.info("Descheduler result: evictable={}, failures={}", toEvictCount, failureCount);
        if (toEvictCount > 0) {
            logger.info(" Evictable tasks:");
            byAgentEvictable.forEach((agentId, results) -> {
                logger.info(" Agent({}):", agentId);
                results.forEach(result ->
                        logger.info(" task({}): {}", result.getTask().getId(), doFormat(result.getTaskRelocationPlan()))
                );
            });
        }
        if (failureCount > 0) {
            logger.info(" Not evictable tasks (failures):");
            byAgentNotEvictable.forEach((agentId, results) -> {
                logger.info(" Agent({}):", agentId);
                results.forEach(result ->
                        logger.info(" task({}): failure={}, plan={}",
                                result.getTask().getId(),
                                // Fix: avoid unchecked Optional.get(); a not-evictable result without a
                                // recorded failure must not abort the whole logging loop.
                                result.getFailure().map(failure -> failure.getReasonMessage()).orElse("unknown"),
                                doFormat(result.getTaskRelocationPlan())
                        )
                );
            });
        }
    }
}
| 1,526 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/DefaultNodeConditionController.java | package com.netflix.titus.supplementary.relocation.workflow;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.Caller;
import com.netflix.titus.api.model.callmetadata.CallerType;
import com.netflix.titus.common.framework.scheduler.ExecutionContext;
import com.netflix.titus.common.framework.scheduler.ScheduleReference;
import com.netflix.titus.common.framework.scheduler.model.ScheduleDescriptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.retry.Retryers;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.JobManagementClient;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
/**
 * Control loop that terminates tasks running on nodes reported to be in a bad condition.
 * Only jobs that opt in via the {@code JOB_PARAMETER_TERMINATE_ON_BAD_AGENT} attribute are
 * affected, and actual terminations additionally require
 * {@code RelocationConfiguration#isTaskTerminationOnBadNodeConditionEnabled()}.
 */
public class DefaultNodeConditionController implements NodeConditionController {

    private static final Logger logger = LoggerFactory.getLogger(DefaultNodeConditionController.class);

    private final RelocationConfiguration configuration;
    private final NodeDataResolver nodeDataResolver;
    private final JobDataReplicator jobDataReplicator;
    private final ReadOnlyJobOperations jobOperations;
    private final JobManagementClient jobManagementClient;
    private final NodeConditionCtrlMetrics metrics;
    private final TitusRuntime titusRuntime;

    // CALLER_APP_ID and CALL_REASON are compile-time String constants, so referencing them from
    // buildCallMetadata() ahead of their textual declaration is safe (JLS "constant variables" are inlined).
    private static final CallMetadata CALL_METADATA = buildCallMetadata();
    private static final String CALLER_APP_ID = "titusrelocation";
    private static final String CALL_REASON = "This task was automatically terminated because the underlying host had issues.";

    // Handle to the scheduled control loop; null until activate() is called.
    private ScheduleReference scheduleReference;

    public DefaultNodeConditionController(RelocationConfiguration relocationConfiguration,
                                          NodeDataResolver nodeDataResolver,
                                          JobDataReplicator jobDataReplicator,
                                          ReadOnlyJobOperations jobOperations,
                                          JobManagementClient jobManagementClient,
                                          TitusRuntime titusRuntime) {
        this.configuration = relocationConfiguration;
        this.nodeDataResolver = nodeDataResolver;
        this.jobDataReplicator = jobDataReplicator;
        this.jobOperations = jobOperations;
        this.jobManagementClient = jobManagementClient;
        this.metrics = new NodeConditionCtrlMetrics(titusRuntime.getRegistry());
        this.titusRuntime = titusRuntime;
    }

    /**
     * Schedules the control loop on the local scheduler with exponential backoff retries.
     */
    @Override
    public void activate() {
        logger.info("Activating DefaultNodeConditionController");
        ScheduleDescriptor nodeConditionControlLoopSchedulerDescriptor = ScheduleDescriptor.newBuilder()
                .withName("nodeConditionCtrl")
                .withDescription("Node Condition control loop")
                .withInitialDelay(Duration.ZERO)
                .withInterval(Duration.ofMillis(configuration.getNodeConditionControlLoopIntervalMs()))
                .withTimeout(Duration.ofMillis(configuration.getNodeConditionControlLoopTimeoutMs()))
                .withRetryerSupplier(() -> Retryers.exponentialBackoff(1, 5, TimeUnit.MINUTES))
                .build();
        this.scheduleReference = titusRuntime.getLocalScheduler().scheduleMono(nodeConditionControlLoopSchedulerDescriptor,
                this::handleNodesWithBadCondition, Schedulers.parallel());
    }

    @Override
    public void deactivate() {
        if (scheduleReference != null) {
            scheduleReference.cancel();
        }
    }

    /**
     * One control loop iteration: skips the cycle entirely when replicated data is stale.
     */
    @VisibleForTesting
    Mono<Void> handleNodesWithBadCondition(ExecutionContext executionContext) {
        int iterationCount = executionContext.getExecutionId().getTotal();
        logger.debug("Starting node condition controller iteration {} ...", iterationCount);
        if (hasStaleData()) {
            logger.info("Stale data. Skipping the node condition control loop iteration- {} ", iterationCount);
            return Mono.empty();
        }
        return handleNodesWithBadCondition();
    }

    // Updates staleness metrics as a side effect.
    private boolean hasStaleData() {
        long dataStaleness = getDataStalenessMs();
        boolean stale = dataStaleness > configuration.getDataStalenessThresholdMs();
        metrics.setStaleness(stale, dataStaleness);
        return stale;
    }

    // Worst-case staleness across the node and job replicated caches.
    private long getDataStalenessMs() {
        return Math.max(nodeDataResolver.getStalenessMs(), jobDataReplicator.getStalenessMs());
    }

    /**
     * Finds tasks of opted-in jobs running on bad-condition nodes and terminates them one per
     * second. When terminations are disabled by configuration, only logs what would happen.
     */
    public Mono<Void> handleNodesWithBadCondition() {
        // Identify bad nodes from node resolver
        Map<String, TitusNode> badConditionNodesById = nodeDataResolver.resolve().entrySet().stream().filter(nodeEntry -> nodeEntry.getValue().isInBadCondition())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        // Find jobs that are eligible for bad node condition treatment
        Set<String> eligibleJobIds = jobOperations.getJobs().stream()
                .filter(job -> {
                    JobDescriptor<?> jobDescriptor = job.getJobDescriptor();
                    Map<String, String> jobAttributes = jobDescriptor.getAttributes();
                    if (jobAttributes.containsKey(JobAttributes.JOB_PARAMETER_TERMINATE_ON_BAD_AGENT)) {
                        String value = jobAttributes.get(JobAttributes.JOB_PARAMETER_TERMINATE_ON_BAD_AGENT);
                        return Boolean.parseBoolean(value);
                    }
                    return false;
                })
                .map(Job::getId)
                .collect(Collectors.toSet());

        if (eligibleJobIds.isEmpty()) {
            logger.info("No jobs configured for task terminations on bad node conditions");
            metrics.setTasksTerminated(0);
            return Mono.empty();
        }

        // Find eligible tasks that are running on the bad condition nodes
        List<String> eligibleTaskIds = RelocationUtil.buildTasksFromNodesAndJobsFilter(badConditionNodesById, eligibleJobIds, jobOperations);
        if (configuration.isTaskTerminationOnBadNodeConditionEnabled()) {
            // Terminate tasks directly using JobManagementClient.
            // NOTE(review): a single failed killTask terminates the whole Flux, skipping the remaining
            // tasks until the next scheduled iteration, and setTasksTerminated then reports the full
            // list size only on complete — confirm this all-or-nothing accounting is intended.
            return Flux.fromIterable(eligibleTaskIds)
                    .delayElements(Duration.ofSeconds(1))
                    .flatMap(taskId -> {
                        return jobManagementClient.killTask(taskId, false, CALL_METADATA)
                                .doOnSuccess(v -> logger.info("Task {} terminated", taskId));
                    })
                    .doOnComplete(() -> metrics.setTasksTerminated(eligibleTaskIds.size()))
                    .doOnError(e -> logger.error("Exception terminating task ", e))
                    .then();
        } else {
            logger.info("Skipping {} task terminations on bad node conditions", eligibleTaskIds.size());
            metrics.setTasksTerminated(0);
        }
        return Mono.empty();
    }

    // Fixed call metadata attached to every kill request issued by this controller.
    private static CallMetadata buildCallMetadata() {
        Caller caller = Caller.newBuilder().withCallerType(CallerType.Application).withId(CALLER_APP_ID).build();
        return CallMetadata.newBuilder().withCallers(Collections.singletonList(caller)).withCallReason(CALL_REASON).build();
    }
}
| 1,527 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/WorkflowMetrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.supplementary.relocation.RelocationMetrics;
/**
 * Staleness metrics for the relocation workflow, published under the evacuation metrics prefix.
 */
class WorkflowMetrics {

    // Fix: declared final — this is a constant metric-name prefix and must not be reassignable.
    public static final String EVACUATION_METRICS = RelocationMetrics.METRIC_ROOT + "evacuation.";

    private final Registry registry;

    // 1 when replicated data is considered stale, 0 otherwise.
    private final Gauge stalenessStatusGauge;
    // Staleness of the replicated data in milliseconds.
    private final Gauge stalenessTimeGauge;

    WorkflowMetrics(TitusRuntime titusRuntime) {
        this.registry = titusRuntime.getRegistry();
        this.stalenessStatusGauge = registry.gauge(EVACUATION_METRICS + "stalenessStatus");
        this.stalenessTimeGauge = registry.gauge(EVACUATION_METRICS + "stalenessMs");
    }

    /**
     * Records the current staleness status and value (ms).
     */
    void setStaleness(boolean stalenessStatus, long stalenessMs) {
        stalenessStatusGauge.set(stalenessStatus ? 1 : 0);
        stalenessTimeGauge.set(stalenessMs);
    }
}
| 1,528 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/RelocationWorkflowException.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
/**
 * Exception raised by the relocation workflow. Instances are created only through the
 * static factory methods, each of which tags the exception with an {@link ErrorCode}
 * so callers can dispatch on the failure category.
 */
public class RelocationWorkflowException extends RuntimeException {

    /** Categories of workflow failures. */
    public enum ErrorCode {
        NotReady,
        StoreError,
    }

    private final ErrorCode errorCode;

    private RelocationWorkflowException(ErrorCode code, String detail, Throwable rootCause) {
        super(detail, rootCause);
        this.errorCode = code;
    }

    /** The category of this failure. */
    public ErrorCode getErrorCode() {
        return errorCode;
    }

    /**
     * The workflow has not produced its first relocation plan yet.
     */
    public static RelocationWorkflowException notReady() {
        return new RelocationWorkflowException(ErrorCode.NotReady, "Relocation workflow not ready yet", null);
    }

    /**
     * A persistence operation failed; the original store error is kept as the cause.
     */
    public static RelocationWorkflowException storeError(String message, Throwable cause) {
        return new RelocationWorkflowException(ErrorCode.StoreError, message, cause);
    }
}
| 1,529 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/DefaultRelocationWorkflowExecutor.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.base.Stopwatch;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus.TaskRelocationState;
import com.netflix.titus.api.relocation.model.event.TaskRelocationEvent;
import com.netflix.titus.common.framework.scheduler.ExecutionContext;
import com.netflix.titus.common.framework.scheduler.ScheduleReference;
import com.netflix.titus.common.framework.scheduler.model.ScheduleDescriptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.IOExt;
import com.netflix.titus.common.util.retry.Retryers;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.runtime.connector.eviction.EvictionDataReplicator;
import com.netflix.titus.runtime.connector.eviction.EvictionServiceClient;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.descheduler.DeschedulerService;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import com.netflix.titus.supplementary.relocation.workflow.step.DeschedulerStep;
import com.netflix.titus.supplementary.relocation.workflow.step.MustBeRelocatedSelfManagedTaskCollectorStep;
import com.netflix.titus.supplementary.relocation.workflow.step.MustBeRelocatedTaskStoreUpdateStep;
import com.netflix.titus.supplementary.relocation.workflow.step.RelocationMetricsStep;
import com.netflix.titus.supplementary.relocation.workflow.step.RelocationTransactionLogger;
import com.netflix.titus.supplementary.relocation.workflow.step.TaskEvictionResultStoreStep;
import com.netflix.titus.supplementary.relocation.workflow.step.TaskEvictionStep;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.ReplayProcessor;
import reactor.core.scheduler.Schedulers;
/**
 * Drives the task relocation workflow: collects tasks that must be relocated, persists the
 * plans, periodically runs descheduling, evicts the selected tasks and archives the results.
 * Construction blocks until the replicated caches (nodes, jobs, evictions) are fresh enough.
 */
@Singleton
public class DefaultRelocationWorkflowExecutor implements RelocationWorkflowExecutor {

    private static final Logger logger = LoggerFactory.getLogger(DefaultRelocationWorkflowExecutor.class);

    // Replicated caches fresher than this (ms) are considered ready during bootstrap.
    private static final long STALENESS_THRESHOLD_MS = 30_000;

    /**
     * A marker object distinguishing "no plans computed yet" from "nothing to relocate"
     * (do not optimize by changing the value to {@link Collections#emptyList()}).
     */
    private static final Map<String, TaskRelocationPlan> PLANS_NOT_READY = new HashMap<>();

    private final RelocationConfiguration configuration;
    private final NodeDataResolver nodeDataResolver;
    private final JobDataReplicator jobDataReplicator;
    private final EvictionDataReplicator evictionDataReplicator;
    private final TitusRuntime titusRuntime;
    private final WorkflowMetrics metrics;

    // Handle to the scheduled workflow loop; assigned in activate(), cancelled on deactivate/shutdown.
    private ScheduleReference localSchedulerDisposable;

    // Individual workflow steps, executed in order by doWork().
    private final RelocationMetricsStep relocationMetricsStep;
    private final MustBeRelocatedSelfManagedTaskCollectorStep mustBeRelocatedSelfManagedTaskCollectorStep;
    private final DeschedulerStep deschedulerStep;
    private final MustBeRelocatedTaskStoreUpdateStep mustBeRelocatedTaskStoreUpdateStep;
    private final TaskEvictionResultStoreStep taskEvictionResultStoreStep;
    private final TaskEvictionStep taskEvictionStep;

    private final DeschedulingResultLogger deschedulingResultLogger;

    // Mutable snapshots of the latest iteration, read by the getters below (hence volatile).
    private volatile long lastDeschedulingTimestamp;
    private volatile Map<String, TaskRelocationPlan> lastRelocationPlan = PLANS_NOT_READY;
    private volatile Map<String, TaskRelocationPlan> lastEvictionPlan = Collections.emptyMap();
    private volatile Map<String, TaskRelocationStatus> lastEvictionResult = Collections.emptyMap();

    // Replays the latest relocation plan list to new event subscribers.
    private final ReplayProcessor<List<TaskRelocationPlan>> newRelocationPlanEmitter = ReplayProcessor.create(1);

    @Inject
    public DefaultRelocationWorkflowExecutor(RelocationConfiguration configuration,
                                             NodeDataResolver nodeDataResolver,
                                             JobDataReplicator jobDataReplicator,
                                             ReadOnlyJobOperations jobOperations,
                                             EvictionDataReplicator evictionDataReplicator,
                                             EvictionServiceClient evictionServiceClient,
                                             DeschedulerService deschedulerService,
                                             TaskRelocationStore activeStore,
                                             TaskRelocationResultStore archiveStore,
                                             TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.nodeDataResolver = nodeDataResolver;
        this.jobDataReplicator = jobDataReplicator;
        this.evictionDataReplicator = evictionDataReplicator;
        this.metrics = new WorkflowMetrics(titusRuntime);
        this.titusRuntime = titusRuntime;

        // Seed the emitter so subscribers get an initial (empty) snapshot.
        newRelocationPlanEmitter.onNext(Collections.emptyList());

        // NOTE: blocks the constructor until all replicated caches are fresh enough.
        ensureReplicatorsReady();

        RelocationTransactionLogger transactionLog = new RelocationTransactionLogger(jobOperations);
        this.relocationMetricsStep = new RelocationMetricsStep(nodeDataResolver, jobOperations, titusRuntime);
        this.mustBeRelocatedSelfManagedTaskCollectorStep = new MustBeRelocatedSelfManagedTaskCollectorStep(nodeDataResolver, jobOperations, titusRuntime);
        this.mustBeRelocatedTaskStoreUpdateStep = new MustBeRelocatedTaskStoreUpdateStep(configuration, activeStore, transactionLog, titusRuntime);
        this.deschedulerStep = new DeschedulerStep(deschedulerService, transactionLog, titusRuntime);
        this.taskEvictionStep = new TaskEvictionStep(evictionServiceClient, titusRuntime, transactionLog, Schedulers.parallel());
        this.taskEvictionResultStoreStep = new TaskEvictionResultStoreStep(configuration, archiveStore, transactionLog, titusRuntime);

        this.lastDeschedulingTimestamp = titusRuntime.getClock().wallTime();

        this.deschedulingResultLogger = new DeschedulingResultLogger();
    }

    /**
     * Schedules the relocation workflow loop on the local scheduler (called on leader election).
     */
    @Override
    public void activate() {
        ScheduleDescriptor relocationScheduleDescriptor = ScheduleDescriptor.newBuilder()
                .withName("relocationWorkflow")
                .withDescription("Task relocation scheduler")
                .withInitialDelay(Duration.ZERO)
                .withInterval(Duration.ofMillis(configuration.getRelocationScheduleIntervalMs()))
                .withTimeout(Duration.ofMillis(configuration.getRelocationTimeoutMs()))
                .withRetryerSupplier(() -> Retryers.exponentialBackoff(1, 5, TimeUnit.MINUTES))
                .build();
        this.localSchedulerDisposable = titusRuntime.getLocalScheduler().schedule(relocationScheduleDescriptor, this::nextRelocationStep, true);
    }

    @Override
    public void deactivate() {
        localSchedulerDisposable.cancel();
    }

    /**
     * Replicated caches start with empty snapshots and infinitely long staleness. We cannot proceed with
     * the workflow setup until we have the caches ready, so we block here.
     * TODO This should be handled in more generic way, and be part of the replicated caches toolkit.
     */
    private void ensureReplicatorsReady() {
        boolean agentsReady = false;
        boolean jobsReady = false;
        boolean evictionsReady = false;
        while (!(agentsReady && jobsReady && evictionsReady)) {
            // Each flag latches on once its cache has been fresh at least once.
            agentsReady = agentsReady || nodeDataResolver.getStalenessMs() < STALENESS_THRESHOLD_MS;
            jobsReady = jobsReady || jobDataReplicator.getStalenessMs() < STALENESS_THRESHOLD_MS;
            evictionsReady = evictionsReady || evictionDataReplicator.getStalenessMs() < STALENESS_THRESHOLD_MS;
            if (!(agentsReady && jobsReady && evictionsReady)) {
                logger.info("Replicated caches not ready: agentsReady={}, jobsReady={}, evictionReady={}", agentsReady, jobsReady, evictionsReady);
                try {
                    Thread.sleep(2_000);
                } catch (InterruptedException e) {
                    throw new IllegalStateException("Bootstrap process terminated");
                }
            }
        }
    }

    @PreDestroy
    public void shutdown() {
        IOExt.closeSilently(newRelocationPlanEmitter::dispose, localSchedulerDisposable::cancel);
    }

    /**
     * Latest relocation plans keyed by task id.
     *
     * @throws RelocationWorkflowException {@code NotReady} before the first plan is computed
     */
    @Override
    public Map<String, TaskRelocationPlan> getPlannedRelocations() {
        if (lastRelocationPlan == PLANS_NOT_READY) {
            throw RelocationWorkflowException.notReady();
        }
        // NOTE(review): unlike the other getters, this returns the internal mutable map, which
        // doWork() also mutates (remove of evicted tasks) — confirm callers treat it as read-only.
        return lastRelocationPlan;
    }

    @Override
    public Map<String, TaskRelocationPlan> getLastEvictionPlan() {
        return Collections.unmodifiableMap(lastEvictionPlan);
    }

    @Override
    public Map<String, TaskRelocationStatus> getLastEvictionResults() {
        return Collections.unmodifiableMap(lastEvictionResult);
    }

    /**
     * Stream of relocation plan updates/removals, starting with a snapshot (ends with a
     * snapshot-end marker event).
     */
    @Override
    public Flux<TaskRelocationEvent> events() {
        return ReactorExt.protectFromMissingExceptionHandlers(newRelocationPlanEmitter, logger)
                .transformDeferred(ReactorExt.eventEmitter(
                        TaskRelocationPlan::getTaskId,
                        TaskRelocationPlan::equals,
                        TaskRelocationEvent::taskRelocationPlanUpdated,
                        removedPlan -> TaskRelocationEvent.taskRelocationPlanRemoved(removedPlan.getTaskId()),
                        TaskRelocationEvent.newSnapshotEndEvent()
                ));
    }

    // One scheduler tick: runs doWork() and records the descheduling timestamp when applicable.
    private void nextRelocationStep(ExecutionContext executionContext) {
        long count = executionContext.getExecutionId().getTotal();
        // Descheduling runs only when its own (longer) interval has elapsed.
        boolean descheduling = titusRuntime.getClock().isPast(lastDeschedulingTimestamp + configuration.getDeschedulingIntervalMs());
        logger.info("Starting task relocation iteration {} (descheduling={})...", count, descheduling);

        Stopwatch stopwatch = Stopwatch.createStarted();
        boolean executed = false;
        try {
            executed = doWork(descheduling);
            logger.info("Task relocation iteration {} finished in {}sec", count, stopwatch.elapsed(TimeUnit.SECONDS));
        } catch (Exception e) {
            logger.error("Task relocation iteration {} failed after {}sec", count, stopwatch.elapsed(TimeUnit.SECONDS), e);
        }
        if (executed && descheduling) {
            this.lastDeschedulingTimestamp = titusRuntime.getClock().wallTime();
        }
    }

    // Runs one full workflow iteration; returns false when skipped due to stale data.
    private boolean doWork(boolean descheduling) {
        if (hasStaleData()) {
            logger.info("Stale data. Skipping the task relocation iteration");
            return false;
        }

        // Metrics
        relocationMetricsStep.updateMetrics();

        // Self managed relocation plans
        Map<String, TaskRelocationPlan> newSelfManagedRelocationPlan = mustBeRelocatedSelfManagedTaskCollectorStep.collectTasksThatMustBeRelocated();
        this.lastRelocationPlan = mustBeRelocatedTaskStoreUpdateStep.persistChangesInStore(newSelfManagedRelocationPlan);
        newRelocationPlanEmitter.onNext(new ArrayList<>(lastRelocationPlan.values()));

        if (descheduling) {
            // Descheduling
            Map<String, DeschedulingResult> deschedulingResult = deschedulerStep.deschedule(this.lastRelocationPlan);
            this.lastEvictionPlan = deschedulingResult.values().stream()
                    .filter(DeschedulingResult::canEvict)
                    .collect(Collectors.toMap(d -> d.getTask().getId(), DeschedulingResult::getTaskRelocationPlan));

            deschedulingResultLogger.doLog(deschedulingResult);

            // Eviction
            this.lastEvictionResult = taskEvictionStep.evict(lastEvictionPlan);
            taskEvictionResultStoreStep.storeTaskEvictionResults(lastEvictionResult);

            // Remove relocation plans for tasks that were successfully evicted.
            lastEvictionResult.forEach((taskId, status) -> {
                if (status.getState() == TaskRelocationState.Success) {
                    lastRelocationPlan.remove(taskId);
                }
            });
        }

        return true;
    }

    // Updates staleness metrics as a side effect.
    private boolean hasStaleData() {
        long dataStaleness = getDataStalenessMs();
        boolean stale = dataStaleness > configuration.getDataStalenessThresholdMs();
        metrics.setStaleness(stale, dataStaleness);
        return stale;
    }

    // Worst-case staleness across all three replicated caches.
    private long getDataStalenessMs() {
        return Math.max(nodeDataResolver.getStalenessMs(), Math.max(jobDataReplicator.getStalenessMs(), evictionDataReplicator.getStalenessMs()));
    }
}
| 1,530 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/RelocationWorkflowExecutor.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow;
import java.util.Map;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.api.relocation.model.event.TaskRelocationEvent;
import reactor.core.publisher.Flux;
/**
 * Entry point to the task relocation workflow. Implementations are activated/deactivated via
 * the inherited {@link LeaderActivationListener} callbacks.
 */
public interface RelocationWorkflowExecutor extends LeaderActivationListener {

    /**
     * Latest relocation plans keyed by task id. May throw when the workflow has not produced
     * its first plan yet (see implementation).
     */
    Map<String, TaskRelocationPlan> getPlannedRelocations();

    /**
     * Eviction plan (task id to plan) produced by the most recent descheduling round.
     */
    Map<String, TaskRelocationPlan> getLastEvictionPlan();

    /**
     * Outcome of the most recent eviction round, keyed by task id.
     */
    Map<String, TaskRelocationStatus> getLastEvictionResults();

    /**
     * Stream of relocation plan update/removal events.
     */
    Flux<TaskRelocationEvent> events();
}
| 1,531 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/TaskEvictionStep.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.google.common.base.Stopwatch;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus.TaskRelocationState;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.DateTimeExt;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.common.util.code.CodeInvariants;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.runtime.connector.eviction.EvictionServiceClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
/**
 * In this step, all tasks that were selected for termination, are terminated
 * through the eviction service.
 */
public class TaskEvictionStep {

    private static final Logger logger = LoggerFactory.getLogger(TaskEvictionStep.class);

    /** Maximum number of eviction calls in flight at the same time. */
    private static final int CONCURRENCY_LIMIT = 20;

    /** Timeout applied to each individual eviction service call. */
    private static final Duration EVICTION_TIMEOUT = Duration.ofSeconds(5);

    private static final String STEP_NAME = "taskEvictionStep";

    private final EvictionServiceClient evictionServiceClient;
    private final CodeInvariants invariants;
    private final RelocationTransactionLogger transactionLog;
    private final Scheduler scheduler;
    private final Clock clock;
    private final StepMetrics metrics;

    public TaskEvictionStep(EvictionServiceClient evictionServiceClient,
                            TitusRuntime titusRuntime,
                            RelocationTransactionLogger transactionLog,
                            Scheduler scheduler) {
        this.evictionServiceClient = evictionServiceClient;
        this.transactionLog = transactionLog;
        this.scheduler = scheduler;
        this.invariants = titusRuntime.getCodeInvariants();
        this.clock = titusRuntime.getClock();
        this.metrics = new StepMetrics(STEP_NAME, titusRuntime);
    }

    /**
     * Terminates all tasks in the given map.
     *
     * @param taskToEvict relocation plans of the tasks to terminate, keyed by task id
     * @return per-task relocation status (success or failure), keyed by task id
     */
    public Map<String, TaskRelocationStatus> evict(Map<String, TaskRelocationPlan> taskToEvict) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            Map<String, TaskRelocationStatus> result = execute(taskToEvict);
            metrics.onSuccess(result.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
            logger.debug("Eviction result: {}", result);
            return result;
        } catch (Exception e) {
            logger.error("Step processing error", e);
            metrics.onError(stopwatch.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    private Map<String, TaskRelocationStatus> execute(Map<String, TaskRelocationPlan> taskToEvict) {
        // One eviction action per task, each with its own timeout.
        Map<String, Mono<Void>> actions = taskToEvict.values().stream()
                .collect(Collectors.toMap(
                        TaskRelocationPlan::getTaskId,
                        p -> evictionServiceClient.terminateTask(p.getTaskId(), toReasonMessage(p)).timeout(EVICTION_TIMEOUT)
                ));

        Map<String, Optional<Throwable>> evictionResults;
        try {
            evictionResults = ReactorExt.merge(actions, CONCURRENCY_LIMIT, scheduler).block();
        } catch (Exception e) {
            logger.warn("Unexpected error when calling the eviction service", e);
            // Mark every task as failed. Fixed: the original omitted the task id and plan here,
            // which made the toMap collector below fail on duplicate (null) keys.
            return taskToEvict.values().stream()
                    .map(p -> TaskRelocationStatus.newBuilder()
                            .withTaskId(p.getTaskId())
                            .withState(TaskRelocationState.Failure)
                            .withStatusCode(TaskRelocationStatus.STATUS_SYSTEM_ERROR)
                            .withStatusMessage("Unexpected error: " + ExceptionExt.toMessageChain(e))
                            .withTaskRelocationPlan(p)
                            .withTimestamp(clock.wallTime())
                            .build()
                    )
                    .collect(Collectors.toMap(TaskRelocationStatus::getTaskId, s -> s));
        }

        // Map each per-task eviction outcome to a relocation status and log it.
        Map<String, TaskRelocationStatus> results = new HashMap<>();
        taskToEvict.forEach((taskId, plan) -> {
            Optional<Throwable> evictionResult = evictionResults.get(plan.getTaskId());

            TaskRelocationStatus status;
            if (evictionResult != null) {
                if (!evictionResult.isPresent()) {
                    status = TaskRelocationStatus.newBuilder()
                            .withTaskId(taskId)
                            .withState(TaskRelocationState.Success)
                            .withStatusCode(TaskRelocationStatus.STATUS_CODE_TERMINATED)
                            .withStatusMessage("Task terminated successfully")
                            .withTaskRelocationPlan(plan)
                            .withTimestamp(clock.wallTime())
                            .build();
                } else {
                    status = TaskRelocationStatus.newBuilder()
                            .withTaskId(taskId)
                            .withState(TaskRelocationState.Failure)
                            .withStatusCode(TaskRelocationStatus.STATUS_EVICTION_ERROR)
                            .withStatusMessage(evictionResult.get().getMessage())
                            .withTaskRelocationPlan(plan)
                            .withTimestamp(clock.wallTime())
                            .build();
                }
            } else {
                // This should never happen
                invariants.inconsistent("Eviction result missing: taskId=%s", plan.getTaskId());
                status = TaskRelocationStatus.newBuilder()
                        .withTaskId(taskId)
                        .withState(TaskRelocationState.Failure)
                        .withStatusCode(TaskRelocationStatus.STATUS_SYSTEM_ERROR)
                        .withStatusMessage("Eviction result missing")
                        .withTaskRelocationPlan(plan)
                        .withTimestamp(clock.wallTime())
                        .build();
            }
            results.put(taskId, status);
            transactionLog.logTaskRelocationStatus(STEP_NAME, "eviction", status);
        });

        return results;
    }

    /** Builds the human-readable termination reason passed to the eviction service. */
    private static String toReasonMessage(TaskRelocationPlan p) {
        switch (p.getReason()) {
            case AgentEvacuation:
                return String.format("Agent evacuation: %s", p.getReasonMessage());
            case SelfManagedMigration:
                return String.format("Self managed migration requested on %s: %s", DateTimeExt.toUtcDateTimeString(p.getDecisionTime()), p.getReasonMessage());
            case TaskMigration:
                return p.getReasonMessage();
            default:
                // Fixed: the original concatenated the reason onto the format string instead of
                // passing it as a format argument, producing a garbled message.
                return String.format("[unrecognized relocation reason %s]: %s", p.getReason(), p.getReasonMessage());
        }
    }
}
| 1,532 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/MustBeRelocatedSelfManagedTaskCollectorStep.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.common.util.tuple.Triple;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.api.relocation.model.RelocationFunctions.areEqualExceptRelocationTime;
import static com.netflix.titus.supplementary.relocation.util.RelocationPredicates.checkIfNeedsRelocationPlan;
/**
 * Step at which all self managed containers that are requested to terminate are identified,
 * and their relocation timestamps are set.
 */
public class MustBeRelocatedSelfManagedTaskCollectorStep {

    private static final Logger logger = LoggerFactory.getLogger(MustBeRelocatedSelfManagedTaskCollectorStep.class);

    private final NodeDataResolver nodeDataResolver;
    private final ReadOnlyJobOperations jobOperations;
    private final StepMetrics metrics;
    private final Clock clock;

    // Plans from the previous invocation; consulted so earlier relocation deadlines stay stable.
    private Map<String, TaskRelocationPlan> lastResult = Collections.emptyMap();

    public MustBeRelocatedSelfManagedTaskCollectorStep(NodeDataResolver nodeDataResolver,
                                                       ReadOnlyJobOperations jobOperations,
                                                       TitusRuntime titusRuntime) {
        this.nodeDataResolver = nodeDataResolver;
        this.jobOperations = jobOperations;
        this.clock = titusRuntime.getClock();
        this.metrics = new StepMetrics("mustBeRelocatedTaskCollectorStep", titusRuntime);
    }

    /**
     * Computes relocation plans for all tasks that must be moved off their current agents,
     * recording step metrics for the invocation.
     */
    public Map<String, TaskRelocationPlan> collectTasksThatMustBeRelocated() {
        Stopwatch timer = Stopwatch.createStarted();
        try {
            Map<String, TaskRelocationPlan> plans = buildRelocationPlans();
            metrics.onSuccess(plans.size(), timer.elapsed(TimeUnit.MILLISECONDS));
            logger.debug("Step results: {}", plans);
            return plans;
        } catch (Exception e) {
            logger.error("Step processing error", e);
            metrics.onError(timer.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    /** Evaluates every (job, task, node) candidate and builds a plan where one is required. */
    private Map<String, TaskRelocationPlan> buildRelocationPlans() {
        Map<String, TitusNode> nodes = nodeDataResolver.resolve();
        List<Triple<Job<?>, Task, TitusNode>> candidates = findAllJobTaskAgentTriples(nodes);
        Map<String, TaskRelocationPlan> plans = new HashMap<>();

        logger.debug("Number of triplets to check: {}", candidates.size());
        for (Triple<Job<?>, Task, TitusNode> candidate : candidates) {
            Job<?> job = candidate.getFirst();
            Task task = candidate.getSecond();
            TitusNode node = candidate.getThird();
            checkIfNeedsRelocationPlan(job, task, node).ifPresent(reason ->
                    plans.put(task.getId(), buildSelfManagedRelocationPlan(job, task, reason))
            );
        }

        this.lastResult = plans;
        return plans;
    }

    /** Pairs every active (StartInitiated/Started) task with its job and the node it runs on. */
    private List<Triple<Job<?>, Task, TitusNode>> findAllJobTaskAgentTriples(Map<String, TitusNode> nodes) {
        Map<String, TitusNode> taskToInstanceMap = RelocationUtil.buildTasksToInstanceMap(nodes, jobOperations);

        List<Triple<Job<?>, Task, TitusNode>> triples = new ArrayList<>();
        for (Job<?> job : jobOperations.getJobs()) {
            for (Task task : jobOperations.getTasks(job.getId())) {
                TaskState state = task.getStatus().getState();
                if (state != TaskState.StartInitiated && state != TaskState.Started) {
                    continue;
                }
                TitusNode node = taskToInstanceMap.get(task.getId());
                if (node == null) {
                    logger.debug("Task in active state with no agent instance: taskId={}, state={}", task.getId(), task.getStatus().getState());
                } else {
                    triples.add(Triple.of(job, task, node));
                }
            }
        }
        return triples;
    }

    /**
     * Relocation plans today are limited to self managed policies.
     */
    private TaskRelocationPlan buildSelfManagedRelocationPlan(Job<?> job, Task task, String reason) {
        long now = clock.wallTime();
        SelfManagedDisruptionBudgetPolicy policy =
                (SelfManagedDisruptionBudgetPolicy) job.getJobDescriptor().getDisruptionBudget().getDisruptionBudgetPolicy();

        TaskRelocationPlan freshPlan = TaskRelocationPlan.newBuilder()
                .withTaskId(task.getId())
                .withReason(TaskRelocationReason.SelfManagedMigration)
                .withReasonMessage(reason)
                .withDecisionTime(now)
                .withRelocationTime(now + policy.getRelocationTimeMs())
                .build();

        TaskRelocationPlan previous = lastResult.get(task.getId());
        if (previous == null) {
            return freshPlan;
        }
        // Keep the earlier plan when only the relocation time changed, or when the earlier
        // plan carries a sooner relocation deadline than the freshly computed one.
        if (areEqualExceptRelocationTime(previous, freshPlan) || previous.getRelocationTime() < freshPlan.getRelocationTime()) {
            return previous;
        }
        return freshPlan;
    }
}
| 1,533 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/RelocationTransactionLogger.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus.TaskRelocationState;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.supplementary.relocation.model.DeschedulingFailure;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Emits one structured log line per relocation action (store reads/writes, descheduling
 * decisions, eviction results), so the full history of a task relocation can be traced
 * from the transaction log.
 */
public class RelocationTransactionLogger {

    // Named logger (not class-based), so transaction entries can be routed to a dedicated appender.
    private static final Logger logger = LoggerFactory.getLogger("RelocationTransactionLogger");

    private final ReadOnlyJobOperations jobOperations;

    public RelocationTransactionLogger(ReadOnlyJobOperations jobOperations) {
        this.jobOperations = jobOperations;
    }

    /** Logs a relocation plan that was loaded from the store. */
    void logRelocationReadFromStore(String stepName, TaskRelocationPlan plan) {
        doLog(findJob(plan.getTaskId()), plan.getTaskId(), stepName, "readFromStore", "success", "Relocation plan loaded from store: plan=" + RelocationUtil.doFormat(plan));
    }

    /** Logs a relocation plan that was successfully written to the store. */
    void logRelocationPlanUpdatedInStore(String stepName, TaskRelocationPlan plan) {
        String taskId = plan.getTaskId();
        doLog(findJob(taskId), taskId, stepName, "updateInStore", "success", "Relocation plan added to store: plan=" + RelocationUtil.doFormat(plan));
    }

    /** Logs a failed attempt to write a relocation plan to the store. */
    void logRelocationPlanUpdateInStoreError(String stepName, TaskRelocationPlan plan, Throwable error) {
        String taskId = plan.getTaskId();
        doLog(findJob(taskId),
                taskId,
                stepName,
                "updateInStore",
                "failure",
                String.format(
                        "Failed to add a relocation plan to store: plan=%s, error=%s",
                        plan,
                        ExceptionExt.toMessageChain(error)
                )
        );
    }

    /** Logs the successful removal of an obsolete relocation plan from the store. */
    void logRelocationPlanRemovedFromStore(String stepName, String taskId) {
        doLog(findJob(taskId), taskId, stepName, "removeFromStore", "success", "Obsolete relocation plan removed from store");
    }

    /** Logs a failed attempt to remove an obsolete relocation plan from the store. */
    void logRelocationPlanRemoveFromStoreError(String stepName, String taskId, Throwable error) {
        doLog(findJob(taskId),
                taskId,
                stepName,
                "removeFromStore",
                "failure",
                "Failed to remove the obsolete relocation plan from store: error=" + ExceptionExt.toMessageChain(error)
        );
    }

    /**
     * Logs the outcome of a descheduling decision. The entry is a success when no failure is
     * attached to the result, and a failure (with the reason) otherwise.
     */
    void logTaskRelocationDeschedulingResult(String stepName, DeschedulingResult deschedulingResult) {
        String taskId = deschedulingResult.getTask().getId();
        DeschedulingFailure failure = deschedulingResult.getFailure().orElse(null);
        if (failure == null) {
            doLog(findJob(taskId),
                    taskId,
                    stepName,
                    "descheduling",
                    "success",
                    "Scheduled for being evicted now from agent: agentId=" + deschedulingResult.getAgentInstance().getId()
            );
        } else {
            doLog(findJob(taskId),
                    taskId,
                    stepName,
                    "descheduling",
                    "failure",
                    String.format("Task eviction not possible: agentId=%s, reason=%s",
                            deschedulingResult.getAgentInstance().getId(),
                            failure.getReasonMessage()
                    )
            );
        }
    }

    /** Logs a task relocation status; the entry's status mirrors the relocation state. */
    void logTaskRelocationStatus(String stepName, String action, TaskRelocationStatus status) {
        doLog(
                findJob(status.getTaskId()),
                status.getTaskId(),
                stepName,
                action,
                status.getState() == TaskRelocationState.Success ? "success" : "failure",
                String.format(
                        "Details: statusCode=%s, statusMessage=%s, plan=%s",
                        status.getStatusCode(),
                        status.getStatusMessage(),
                        RelocationUtil.doFormat(status.getTaskRelocationPlan())
                )
        );
    }

    /** Logs a failure to persist a task relocation status in the store. */
    void logTaskRelocationStatusStoreFailure(String stepName, TaskRelocationStatus status, Throwable error) {
        doLog(
                findJob(status.getTaskId()),
                status.getTaskId(),
                stepName,
                "storeUpdate",
                "failure",
                String.format(
                        "Details: statusCode=%s, statusMessage=%s, plan=%s, storeError=%s",
                        status.getStatusCode(),
                        status.getStatusMessage(),
                        RelocationUtil.doFormat(status.getTaskRelocationPlan()),
                        ExceptionExt.toMessageChain(error)
                )
        );
    }

    // Resolves the owning job id for a task; falls back to a placeholder when the task is unknown.
    private String findJob(String taskId) {
        return jobOperations.findTaskById(taskId).map(t -> t.getLeft().getId()).orElse("<job_not_found>");
    }

    // Formats and emits the single-line transaction entry; fixed-width padding keeps columns aligned.
    private static void doLog(String jobId,
                              String taskId,
                              String step,
                              String action,
                              String status,
                              String summary) {
        String message = String.format(
                "jobId=%s taskId=%s step=%-35s action=%-15s status=%-5s summary=%s",
                jobId,
                taskId,
                step,
                action,
                status,
                summary
        );
        logger.info(message);
    }
}
| 1,534 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/MustBeRelocatedTaskStoreUpdateStep.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.google.common.base.Stopwatch;
import com.netflix.titus.api.relocation.model.RelocationFunctions;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import com.netflix.titus.supplementary.relocation.workflow.RelocationWorkflowException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Step at which information about tasks that must be relocated is persisted in the database.
 * Keeps an in-memory mirror of the stored plans to compute the minimal set of store changes.
 */
public class MustBeRelocatedTaskStoreUpdateStep {

    private static final Logger logger = LoggerFactory.getLogger(MustBeRelocatedTaskStoreUpdateStep.class);

    private static final String STEP_NAME = "mustBeRelocatedTaskStoreUpdateStep";

    private final RelocationConfiguration configuration;
    private final TaskRelocationStore store;
    private final RelocationTransactionLogger transactionLog;
    private final StepMetrics metrics;

    // In-memory mirror of the plans currently persisted in the store, keyed by task id.
    private final Map<String, TaskRelocationPlan> relocationsPlanInStore;

    public MustBeRelocatedTaskStoreUpdateStep(RelocationConfiguration configuration,
                                              TaskRelocationStore store,
                                              RelocationTransactionLogger transactionLog,
                                              TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.store = store;
        this.transactionLog = transactionLog;
        this.relocationsPlanInStore = new HashMap<>(loadPlanFromStore());
        this.metrics = new StepMetrics(STEP_NAME, titusRuntime);
    }

    /**
     * Reconciles the store with the given set of required relocations: removes plans no longer
     * needed and writes new/changed ones.
     *
     * @param mustBeRelocatedTasks the desired set of relocation plans, keyed by task id
     * @return a snapshot of all plans believed to be in the store after the reconciliation
     */
    public Map<String, TaskRelocationPlan> persistChangesInStore(Map<String, TaskRelocationPlan> mustBeRelocatedTasks) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            Pair<Integer, Map<String, TaskRelocationPlan>> updatePair = execute(mustBeRelocatedTasks);
            metrics.onSuccess(updatePair.getLeft(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
            return updatePair.getRight();
        } catch (Exception e) {
            logger.error("Step processing error", e);
            metrics.onError(stopwatch.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    private Pair<Integer, Map<String, TaskRelocationPlan>> execute(Map<String, TaskRelocationPlan> mustBeRelocatedTasks) {
        // Plans present in the store but no longer required -> remove.
        Set<String> toRemove = CollectionsExt.copyAndRemove(relocationsPlanInStore.keySet(), mustBeRelocatedTasks.keySet());

        // Plans that are new or materially changed (ignoring the relocation time) -> write.
        List<TaskRelocationPlan> toUpdate = mustBeRelocatedTasks.values().stream()
                .filter(current -> areDifferent(relocationsPlanInStore.get(current.getTaskId()), current))
                .collect(Collectors.toList());

        removeFromStore(toRemove);
        updateInStore(toUpdate);

        logger.debug("Plans removed from store: {}", toRemove);
        logger.debug("Plans added to store: {}", toUpdate.stream().map(TaskRelocationPlan::getTaskId).collect(Collectors.toList()));

        // Keep the in-memory mirror consistent with what was just attempted.
        relocationsPlanInStore.keySet().removeAll(toRemove);
        toUpdate.forEach(plan -> relocationsPlanInStore.put(plan.getTaskId(), plan));

        return Pair.of(
                toRemove.size() + toUpdate.size(),
                new HashMap<>(relocationsPlanInStore)
        );
    }

    /** Loads all persisted plans on startup; a failure here is fatal for the workflow. */
    private Map<String, TaskRelocationPlan> loadPlanFromStore() {
        try {
            Map<String, TaskRelocationPlan> allPlans = store.getAllTaskRelocationPlans().block();
            allPlans.forEach((taskId, plan) -> transactionLog.logRelocationReadFromStore(STEP_NAME, plan));
            return allPlans;
        } catch (Exception e) {
            throw RelocationWorkflowException.storeError("Cannot load task relocation plan from store on startup", e);
        }
    }

    private boolean areDifferent(TaskRelocationPlan previous, TaskRelocationPlan current) {
        return previous == null || !RelocationFunctions.areEqualExceptRelocationTime(previous, current);
    }

    private void updateInStore(List<TaskRelocationPlan> toUpdate) {
        if (toUpdate.isEmpty()) {
            return;
        }

        Map<String, TaskRelocationPlan> byTaskId = toUpdate.stream().collect(Collectors.toMap(TaskRelocationPlan::getTaskId, p -> p));

        Map<String, Optional<Throwable>> result;
        try {
            result = store.createOrUpdateTaskRelocationPlans(toUpdate)
                    .timeout(Duration.ofMillis(configuration.getRdsTimeoutMs()))
                    .block();
        } catch (Exception e) {
            List<String> toUpdateIds = toUpdate.stream().map(TaskRelocationPlan::getTaskId).collect(Collectors.toList());
            // Fixed copy/paste bug: this is the update path, not the removal path.
            logger.warn("Could not update task relocation plans in the database: {}", toUpdateIds, e);
            return;
        }

        result.forEach((taskId, errorOpt) -> {
            if (errorOpt.isPresent()) {
                transactionLog.logRelocationPlanUpdateInStoreError(STEP_NAME, byTaskId.get(taskId), errorOpt.get());
            } else {
                transactionLog.logRelocationPlanUpdatedInStore(STEP_NAME, byTaskId.get(taskId));
            }
        });
    }

    private void removeFromStore(Set<String> toRemove) {
        if (toRemove.isEmpty()) {
            return;
        }

        Map<String, Optional<Throwable>> result;
        try {
            result = store.removeTaskRelocationPlans(toRemove)
                    .timeout(Duration.ofMillis(configuration.getRdsTimeoutMs()))
                    .block();
        } catch (Exception e) {
            logger.warn("Could not remove task relocation plans from the database: {}", toRemove, e);
            return;
        }

        result.forEach((taskId, errorOpt) -> {
            if (errorOpt.isPresent()) {
                transactionLog.logRelocationPlanRemoveFromStoreError(STEP_NAME, taskId, errorOpt.get());
            } else {
                transactionLog.logRelocationPlanRemovedFromStore(STEP_NAME, taskId);
            }
        });
    }
}
| 1,535 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/StepMetrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.concurrent.TimeUnit;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.supplementary.relocation.RelocationMetrics;
/**
 * Success/failure counters and execution-time timers for a single relocation workflow step.
 * All meters share the step name as a tag so steps can be compared side by side.
 */
class StepMetrics {

    private final Counter successCounter;
    private final Counter failureCounter;
    private final Timer successExecutionTime;
    private final Timer failureExecutionTime;

    StepMetrics(String stepName, TitusRuntime titusRuntime) {
        Registry registry = titusRuntime.getRegistry();

        Id counterId = registry.createId(RelocationMetrics.METRIC_ROOT + "steps", "step", stepName);
        Id timerId = registry.createId(RelocationMetrics.METRIC_ROOT + "steps", "stepExecutionTime", stepName);

        successCounter = registry.counter(counterId.withTag("status", "success"));
        failureCounter = registry.counter(counterId.withTag("status", "failure"));
        successExecutionTime = registry.timer(timerId.withTag("status", "success"));
        failureExecutionTime = registry.timer(timerId.withTag("status", "failure"));
    }

    /** Records a successful execution; the counter advances by the size of the step's result set. */
    void onSuccess(int resultSetSize, long elapsed) {
        successCounter.increment(resultSetSize);
        successExecutionTime.record(elapsed, TimeUnit.MILLISECONDS);
    }

    /** Records a failed execution together with how long it ran before failing. */
    void onError(long elapsed) {
        failureCounter.increment();
        failureExecutionTime.record(elapsed, TimeUnit.MILLISECONDS);
    }
}
| 1,536 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/DeschedulerStep.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.supplementary.relocation.descheduler.DeschedulerService;
import com.netflix.titus.supplementary.relocation.model.DeschedulingResult;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tasks to be relocated now are identified in this step. Termination of the selected tasks should
 * not violate the disruption budget constraints (unless explicitly requested).
 */
public class DeschedulerStep {

    private static final Logger logger = LoggerFactory.getLogger(DeschedulerStep.class);

    private static final String STEP_NAME = "deschedulerStep";

    private final DeschedulerService deschedulerService;
    private final RelocationTransactionLogger transactionLogger;
    private final StepMetrics metrics;

    public DeschedulerStep(DeschedulerService deschedulerService, RelocationTransactionLogger transactionLogger, TitusRuntime titusRuntime) {
        this.deschedulerService = deschedulerService;
        this.transactionLogger = transactionLogger;
        this.metrics = new StepMetrics(STEP_NAME, titusRuntime);
    }

    /**
     * Accepts a collection of tasks that must be relocated, and whose relocation was planned ahead
     * of time. For certain scenarios ahead-of-time planning is not possible or desirable — for
     * example during agent defragmentation, where the process must be completed quickly before it
     * becomes obsolete.
     *
     * @return tasks to terminate now. This may include tasks from {@code tasksToEvict} whose
     * deadline has passed, as well as tasks that were not planned ahead of time for relocation.
     */
    public Map<String, DeschedulingResult> deschedule(Map<String, TaskRelocationPlan> tasksToEvict) {
        Stopwatch timer = Stopwatch.createStarted();
        try {
            Map<String, DeschedulingResult> descheduled = execute(tasksToEvict);
            metrics.onSuccess(descheduled.size(), timer.elapsed(TimeUnit.MILLISECONDS));
            logger.debug("Descheduling result: {}", descheduled);
            return descheduled;
        } catch (Exception e) {
            logger.error("Step processing error", e);
            metrics.onError(timer.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    // Runs the descheduler, indexes the results by task id, and writes a transaction log entry per task.
    private Map<String, DeschedulingResult> execute(Map<String, TaskRelocationPlan> tasksToEvict) {
        Map<String, DeschedulingResult> byTaskId = new HashMap<>();
        for (DeschedulingResult descheduled : deschedulerService.deschedule(tasksToEvict)) {
            byTaskId.put(descheduled.getTask().getId(), descheduled);
            transactionLogger.logTaskRelocationDeschedulingResult(STEP_NAME, descheduled);
        }
        return byTaskId;
    }
}
| 1,537 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/RelocationMetricsStep.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.supplementary.relocation.RelocationMetrics;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
import com.netflix.titus.supplementary.relocation.connector.NodeDataResolver;
import com.netflix.titus.supplementary.relocation.util.RelocationPredicates;
import com.netflix.titus.supplementary.relocation.util.RelocationPredicates.RelocationTrigger;
import com.netflix.titus.supplementary.relocation.util.RelocationUtil;
/**
* Reports current relocation needs.
*/
public class RelocationMetricsStep {

    private static final String JOB_REMAINING_RELOCATION_METRICS = RelocationMetrics.METRIC_ROOT + "jobs";
    private static final String TASK_REMAINING_RELOCATION_METRICS = RelocationMetrics.METRIC_ROOT + "tasks";

    private final NodeDataResolver nodeDataResolver;
    private final ReadOnlyJobOperations jobOperations;
    private final Registry registry;

    // Per-job metric holders keyed by job id; entries are dropped once a job disappears.
    private final Map<String, JobMetrics> metrics = new HashMap<>();

    public RelocationMetricsStep(NodeDataResolver nodeDataResolver,
                                 ReadOnlyJobOperations jobOperations,
                                 TitusRuntime titusRuntime) {
        this.nodeDataResolver = nodeDataResolver;
        this.jobOperations = jobOperations;
        this.registry = titusRuntime.getRegistry();
    }

    /**
     * Refreshes per-job relocation metrics from the current node and job snapshots, and
     * removes metric holders for jobs that are no longer running.
     */
    public void updateMetrics() {
        Map<String, TitusNode> nodes = nodeDataResolver.resolve();
        Map<String, TitusNode> taskToInstanceMap = RelocationUtil.buildTasksToInstanceMap(nodes, jobOperations);

        // Update (or lazily create) the metrics holder for every currently running job.
        Set<String> jobIds = new HashSet<>();
        jobOperations.getJobsAndTasks().forEach(jobAndTask -> {
            Job<?> job = jobAndTask.getLeft();
            jobIds.add(job.getId());
            metrics.computeIfAbsent(job.getId(), jid -> new JobMetrics(job)).update(job, jobAndTask.getRight(), taskToInstanceMap);
        });

        // Remove jobs no longer running.
        Set<String> toRemove = new HashSet<>();
        metrics.keySet().forEach(jobId -> {
            if (!jobIds.contains(jobId)) {
                metrics.get(jobId).remove();
                toRemove.add(jobId);
            }
        });
        toRemove.forEach(metrics::remove);
    }
    // Non-static inner class: intentionally keeps a reference to the enclosing step's registry.
    private class JobMetrics {

        // Latest job/task snapshot, refreshed on every update() call.
        private Job<?> job;
        private List<Task> tasks;

        private final Id jobsRemainingId;
        private final Id tasksRemainingId;

        JobMetrics(Job<?> job) {
            this.job = job;
            // Tag values are taken from the job at creation time.
            List<Tag> tags = Arrays.asList(
                    new BasicTag("jobId", job.getId()),
                    new BasicTag("application", job.getJobDescriptor().getApplicationName()),
                    new BasicTag("capacityGroup", job.getJobDescriptor().getCapacityGroup())
            );
            this.jobsRemainingId = registry.createId(JOB_REMAINING_RELOCATION_METRICS, tags);
            this.tasksRemainingId = registry.createId(TASK_REMAINING_RELOCATION_METRICS, tags);
        }

        Job<?> getJob() {
            return job;
        }

        /** Refreshes the snapshot and recomputes the per-task relocation trigger counts. */
        void update(Job<?> latestJob, List<Task> latestTasks, Map<String, TitusNode> taskToInstanceMap) {
            this.job = latestJob;
            this.tasks = latestTasks;
            updateJobWithDisruptionBudget(taskToInstanceMap);
        }

        private void updateJobWithDisruptionBudget(Map<String, TitusNode> taskToInstanceMap) {
            if (tasks.isEmpty()) {
                remove();
            } else {
                updateTasks(taskToInstanceMap);
            }
        }

        // Classifies every task by its relocation trigger (first matching predicate wins) and
        // publishes the aggregated counts.
        private void updateTasks(Map<String, TitusNode> taskToInstanceMap) {
            int noRelocation = 0;
            int evacuatedAgentMatches = 0;
            int jobRelocationRequestMatches = 0;
            int taskRelocationRequestMatches = 0;
            int taskRelocationUnrecognized = 0;
            for (Task task : tasks) {
                TitusNode instance = taskToInstanceMap.get(task.getId());
                // Tasks without a resolved node are counted as not requiring relocation.
                if (instance == null) {
                    noRelocation++;
                } else {
                    RelocationTrigger trigger = Evaluators
                            .firstPresent(
                                    () -> instance.isServerGroupRelocationRequired() ? Optional.of(RelocationTrigger.InstanceGroup) : Optional.empty(),
                                    () -> RelocationPredicates.checkIfMustBeRelocatedImmediately(job, task, instance).map(Pair::getLeft),
                                    () -> RelocationPredicates.checkIfRelocationRequired(job, task, instance).map(Pair::getLeft)
                            )
                            .orElse(null);
                    if (trigger != null) {
                        switch (trigger) {
                            case Instance:
                                evacuatedAgentMatches++;
                                break;
                            case Job:
                                jobRelocationRequestMatches++;
                                break;
                            case Task:
                                taskRelocationRequestMatches++;
                                break;
                            default:
                                taskRelocationUnrecognized++;
                        }
                    } else {
                        noRelocation++;
                    }
                }
            }
            update(noRelocation, evacuatedAgentMatches, jobRelocationRequestMatches, taskRelocationRequestMatches, taskRelocationUnrecognized);
        }
private void update(int noRelocation, int evacuatedAgentMatches, int jobRelocationRequestMatches, int taskRelocationRequestMatches, int taskRelocationUnrecognized) {
String policyType = job.getJobDescriptor().getDisruptionBudget().getDisruptionBudgetPolicy().getClass().getSimpleName();
// Job level
int totalToRelocate = evacuatedAgentMatches + jobRelocationRequestMatches + taskRelocationRequestMatches + taskRelocationUnrecognized;
registry.gauge(jobsRemainingId.withTags(
"relocationRequired", "false",
"policy", policyType
)).set((totalToRelocate == 0) ? 1 : 0);
registry.gauge(jobsRemainingId.withTags(
"relocationRequired", "true",
"policy", policyType
)).set(totalToRelocate > 0 ? 1 : 0);
// Task aggregates
registry.gauge(tasksRemainingId.withTags(
"trigger", "noRelocation",
"policy", policyType
)).set(noRelocation);
registry.gauge(tasksRemainingId.withTags(
"trigger", "evacuatedAgents",
"policy", policyType
)).set(evacuatedAgentMatches);
registry.gauge(tasksRemainingId.withTags(
"trigger", "jobRelocationRequest",
"policy", policyType
)).set(jobRelocationRequestMatches);
registry.gauge(tasksRemainingId.withTags(
"trigger", "taskRelocationRequest",
"policy", policyType
)).set(taskRelocationRequestMatches);
registry.gauge(tasksRemainingId.withTags(
"trigger", "unrecognized",
"policy", policyType
)).set(taskRelocationUnrecognized);
registry.gauge(tasksRemainingId.withTags(
"trigger", "unrecognized",
"policy", policyType
)).set(taskRelocationUnrecognized);
}
void remove() {
update(0, 0, 0, 0, 0);
}
}
}
| 1,538 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/workflow/step/TaskEvictionResultStoreStep.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.workflow.step;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * At this step, the task eviction result is written to the database.
 */
public class TaskEvictionResultStoreStep {

    private static final Logger logger = LoggerFactory.getLogger(TaskEvictionResultStoreStep.class);

    private static final String STEP_NAME = "taskEvictionResultStoreStep";

    private final RelocationConfiguration configuration;
    private final TaskRelocationResultStore store;
    private final RelocationTransactionLogger transactionLog;
    private final StepMetrics metrics;

    public TaskEvictionResultStoreStep(RelocationConfiguration configuration,
                                       TaskRelocationResultStore store,
                                       RelocationTransactionLogger transactionLog,
                                       TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.store = store;
        this.transactionLog = transactionLog;
        this.metrics = new StepMetrics(STEP_NAME, titusRuntime);
    }

    /**
     * Persists the given task eviction results (keyed by task id), recording step success/error metrics.
     * Store failures inside {@link #execute(Map)} are logged and swallowed (best-effort); only unexpected
     * errors propagate to the caller.
     */
    public void storeTaskEvictionResults(Map<String, TaskRelocationStatus> taskEvictionResults) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            int updates = execute(taskEvictionResults);
            metrics.onSuccess(updates, stopwatch.elapsed(TimeUnit.MILLISECONDS));
        } catch (Exception e) {
            logger.error("Step processing error", e);
            metrics.onError(stopwatch.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    /**
     * Writes all statuses in a single store call and logs a per-task transaction record for each
     * success or failure. Returns the number of attempted record updates (0 when the store call
     * itself failed).
     */
    private int execute(Map<String, TaskRelocationStatus> taskEvictionResults) {
        Map<String, Optional<Throwable>> result;
        try {
            result = store.createTaskRelocationStatuses(new ArrayList<>(taskEvictionResults.values()))
                    .timeout(Duration.ofMillis(configuration.getRdsTimeoutMs()))
                    .block();
        } catch (Exception e) {
            // Message fixed: this step stores relocation statuses; it does not remove relocation plans.
            logger.warn("Could not store task relocation statuses in the database: {}", taskEvictionResults.keySet(), e);
            taskEvictionResults.forEach((taskId, status) -> transactionLog.logTaskRelocationStatusStoreFailure(STEP_NAME, status, e));
            return 0;
        }
        result.forEach((taskId, errorOpt) -> {
            if (errorOpt.isPresent()) {
                transactionLog.logTaskRelocationStatusStoreFailure(STEP_NAME, taskEvictionResults.get(taskId), errorOpt.get());
            } else {
                transactionLog.logTaskRelocationStatus(STEP_NAME, "storeUpdate", taskEvictionResults.get(taskId));
            }
        });
        return result.size();
    }
}
| 1,539 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/model/DeschedulingFailure.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.model;
import java.util.Objects;
/**
 * Value object describing why a task could not be descheduled.
 */
public class DeschedulingFailure {

    private final String reasonMessage;

    public DeschedulingFailure(String reasonMessage) {
        this.reasonMessage = reasonMessage;
    }

    /**
     * Returns the human readable failure reason.
     */
    public String getReasonMessage() {
        return reasonMessage;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        DeschedulingFailure other = (DeschedulingFailure) o;
        return Objects.equals(reasonMessage, other.reasonMessage);
    }

    @Override
    public int hashCode() {
        return Objects.hash(reasonMessage);
    }

    @Override
    public String toString() {
        return "DeschedulingFailure{reasonMessage='" + reasonMessage + "'}";
    }

    /**
     * Returns a builder pre-populated with this instance's values.
     */
    public Builder but() {
        return newBuilder().withReasonMessage(reasonMessage);
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public static final class Builder {

        private String reasonMessage;

        private Builder() {
        }

        public Builder withReasonMessage(String reasonMessage) {
            this.reasonMessage = reasonMessage;
            return this;
        }

        public DeschedulingFailure build() {
            return new DeschedulingFailure(reasonMessage);
        }
    }
}
| 1,540 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/model/DeschedulingResult.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.model;
import java.util.Objects;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.supplementary.relocation.connector.TitusNode;
/**
 * Outcome of a descheduling decision for a single task: the relocation plan, the task itself, the
 * node it runs on, and an optional failure when the task cannot be evicted.
 */
public class DeschedulingResult {

    private final TaskRelocationPlan taskRelocationPlan;
    private final Task task;
    private final TitusNode agentInstance;
    private final Optional<DeschedulingFailure> failure;

    public DeschedulingResult(TaskRelocationPlan taskRelocationPlan,
                              Task task,
                              TitusNode agentInstance,
                              Optional<DeschedulingFailure> failure) {
        this.taskRelocationPlan = taskRelocationPlan;
        this.task = task;
        this.agentInstance = agentInstance;
        this.failure = failure;
    }

    public TaskRelocationPlan getTaskRelocationPlan() {
        return taskRelocationPlan;
    }

    public Task getTask() {
        return task;
    }

    public TitusNode getAgentInstance() {
        return agentInstance;
    }

    /**
     * Returns true when no descheduling failure was recorded, i.e. the task may be evicted.
     */
    public boolean canEvict() {
        return !failure.isPresent();
    }

    public Optional<DeschedulingFailure> getFailure() {
        return failure;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        DeschedulingResult that = (DeschedulingResult) o;
        return Objects.equals(taskRelocationPlan, that.taskRelocationPlan)
                && Objects.equals(task, that.task)
                && Objects.equals(agentInstance, that.agentInstance)
                && Objects.equals(failure, that.failure);
    }

    @Override
    public int hashCode() {
        return Objects.hash(taskRelocationPlan, task, agentInstance, failure);
    }

    @Override
    public String toString() {
        return "DeschedulingResult{taskRelocationPlan=" + taskRelocationPlan
                + ", task=" + task
                + ", agentInstance=" + agentInstance
                + ", failure=" + failure
                + '}';
    }

    /**
     * Returns a builder pre-populated with this instance's values.
     */
    public Builder toBuilder() {
        return newBuilder()
                .withTaskRelocationPlan(taskRelocationPlan)
                .withTask(task)
                .withAgentInstance(agentInstance)
                .withFailure(failure.orElse(null));
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public static final class Builder {

        private TaskRelocationPlan taskRelocationPlan;
        private Task task;
        private TitusNode agentInstance;
        private Optional<DeschedulingFailure> failure = Optional.empty();

        private Builder() {
        }

        public Builder withTaskRelocationPlan(TaskRelocationPlan taskRelocationPlan) {
            this.taskRelocationPlan = taskRelocationPlan;
            return this;
        }

        public Builder withTask(Task task) {
            this.task = task;
            return this;
        }

        public Builder withAgentInstance(TitusNode agentInstance) {
            this.agentInstance = agentInstance;
            return this;
        }

        public Builder withFailure(DeschedulingFailure failure) {
            this.failure = Optional.ofNullable(failure);
            return this;
        }

        public DeschedulingResult build() {
            return new DeschedulingResult(taskRelocationPlan, task, agentInstance, failure);
        }
    }
}
| 1,541 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/connector/KubernetesNodeDataResolver.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import com.netflix.titus.common.util.RegExpExt;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import io.fabric8.kubernetes.api.model.Node;
import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.NODE_LABEL_MACHINE_GROUP;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.TAINT_EFFECT_NO_EXECUTE;
/**
 * {@link NodeDataResolver} backed by the Kubernetes node informer. Node taints and conditions are
 * matched against dynamically reloadable regular expressions from {@link RelocationConfiguration}
 * to decide whether relocation is required for tasks on each node.
 */
public class KubernetesNodeDataResolver implements NodeDataResolver {
    private static final Logger logger = LoggerFactory.getLogger(KubernetesNodeDataResolver.class);
    // Staleness reported until the informer completes its first sync (10 hours in milliseconds).
    private static final long NOT_SYNCED_STALENESS_MS = 10 * 3600_000;
    private final RelocationConfiguration configuration;
    private final SharedIndexInformer<Node> nodeInformer;
    // Limits resolution to nodes this resolver is responsible for (e.g. kube-scheduler owned nodes).
    private final Predicate<Node> nodeFilter;
    // Dynamic matchers: re-read their regex from configuration on each use.
    private final Function<String, Matcher> relocationRequiredTaintsMatcher;
    private final Function<String, Matcher> relocationRequiredImmediatelyTaintsMatcher;
    private final Function<String, Matcher> badConditionMatcherFactory;
    private final Function<String, Matcher> badTaintMatcherFactory;
    public KubernetesNodeDataResolver(RelocationConfiguration configuration,
                                      Fabric8IOConnector fabric8IOConnector,
                                      Predicate<Node> nodeFilter) {
        this.configuration = configuration;
        this.nodeInformer = fabric8IOConnector.getNodeInformer();
        this.relocationRequiredTaintsMatcher = RegExpExt.dynamicMatcher(
                configuration::getNodeRelocationRequiredTaints,
                "nodeRelocationRequiredTaints",
                Pattern.DOTALL,
                logger);
        this.relocationRequiredImmediatelyTaintsMatcher = RegExpExt.dynamicMatcher(
                configuration::getNodeRelocationRequiredImmediatelyTaints,
                "nodeRelocationRequiredImmediatelyTaints",
                Pattern.DOTALL,
                logger);
        this.nodeFilter = nodeFilter;
        this.badConditionMatcherFactory = RegExpExt.dynamicMatcher(configuration::getBadNodeConditionPattern,
                "titus.relocation.badNodeConditionPattern", Pattern.DOTALL, logger);
        this.badTaintMatcherFactory = RegExpExt.dynamicMatcher(configuration::getBadTaintsPattern,
                "titus.relocation.badTaintsPattern", Pattern.DOTALL, logger);
    }
    /**
     * Returns all nodes accepted by the node filter that could be converted to {@link TitusNode},
     * keyed by node id (the Kubernetes node name).
     */
    @Override
    public Map<String, TitusNode> resolve() {
        List<Node> k8sNodes = nodeInformer.getIndexer().list().stream().filter(nodeFilter).collect(Collectors.toList());
        Map<String, TitusNode> result = new HashMap<>();
        k8sNodes.forEach(k8Node -> toReconcilerNode(k8Node).ifPresent(node -> result.put(node.getId(), node)));
        return result;
    }
    /**
     * Converts a Kubernetes node to a {@link TitusNode}. Returns {@link Optional#empty()} when
     * mandatory metadata (name, labels, taints, or the machine-group label) is missing.
     */
    private Optional<TitusNode> toReconcilerNode(Node k8sNode) {
        if (k8sNode.getMetadata() == null
                || k8sNode.getMetadata().getName() == null
                || k8sNode.getMetadata().getLabels() == null
                || k8sNode.getSpec() == null
                || k8sNode.getSpec().getTaints() == null) {
            return Optional.empty();
        }
        Map<String, String> k8sLabels = k8sNode.getMetadata().getLabels();
        String serverGroupId = k8sLabels.get(NODE_LABEL_MACHINE_GROUP);
        if (serverGroupId == null) {
            return Optional.empty();
        }
        // A node is "in bad condition" when either a bad node condition or a bad taint matches,
        // subject to the configured transition-time debounce thresholds.
        boolean hasBadNodeCondition = NodePredicates.hasBadCondition(k8sNode, badConditionMatcherFactory,
                configuration.getNodeConditionTransitionTimeThresholdSeconds());
        boolean hasBadTaint = NodePredicates.hasBadTaint(k8sNode, badTaintMatcherFactory,
                configuration.getNodeTaintTransitionTimeThresholdSeconds());
        TitusNode node = TitusNode.newBuilder()
                .withId(k8sNode.getMetadata().getName())
                .withServerGroupId(serverGroupId)
                .withRelocationRequired(anyNoExecuteMatch(k8sNode, relocationRequiredTaintsMatcher))
                .withRelocationRequiredImmediately(anyNoExecuteMatch(k8sNode, relocationRequiredImmediatelyTaintsMatcher))
                .withBadCondition(hasBadNodeCondition || hasBadTaint)
                .build();
        return Optional.of(node);
    }
    // True when any NoExecute taint key matches the (dynamically configured) pattern.
    private boolean anyNoExecuteMatch(Node k8sNode, Function<String, Matcher> taintsMatcher) {
        return k8sNode.getSpec().getTaints().stream().anyMatch(taint ->
                TAINT_EFFECT_NO_EXECUTE.equals(taint.getEffect()) && taintsMatcher.apply(taint.getKey()).matches()
        );
    }
    /**
     * Kubernetes informer does not provide staleness details, just information about the first sync.
     */
    @Override
    public long getStalenessMs() {
        return nodeInformer.hasSynced() ? 0 : NOT_SYNCED_STALENESS_MS;
    }
}
| 1,542 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/connector/NodeDataResolver.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.util.Map;
/**
 * Resolves the current set of Titus nodes from the underlying infrastructure
 * (see {@link KubernetesNodeDataResolver} for the Kubernetes-backed implementation).
 */
public interface NodeDataResolver {

    /**
     * Returns all known nodes, keyed by node id.
     */
    Map<String, TitusNode> resolve();

    /**
     * Returns the staleness of the resolved data in milliseconds; 0 means the data is fresh.
     */
    long getStalenessMs();
}
| 1,543 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/connector/NodePredicates.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.time.OffsetDateTime;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOUtil;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import io.fabric8.kubernetes.api.model.Node;
import io.fabric8.kubernetes.api.model.NodeCondition;
import io.fabric8.kubernetes.api.model.Taint;
/**
 * Predicates over Kubernetes {@link Node} objects used to classify nodes for task relocation.
 */
public class NodePredicates {

    /**
     * Returns a predicate matching nodes that carry the scheduler taint with value "kubeScheduler".
     */
    public static Predicate<Node> getKubeSchedulerNodePredicate() {
        return node -> isOwnedByScheduler("kubeScheduler", node);
    }

    /**
     * Returns true when the node has a scheduler taint whose value equals the given scheduler name.
     * Null node/spec/taints safely yields false.
     */
    @VisibleForTesting
    static boolean isOwnedByScheduler(String schedulerName, Node node) {
        if (node == null || node.getSpec() == null || node.getSpec().getTaints() == null) {
            return false;
        }
        List<Taint> taints = node.getSpec().getTaints();
        return taints.stream().anyMatch(taint ->
                KubeConstants.TAINT_SCHEDULER.equals(taint.getKey()) && schedulerName.equals(taint.getValue())
        );
    }

    /**
     * Returns true when any node condition matches the bad-condition pattern, its status is "True",
     * and it transitioned longer ago than the threshold (debounce against flapping conditions).
     */
    @VisibleForTesting
    static boolean hasBadCondition(Node node, Function<String, Matcher> badConditionExpression,
                                   int nodeConditionTransitionTimeThresholdSeconds) {
        if (node.getStatus() != null && node.getStatus().getConditions() != null) {
            return node.getStatus().getConditions().stream()
                    .anyMatch(v1NodeCondition -> badConditionExpression.apply(v1NodeCondition.getType()).matches() &&
                            Boolean.parseBoolean(v1NodeCondition.getStatus()) &&
                            !isNodeConditionTransitionedRecently(v1NodeCondition, nodeConditionTransitionTimeThresholdSeconds));
        }
        return false;
    }

    /**
     * Returns true when any taint matches the bad-taint pattern, its value is "true" (or absent),
     * and it was added longer ago than the threshold (debounce against transient taints).
     */
    @VisibleForTesting
    static boolean hasBadTaint(Node node, Function<String, Matcher> badTaintExpression,
                               int nodeTaintTransitionTimeThresholdSeconds) {
        if (node.getSpec() != null && node.getSpec().getTaints() != null) {
            return node.getSpec().getTaints().stream()
                    .anyMatch(v1Taint -> badTaintExpression.apply(v1Taint.getKey()).matches() &&
                            matchesTaintValueIfAvailable(v1Taint, Boolean.TRUE.toString()) &&
                            !isTransitionedRecently(v1Taint.getTimeAdded(), nodeTaintTransitionTimeThresholdSeconds));
        }
        return false;
    }

    // A taint with no value is treated as matching; otherwise compare case-insensitively.
    static boolean matchesTaintValueIfAvailable(Taint taint, String value) {
        if (taint.getValue() != null) {
            return taint.getValue().equalsIgnoreCase(value);
        }
        return true;
    }

    // Delegates to the generic timestamp check; previously this duplicated the same parsing logic.
    static boolean isNodeConditionTransitionedRecently(NodeCondition nodeCondition, int thresholdSeconds) {
        return isTransitionedRecently(nodeCondition.getLastTransitionTime(), thresholdSeconds);
    }

    /**
     * Returns true when the given transition timestamp is within the last {@code thresholdSeconds}.
     * A null timestamp yields false (treated as "not recent").
     */
    static boolean isTransitionedRecently(String nodeTransitionTime, int thresholdSeconds) {
        OffsetDateTime threshold = OffsetDateTime.now().minusSeconds(thresholdSeconds);
        if (nodeTransitionTime != null) {
            OffsetDateTime timestamp = Fabric8IOUtil.parseTimestamp(nodeTransitionTime);
            return timestamp.isAfter(threshold);
        }
        return false;
    }
}
| 1,544 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/connector/TitusNode.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import java.util.Objects;

import com.google.common.base.Preconditions;
/**
 * Relocation view of a single Titus node: identity, server group membership, and the relocation
 * flags derived from its taints and conditions. Instances are built via {@link Builder}.
 */
public class TitusNode {

    private final String id;
    private final String serverGroupId;
    private String ipAddress;
    private boolean relocationNotAllowed;
    private boolean relocationRequired;
    private boolean relocationRequiredImmediately;
    private boolean serverGroupRelocationRequired;
    private boolean inBadCondition;

    public TitusNode(String id,
                     String serverGroupId) {
        this.id = id;
        this.serverGroupId = serverGroupId;
    }

    public String getId() {
        return id;
    }

    public String getIpAddress() {
        return ipAddress;
    }

    public String getServerGroupId() {
        return serverGroupId;
    }

    public boolean isRelocationNotAllowed() {
        return relocationNotAllowed;
    }

    public boolean isRelocationRequired() {
        return relocationRequired;
    }

    public boolean isRelocationRequiredImmediately() {
        return relocationRequiredImmediately;
    }

    public boolean isServerGroupRelocationRequired() {
        return serverGroupRelocationRequired;
    }

    public boolean isInBadCondition() {
        return inBadCondition;
    }

    /**
     * Returns a builder pre-populated with ALL of this instance's values.
     * Bug fix: {@code ipAddress} and {@code inBadCondition} were previously dropped here,
     * so {@code node.toBuilder().build()} silently lost those fields.
     */
    public Builder toBuilder() {
        return newBuilder()
                .withId(id)
                .withIpAddress(ipAddress)
                .withServerGroupId(serverGroupId)
                .withRelocationRequired(relocationRequired)
                .withRelocationRequiredImmediately(relocationRequiredImmediately)
                .withRelocationNotAllowed(relocationNotAllowed)
                .withServerGroupRelocationRequired(serverGroupRelocationRequired)
                .withBadCondition(inBadCondition);
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public static final class Builder {

        private String id;
        private String serverGroupId;
        private boolean relocationRequired;
        private boolean relocationNotAllowed;
        private boolean relocationRequiredImmediately;
        private boolean serverGroupRelocationRequired;
        private boolean inBadCondition;
        private String ipAddress;

        private Builder() {
        }

        public Builder withId(String id) {
            this.id = id;
            return this;
        }

        public Builder withIpAddress(String ipAddress) {
            this.ipAddress = ipAddress;
            return this;
        }

        public Builder withServerGroupId(String serverGroupId) {
            this.serverGroupId = serverGroupId;
            return this;
        }

        public Builder withRelocationNotAllowed(boolean relocationNotAllowed) {
            this.relocationNotAllowed = relocationNotAllowed;
            return this;
        }

        public Builder withRelocationRequired(boolean relocationRequired) {
            this.relocationRequired = relocationRequired;
            return this;
        }

        public Builder withRelocationRequiredImmediately(boolean relocationRequiredImmediately) {
            this.relocationRequiredImmediately = relocationRequiredImmediately;
            return this;
        }

        public Builder withServerGroupRelocationRequired(boolean serverGroupRelocationRequired) {
            this.serverGroupRelocationRequired = serverGroupRelocationRequired;
            return this;
        }

        public Builder withBadCondition(boolean inBadCondition) {
            this.inBadCondition = inBadCondition;
            return this;
        }

        /**
         * Builds the node; id and server group id are mandatory.
         *
         * @throws NullPointerException when id or serverGroupId is null (same behavior as the
         *                              previous Guava Preconditions.checkNotNull call)
         */
        public TitusNode build() {
            Objects.requireNonNull(id, "instance id is null");
            Objects.requireNonNull(serverGroupId, "server group id is null");
            TitusNode node = new TitusNode(id, serverGroupId);
            node.ipAddress = ipAddress;
            node.relocationNotAllowed = this.relocationNotAllowed;
            node.relocationRequiredImmediately = this.relocationRequiredImmediately;
            node.relocationRequired = this.relocationRequired;
            node.serverGroupRelocationRequired = this.serverGroupRelocationRequired;
            node.inBadCondition = this.inBadCondition;
            return node;
        }
    }
}
| 1,545 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/connector/NodeDataResolverComponent.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.connector;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.supplementary.relocation.RelocationConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
/**
 * Spring wiring for the {@link NodeDataResolver} used by the relocation service.
 */
@Component
public class NodeDataResolverComponent {

    /**
     * Kubernetes-backed resolver restricted to nodes owned by the "kubeScheduler" scheduler
     * (see {@link NodePredicates#getKubeSchedulerNodePredicate()}).
     */
    @Bean
    public NodeDataResolver getKubernetesNodeDataResolver(RelocationConfiguration configuration,
                                                          Fabric8IOConnector fabric8IOConnector) {
        return new KubernetesNodeDataResolver(configuration,
                fabric8IOConnector,
                NodePredicates.getKubeSchedulerNodePredicate()
        );
    }
}
| 1,546 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/TaskRelocationStoreActivator.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store;
import com.netflix.titus.api.common.LeaderActivationListener;
/**
 * Marker interface (no methods added) tying the relocation store layer into leader activation.
 * Presumably used to select/identify the store's {@link LeaderActivationListener} during wiring —
 * NOTE(review): usage is outside this file; confirm against the activation setup.
 */
public interface TaskRelocationStoreActivator extends LeaderActivationListener {
}
| 1,547 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/TaskRelocationResultStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import reactor.core.publisher.Mono;
/**
 * Store interface for persisting large amount of data, which (most of the time) are never updated. This
 * interface may be backed by the same or different store as {@link TaskRelocationStore}.
 */
public interface TaskRelocationResultStore {

    /**
     * Creates or updates a record in the database. The result maps each task id to an optional
     * per-record error (empty when that record was stored successfully).
     */
    Mono<Map<String, Optional<Throwable>>> createTaskRelocationStatuses(List<TaskRelocationStatus> taskRelocationStatuses);

    /**
     * Returns all archived task relocation statuses for the given task id, or an empty list if none is found.
     */
    Mono<List<TaskRelocationStatus>> getTaskRelocationStatusList(String taskId);
}
| 1,548 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/TaskRelocationStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import reactor.core.publisher.Mono;
/**
 * Store API for managing the active data set. This includes active task relocation plans, and the latest
 * relocation attempts. Extends {@link LeaderActivationListener}, so the store is presumably brought
 * up as part of the leader activation lifecycle (NOTE(review): confirm with the listener contract).
 */
public interface TaskRelocationStore extends LeaderActivationListener {
    /**
     * Creates or updates task relocation plans in the database.
     *
     * @param taskRelocationPlans plans to persist, keyed internally by task id
     * @return a map from task id to the per-record outcome; an empty {@link Optional} indicates
     *         success, a present {@link Throwable} carries the failure cause
     */
    Mono<Map<String, Optional<Throwable>>> createOrUpdateTaskRelocationPlans(List<TaskRelocationPlan> taskRelocationPlans);
    /**
     * Returns all task relocation plans stored in the database, keyed by task id.
     */
    Mono<Map<String, TaskRelocationPlan>> getAllTaskRelocationPlans();
    /**
     * Remove task relocation plans from the store.
     *
     * @param toRemove task ids whose plans should be deleted
     * @return a map from task id to the per-record outcome (empty {@link Optional} on success)
     */
    Mono<Map<String, Optional<Throwable>>> removeTaskRelocationPlans(Set<String> toRemove);
}
| 1,549 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/memory/InMemoryTaskRelocationResultStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store.memory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.inject.Singleton;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import reactor.core.publisher.Mono;
@Singleton
public class InMemoryTaskRelocationResultStore implements TaskRelocationResultStore {
    // Archived statuses keyed by task id. The map's own monitor guards the compound
    // read-modify-write of the per-task lists.
    private final ConcurrentMap<String, List<TaskRelocationStatus>> taskRelocationStatusesByTaskId = new ConcurrentHashMap<>();

    /**
     * Appends each status to the per-task history list. The in-memory store cannot fail, so
     * every task id maps to an empty {@link Optional} in the result.
     */
    @Override
    public Mono<Map<String, Optional<Throwable>>> createTaskRelocationStatuses(List<TaskRelocationStatus> taskRelocationStatuses) {
        return Mono.defer(() -> {
            Map<String, Optional<Throwable>> result = new HashMap<>();
            synchronized (taskRelocationStatusesByTaskId) {
                taskRelocationStatuses.forEach(status -> {
                    taskRelocationStatusesByTaskId.computeIfAbsent(
                            status.getTaskId(),
                            tid -> new ArrayList<>()
                    ).add(status);
                    result.put(status.getTaskId(), Optional.empty());
                });
            }
            return Mono.just(result);
        });
    }

    /**
     * Returns the archived statuses for the given task, or an empty list if none are recorded.
     */
    @Override
    public Mono<List<TaskRelocationStatus>> getTaskRelocationStatusList(String taskId) {
        return Mono.defer(() -> {
            // Copy under the same lock used by the writer; previously the live internal
            // ArrayList was returned, so callers could mutate it or observe a
            // ConcurrentModificationException while a write was appending to it.
            synchronized (taskRelocationStatusesByTaskId) {
                List<TaskRelocationStatus> statuses = taskRelocationStatusesByTaskId.get(taskId);
                if (statuses == null) {
                    return Mono.just(Collections.emptyList());
                }
                return Mono.just(Collections.unmodifiableList(new ArrayList<>(statuses)));
            }
        });
    }
}
| 1,550 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/memory/InMemoryRelocationStoreComponent.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store.memory;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStoreActivator;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring configuration providing in-memory implementations of the relocation stores. Enabled by
 * default; disable by setting {@code titus.relocation.store.inMemory.enabled=false} (for example
 * when a persistent store implementation is configured instead).
 */
@Configuration
@ConditionalOnProperty(name = "titus.relocation.store.inMemory.enabled", havingValue = "true", matchIfMissing = true)
public class InMemoryRelocationStoreComponent {
    /** In-memory store for active task relocation plans. */
    @Bean
    public TaskRelocationStore getTaskRelocationStore() {
        return new InMemoryTaskRelocationStore();
    }
    /** In-memory store for archived task relocation results. */
    @Bean
    public TaskRelocationResultStore getTaskRelocationResultStore() {
        return new InMemoryTaskRelocationResultStore();
    }
    /** No-op activator; the in-memory stores need no activation work. */
    @Bean
    public TaskRelocationStoreActivator getTaskRelocationStoreActivator() {
        return new TaskRelocationStoreActivator() {
        };
    }
}
| 1,551 |
0 | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store | Create_ds/titus-control-plane/titus-supplementary-component/task-relocation/src/main/java/com/netflix/titus/supplementary/relocation/store/memory/InMemoryTaskRelocationStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.supplementary.relocation.store.memory;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import javax.inject.Singleton;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import reactor.core.publisher.Mono;
@Singleton
public class InMemoryTaskRelocationStore implements TaskRelocationStore {
    // Active relocation plans keyed by task id.
    private final ConcurrentMap<String, TaskRelocationPlan> taskRelocationPlanByTaskId = new ConcurrentHashMap<>();

    /**
     * Creates or replaces the plan for each task. The in-memory store cannot fail, so every
     * task id maps to an empty {@link Optional} in the result.
     */
    @Override
    public Mono<Map<String, Optional<Throwable>>> createOrUpdateTaskRelocationPlans(List<TaskRelocationPlan> taskRelocationPlans) {
        return Mono.defer(() -> {
            Map<String, Optional<Throwable>> result = new HashMap<>();
            taskRelocationPlans.forEach(plan -> {
                taskRelocationPlanByTaskId.put(plan.getTaskId(), plan);
                result.put(plan.getTaskId(), Optional.empty());
            });
            return Mono.just(result);
        });
    }

    /**
     * Returns all stored plans keyed by task id.
     */
    @Override
    public Mono<Map<String, TaskRelocationPlan>> getAllTaskRelocationPlans() {
        // Return an immutable snapshot rather than an unmodifiable *view* of the live concurrent
        // map; previously callers iterating the result could observe concurrent mutations.
        return Mono.defer(() -> Mono.just(Collections.unmodifiableMap(new HashMap<>(taskRelocationPlanByTaskId))));
    }

    /**
     * Removes the plans for the given task ids. Unknown ids are ignored but still reported as
     * successfully removed (empty {@link Optional}).
     */
    @Override
    public Mono<Map<String, Optional<Throwable>>> removeTaskRelocationPlans(Set<String> toRemove) {
        return Mono.defer(() -> {
            taskRelocationPlanByTaskId.keySet().removeAll(toRemove);
            Map<String, Optional<Throwable>> result = toRemove.stream().collect(Collectors.toMap(tid -> tid, tid -> Optional.empty()));
            return Mono.just(result);
        });
    }
}
| 1,552 |
0 | Create_ds/edda-client/edda-client/src/test/java/com/netflix | Create_ds/edda-client/edda-client/src/test/java/com/netflix/edda/Ec2ClientTests.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.util.Map;
import java.util.HashMap;
import static org.junit.Assert.*;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.PropertyFactory;
import com.netflix.archaius.config.EmptyConfig;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.RxNetty;
import io.reactivex.netty.protocol.http.server.HttpServer;
import io.reactivex.netty.protocol.http.server.file.ClassPathFileRequestHandler;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.*;
import com.netflix.archaius.DefaultPropertyFactory;
import com.netflix.iep.config.DynamicPropertiesConfiguration;
import com.netflix.iep.config.TestResourceConfiguration;
import com.netflix.iep.http.RxHttp;
public class Ec2ClientTests {
  private static HttpServer<ByteBuf, ByteBuf> server;
  // Constructing EddaContext registers the RxHttp instance used by the clients under test.
  private static EddaContext eddaContext = new EddaContext(new RxHttp(EmptyConfig.INSTANCE, null));
  private static DynamicPropertiesConfiguration config = null;

  /** Starts an embedded HTTP server serving classpath fixtures and points edda config at it. */
  @BeforeClass
  public static void setUp() throws Exception {
    server = RxNetty.createHttpServer(0, new ClassPathFileRequestHandler(".")).start();
    final String userDir = System.getProperty("user.dir");
    Map<String,String> subs = new HashMap<String,String>() {{
      put("user.dir", userDir);
      put("resources.url", "http://localhost:" + server.getServerPort());
    }};
    Config cfg = TestResourceConfiguration.load("edda.test.properties", subs);
    PropertyFactory factory = new DefaultPropertyFactory(cfg);
    config = new DynamicPropertiesConfiguration(factory);
  }

  /** Releases resources acquired in setUp; previously the embedded server was never shut down. */
  @AfterClass
  public static void tearDown() throws Exception {
    if (config != null) {
      config.destroy();
    }
    if (server != null) {
      server.shutdown();
    }
  }

  @Test
  public void describeSubnets() {
    AmazonEC2 client = AwsClientFactory.newEc2Client();
    // Note: assertEquals(message, expected, actual) — expected value comes second.
    DescribeSubnetsResult res = client.describeSubnets();
    assertEquals("size", 8, res.getSubnets().size());
    String id = "subnet-30ef1559";
    res = client.describeSubnets(new DescribeSubnetsRequest().withSubnetIds(id));
    assertEquals("size", 1, res.getSubnets().size());
    assertEquals("id", id, res.getSubnets().get(0).getSubnetId());
    String id2 = "subnet-0962c560";
    res = client.describeSubnets(new DescribeSubnetsRequest().withSubnetIds(id, id2));
    assertEquals("size", 2, res.getSubnets().size());
    assertEquals("id1", id, res.getSubnets().get(0).getSubnetId());
    assertEquals("id2", id2, res.getSubnets().get(1).getSubnetId());
  }
}
| 1,553 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaElasticLoadBalancingClient.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing;
import com.amazonaws.services.elasticloadbalancing.model.*;
import com.netflix.edda.mapper.InstanceStateView;
import com.netflix.edda.mapper.LoadBalancerAttributesView;
/**
 * Edda-backed ELB client: describe-style calls are answered from the Edda cache instead of AWS.
 */
public class EddaElasticLoadBalancingClient extends EddaAwsClient {
  public EddaElasticLoadBalancingClient(AwsConfiguration config, String vip, String region) {
    super(config, vip, region);
  }

  /** Returns a proxy that serves cached reads; unimplemented calls are unsupported (see ProxyHelper). */
  public AmazonElasticLoadBalancing readOnly() {
    return readOnly(AmazonElasticLoadBalancing.class);
  }

  /** Wraps a real AWS client; cached reads are served here, other calls fall through to the delegate. */
  public AmazonElasticLoadBalancing wrapAwsClient(AmazonElasticLoadBalancing delegate) {
    return wrapAwsClient(AmazonElasticLoadBalancing.class, delegate);
  }

  /**
   * Describes instance health for the given load balancer from the Edda cache. When the request
   * names specific instances, the result is filtered client-side to those ids.
   */
  public DescribeInstanceHealthResult describeInstanceHealth(DescribeInstanceHealthRequest request) {
    validateNotEmpty("LoadBalancerName", request.getLoadBalancerName());
    TypeReference<InstanceStateView> ref = new TypeReference<InstanceStateView>() {};
    String loadBalancerName = request.getLoadBalancerName();
    String url = config.url() + "/api/v2/view/loadBalancerInstances/"+loadBalancerName+";_expand";
    try {
      InstanceStateView instanceStateView = parse(ref, doGet(url));
      List<InstanceState> instanceStates = instanceStateView.getInstances();
      List<Instance> instances = request.getInstances();
      List<String> ids = new ArrayList<String>();
      if (instances != null) {
        for (Instance i : instances)
          ids.add(i.getInstanceId());
      }
      if (shouldFilter(ids)) {
        List<InstanceState> iss = new ArrayList<InstanceState>();
        for (InstanceState is : instanceStates) {
          if (matches(ids, is.getInstanceId()))
            iss.add(is);
        }
        instanceStates = iss;
      }
      // Bug fix: return the filtered list. Previously the unfiltered view contents were
      // returned, silently ignoring any instance-id filter supplied on the request.
      return new DescribeInstanceHealthResult()
        .withInstanceStates(instanceStates);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }

  /** Describes all load balancers known to Edda. */
  public DescribeLoadBalancersResult describeLoadBalancers() {
    return describeLoadBalancers(new DescribeLoadBalancersRequest());
  }

  /**
   * Describes load balancers from the Edda cache, filtered client-side by the names on the
   * request when present.
   */
  public DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest request) {
    TypeReference<List<LoadBalancerDescription>> ref = new TypeReference<List<LoadBalancerDescription>>() {};
    String url = config.url() + "/api/v2/aws/loadBalancers;_expand";
    try {
      List<LoadBalancerDescription> loadBalancerDescriptions = parse(ref, doGet(url));
      List<String> names = request.getLoadBalancerNames();
      if (shouldFilter(names)) {
        List<LoadBalancerDescription> lbs = new ArrayList<LoadBalancerDescription>();
        for (LoadBalancerDescription lb : loadBalancerDescriptions) {
          if (matches(names, lb.getLoadBalancerName()))
            lbs.add(lb);
        }
        loadBalancerDescriptions = lbs;
      }
      return new DescribeLoadBalancersResult()
        .withLoadBalancerDescriptions(loadBalancerDescriptions);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }

  /** Describes the attributes of a single load balancer from the Edda cache. */
  public DescribeLoadBalancerAttributesResult describeLoadBalancerAttributes(DescribeLoadBalancerAttributesRequest request) {
    validateNotEmpty("LoadBalancerName", request.getLoadBalancerName());
    TypeReference<LoadBalancerAttributesView> ref = new TypeReference<LoadBalancerAttributesView>() {};
    String loadBalancerName = request.getLoadBalancerName();
    String url = config.url() + "/api/v2/view/loadBalancerAttributes/"+loadBalancerName+";_expand";
    try {
      LoadBalancerAttributesView loadBalancerAttributesView = parse(ref, doGet(url));
      return new DescribeLoadBalancerAttributesResult()
        .withLoadBalancerAttributes(loadBalancerAttributesView.getAttributes());
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }
}
| 1,554 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/AwsConfiguration.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import com.netflix.iep.config.IConfiguration;
import com.netflix.iep.config.DefaultValue;
import org.joda.time.Duration;
/**
 * Archaius-backed configuration for the edda-client AWS wrappers. The {@code @DefaultValue}
 * annotations supply the value used when the property is not set.
 */
public interface AwsConfiguration extends IConfiguration {
  /** Should we mock dependencies? */
  @DefaultValue("false")
  public boolean useMock();
  /** Should we wrap a real AWS client capable of accessing AWS apis directly */
  @DefaultValue("false")
  public boolean wrapAwsClient();
  /**
   * Should we attempt to use edda for reads? This is only supported for the EC2 api and will be
   * ignored for all others.
   */
  @DefaultValue("true")
  public boolean useEdda();
  /**
   * Edda service URL template. The {@code ${vip}} (and, in some templates, {@code ${region}})
   * placeholders are substituted by the client before use (see {@code EddaAwsClient.mkUrl}).
   */
  @DefaultValue("vip://edda-client:${vip}")
  public String url();
  /////////////////////////////////////////////////////////////////////////////
  // Settings below are used to setup amazon ClientConfiguration object
  // Durations use ISO-8601 notation, e.g. PT10S = 10 seconds.
  @DefaultValue("PT10S")
  public Duration connectionTimeout();
  @DefaultValue("200")
  public int maxConnections();
  @DefaultValue("2")
  public int maxErrorRetry();
  @DefaultValue("PT60S")
  public Duration socketTimeout();
}
| 1,555 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaRoute53Client.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.route53.AmazonRoute53;
import com.amazonaws.services.route53.model.*;
/**
 * Edda-backed Route 53 client: list-style calls are answered from the Edda cache instead of AWS.
 */
public class EddaRoute53Client extends EddaAwsClient {
  public EddaRoute53Client(AwsConfiguration config, String vip, String region) {
    super(config, vip, region);
  }

  /** Returns a proxy that serves cached reads; unimplemented calls are unsupported (see ProxyHelper). */
  public AmazonRoute53 readOnly() {
    return readOnly(AmazonRoute53.class);
  }

  /** Wraps a real AWS client; cached reads are served here, other calls fall through to the delegate. */
  public AmazonRoute53 wrapAwsClient(AmazonRoute53 delegate) {
    return wrapAwsClient(AmazonRoute53.class, delegate);
  }

  /** Lists all hosted zones known to Edda. */
  public ListHostedZonesResult listHostedZones() {
    return listHostedZones(new ListHostedZonesRequest());
  }

  /**
   * Lists hosted zones from the Edda cache.
   * NOTE(review): fields on the request (marker, max items) are currently ignored; the full
   * expanded set is always returned.
   */
  public ListHostedZonesResult listHostedZones(ListHostedZonesRequest request) {
    TypeReference<List<HostedZone>> ref = new TypeReference<List<HostedZone>>() {};
    String url = config.url() + "/api/v2/aws/hostedZones;_expand";
    try {
      List<HostedZone> hostedZones = parse(ref, doGet(url));
      return new ListHostedZonesResult()
        .withHostedZones(hostedZones);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }

  /** Lists the resource record sets of the given hosted zone from the Edda cache. */
  public ListResourceRecordSetsResult listResourceRecordSets(ListResourceRecordSetsRequest request) {
    validateNotEmpty("HostedZoneId", request.getHostedZoneId());
    TypeReference<List<ResourceRecordSet>> ref = new TypeReference<List<ResourceRecordSet>>() {};
    String hostedZoneId = request.getHostedZoneId();
    String url = config.url() + "/api/v2/aws/hostedRecords;_expand;zone.id=" + hostedZoneId;
    try {
      List<ResourceRecordSet> resourceRecordSets = parse(ref, doGet(url));
      return new ListResourceRecordSetsResult()
        .withResourceRecordSets(resourceRecordSets);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }
}
| 1,556 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaCloudWatchClient.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.model.*;
/**
 * Edda-backed CloudWatch client: describe-style calls are answered from the Edda cache instead of AWS.
 */
public class EddaCloudWatchClient extends EddaAwsClient {
  public EddaCloudWatchClient(AwsConfiguration config, String vip, String region) {
    super(config, vip, region);
  }

  /** Returns a proxy that serves cached reads; unimplemented calls are unsupported (see ProxyHelper). */
  public AmazonCloudWatch readOnly() {
    return readOnly(AmazonCloudWatch.class);
  }

  /** Wraps a real AWS client; cached reads are served here, other calls fall through to the delegate. */
  public AmazonCloudWatch wrapAwsClient(AmazonCloudWatch delegate) {
    return wrapAwsClient(AmazonCloudWatch.class, delegate);
  }

  /** Describes all alarms known to Edda. */
  public DescribeAlarmsResult describeAlarms() {
    return describeAlarms(new DescribeAlarmsRequest());
  }

  /**
   * Describes alarms from the Edda cache, filtered client-side by alarm name and state value
   * when present on the request. Action/name prefix filtering is not supported and is rejected.
   */
  public DescribeAlarmsResult describeAlarms(DescribeAlarmsRequest request) {
    validateEmpty("ActionPrefix", request.getActionPrefix());
    validateEmpty("AlarmNamePrefix", request.getAlarmNamePrefix());
    TypeReference<List<MetricAlarm>> ref = new TypeReference<List<MetricAlarm>>() {};
    String url = config.url() + "/api/v2/aws/alarms;_expand";
    try {
      List<MetricAlarm> metricAlarms = parse(ref, doGet(url));
      List<String> names = request.getAlarmNames();
      String state = request.getStateValue();
      if (shouldFilter(names) || shouldFilter(state)) {
        List<MetricAlarm> mas = new ArrayList<MetricAlarm>();
        for (MetricAlarm ma : metricAlarms) {
          if (matches(names, ma.getAlarmName()) && matches(state, ma.getStateValue()))
            mas.add(ma);
        }
        metricAlarms = mas;
      }
      return new DescribeAlarmsResult()
        .withMetricAlarms(metricAlarms);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }
}
| 1,557 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaAutoScalingClient.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.model.*;
/**
 * Edda-backed Auto Scaling client: describe-style calls are answered from the Edda cache instead of AWS.
 */
public class EddaAutoScalingClient extends EddaAwsClient {
  public EddaAutoScalingClient(AwsConfiguration config, String vip, String region) {
    super(config, vip, region);
  }

  /** Returns a proxy that serves cached reads; unimplemented calls are unsupported (see ProxyHelper). */
  public AmazonAutoScaling readOnly() {
    return readOnly(AmazonAutoScaling.class);
  }

  /** Wraps a real AWS client; cached reads are served here, other calls fall through to the delegate. */
  public AmazonAutoScaling wrapAwsClient(AmazonAutoScaling delegate) {
    return wrapAwsClient(AmazonAutoScaling.class, delegate);
  }

  /** Describes all auto scaling groups known to Edda. */
  public DescribeAutoScalingGroupsResult describeAutoScalingGroups() {
    return describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest());
  }

  /**
   * Describes auto scaling groups from the Edda cache, filtered client-side by the group names
   * on the request when present.
   */
  public DescribeAutoScalingGroupsResult describeAutoScalingGroups(DescribeAutoScalingGroupsRequest request) {
    TypeReference<List<AutoScalingGroup>> ref = new TypeReference<List<AutoScalingGroup>>() {};
    String url = config.url() + "/api/v2/aws/autoScalingGroups;_expand";
    try {
      List<AutoScalingGroup> autoScalingGroups = parse(ref, doGet(url));
      List<String> names = request.getAutoScalingGroupNames();
      if (shouldFilter(names)) {
        List<AutoScalingGroup> asgs = new ArrayList<AutoScalingGroup>();
        for (AutoScalingGroup asg : autoScalingGroups) {
          if (matches(names, asg.getAutoScalingGroupName()))
            asgs.add(asg);
        }
        autoScalingGroups = asgs;
      }
      return new DescribeAutoScalingGroupsResult()
        .withAutoScalingGroups(autoScalingGroups);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }

  /** Describes all launch configurations known to Edda. */
  public DescribeLaunchConfigurationsResult describeLaunchConfigurations() {
    return describeLaunchConfigurations(new DescribeLaunchConfigurationsRequest());
  }

  /**
   * Describes launch configurations from the Edda cache, filtered client-side by the names on
   * the request when present.
   */
  public DescribeLaunchConfigurationsResult describeLaunchConfigurations(DescribeLaunchConfigurationsRequest request) {
    TypeReference<List<LaunchConfiguration>> ref = new TypeReference<List<LaunchConfiguration>>() {};
    String url = config.url() + "/api/v2/aws/launchConfigurations;_expand";
    try {
      List<LaunchConfiguration> launchConfigurations = parse(ref, doGet(url));
      List<String> names = request.getLaunchConfigurationNames();
      if (shouldFilter(names)) {
        List<LaunchConfiguration> lcs = new ArrayList<LaunchConfiguration>();
        for (LaunchConfiguration lc : launchConfigurations) {
          if (matches(names, lc.getLaunchConfigurationName()))
            lcs.add(lc);
        }
        launchConfigurations = lcs;
      }
      return new DescribeLaunchConfigurationsResult()
        .withLaunchConfigurations(launchConfigurations);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }

  /** Describes all scaling policies known to Edda. */
  public DescribePoliciesResult describePolicies() {
    return describePolicies(new DescribePoliciesRequest());
  }

  /**
   * Describes scaling policies from the Edda cache, filtered client-side by auto scaling group
   * name and/or policy names when present on the request.
   */
  public DescribePoliciesResult describePolicies(DescribePoliciesRequest request) {
    TypeReference<List<ScalingPolicy>> ref = new TypeReference<List<ScalingPolicy>>() {};
    String url = config.url() + "/api/v2/aws/scalingPolicies;_expand";
    try {
      List<ScalingPolicy> scalingPolicies = parse(ref, doGet(url));
      String asg = request.getAutoScalingGroupName();
      List<String> names = request.getPolicyNames();
      if (shouldFilter(asg) || shouldFilter(names)) {
        List<ScalingPolicy> sps = new ArrayList<ScalingPolicy>();
        for (ScalingPolicy sp : scalingPolicies) {
          if (matches(asg, sp.getAutoScalingGroupName()) && matches(names, sp.getPolicyName()))
            sps.add(sp);
        }
        scalingPolicies = sps;
      }
      return new DescribePoliciesResult()
        .withScalingPolicies(scalingPolicies);
    }
    catch (IOException e) {
      throw new AmazonClientException("Failed to parse " + url, e);
    }
  }
}
| 1,558 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaAwsClient.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonServiceException;
import com.netflix.edda.util.ProxyHelper;
/**
 * Base class for the Edda-backed AWS clients. Subclasses answer describe/list calls by fetching
 * cached, JSON-encoded resource data from an Edda service endpoint over HTTP rather than calling
 * AWS directly. The helper methods at the bottom implement the shared request-validation and
 * client-side filtering conventions used by all subclasses.
 */
abstract public class EddaAwsClient {
  // Configuration (Edda URL template, timeouts, etc.).
  final AwsConfiguration config;
  // Substituted for the ${vip} placeholder in the configured URL.
  final String vip;
  // Substituted for the ${region} placeholder in the configured URL.
  final String region;
  public EddaAwsClient(AwsConfiguration config, String vip, String region) {
    this.config = config;
    this.vip = vip;
    this.region = region;
  }
  /** No-op by default; subclasses may override to release resources. */
  public void shutdown() {}
  /**
   * Returns a dynamic proxy of the AWS interface backed only by this Edda client
   * (see {@code ProxyHelper.unsupported} for handling of calls not implemented here).
   */
  protected <T> T readOnly(Class<T> c) {
    return ProxyHelper.unsupported(c, this);
  }
  /**
   * Returns a dynamic proxy that serves calls implemented by this Edda client and otherwise
   * falls through to the given real AWS client (see {@code ProxyHelper.wrapper}).
   */
  protected <T> T wrapAwsClient(Class<T> c, T delegate) {
    return ProxyHelper.wrapper(c, delegate, this);
  }
  /**
   * Performs a blocking HTTP GET of the given URI (after placeholder substitution) and returns
   * the raw response body. A non-200 status is surfaced as an AmazonServiceException; any other
   * failure (including the 2-minute timeout) is wrapped in a RuntimeException.
   */
  protected byte[] doGet(final String uri) {
    try {
      return EddaContext.getContext().getRxHttp().get(mkUrl(uri))
        .flatMap(response -> {
          if (response.getStatus().code() != 200) {
            // Map HTTP errors onto the AWS SDK exception type callers already handle.
            AmazonServiceException e = new AmazonServiceException("Failed to fetch " + uri);
            e.setStatusCode(response.getStatus().code());
            e.setErrorCode("Edda");
            e.setRequestId(uri);
            return rx.Observable.error(e);
          }
          // Accumulate the chunked response body into a single byte array.
          return response.getContent()
            .reduce(
              new ByteArrayOutputStream(),
              (out, bb) -> {
                try { bb.readBytes(out, bb.readableBytes()); }
                catch (IOException e) { throw new RuntimeException(e); }
                return out;
              }
            )
            .map(out -> {
              return out.toByteArray();
            });
        })
        .toBlocking()
        .toFuture()
        .get(2, TimeUnit.MINUTES);
    }
    catch (Exception e) {
      throw new RuntimeException("failed to get url: " + uri, e);
    }
  }
  /** Substitutes the ${vip} and ${region} placeholders in a URL template. */
  protected String mkUrl(String url) {
    return url.replaceAll("\\$\\{vip\\}", vip).replaceAll("\\$\\{region\\}", region);
  }
  /** Deserializes a JSON response body into the given target type. */
  protected <T> T parse(TypeReference<T> ref, byte[] body) throws IOException {
    return JsonHelper.createParser(new ByteArrayInputStream(body)).readValueAs(ref);
  }
  /** Rejects request fields this client cannot honor (non-empty value supplied). */
  protected void validateEmpty(String name, String s) {
    if (s != null && s.length() > 0)
      throw new UnsupportedOperationException(name + " not supported");
  }
  /** Rejects requests missing a field this client requires. */
  protected void validateNotEmpty(String name, String s) {
    if (s == null || s.length() == 0)
      throw new UnsupportedOperationException(name + " required");
  }
  /** Rejects request fields this client cannot honor (any boolean value supplied). */
  protected void validateEmpty(String name, Boolean b) {
    if (b != null)
      throw new UnsupportedOperationException(name + " not supported");
  }
  /** Rejects request fields this client cannot honor (non-empty list supplied). */
  protected <T> void validateEmpty(String name, List<T> list) {
    if (list != null && list.size() > 0)
      throw new UnsupportedOperationException(name + " not supported");
  }
  /** True if the caller supplied a filter value that must be applied client-side. */
  protected boolean shouldFilter(String s) {
    return (s != null && s.length() > 0);
  }
  /** True if the caller supplied filter values that must be applied client-side. */
  protected boolean shouldFilter(List<String> list) {
    return (list != null && list.size() > 0);
  }
  /** True when no filter was supplied, or the value equals the filter. */
  protected boolean matches(String s, String v) {
    return !shouldFilter(s) || s.equals(v);
  }
  /** True when no filter was supplied, or the value is in the filter list. */
  protected boolean matches(List<String> list, String v) {
    return !shouldFilter(list) || list.contains(v);
  }
}
| 1,559 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/AwsException.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import com.amazonaws.AmazonServiceException;
/**
 * Helper for raising AWS-style service exceptions from within the Edda
 * client, so callers observe the same exception shape a real AWS call would
 * produce. All methods throw; none return normally.
 */
public class AwsException {
  private AwsException() {}

  // Fixed request id used because these errors originate in the client
  // rather than from an actual AWS request.
  private final static String oneTrueId = "04d05938-1521-44f1-a0dd-39263a5326f3";

  /**
   * Builds and throws an AmazonServiceException carrying the given HTTP
   * status code, service name, request id, error code and message.
   *
   * @throws AmazonServiceException always
   */
  public static void raise(
    int code,
    String svc,
    String reqId,
    String error,
    String msg
  ) {
    // StringBuilder instead of StringBuffer: the buffer is method-local, so
    // StringBuffer's synchronization is pure overhead.
    StringBuilder buf = new StringBuilder()
      .append("Status Code: ").append(code)
      .append(", AWS Service: ").append(svc)
      .append(", AWS Request ID: ").append(reqId)
      .append(", AWS Error Code: ").append(error)
      .append(", AWS Error Message:").append(msg);
    AmazonServiceException e = new AmazonServiceException(buf.toString());
    e.setStatusCode(code);
    e.setServiceName(svc);
    e.setRequestId(reqId);
    e.setErrorCode(error);
    throw e;
  }

  /**
   * Maps a symbolic AWS error code to its conventional HTTP status and
   * throws. Unrecognized error codes map to 400.
   *
   * @throws AmazonServiceException always
   */
  public static void raise(String svc, String error) {
    final int code;
    switch (error) {
      case "AccessDenied":          code = 403; break;
      case "AuthFailure":           code = 401; break;
      case "InternalError":         code = 500; break;
      case "InvalidParameterValue": code = 400; break;
      case "RequestThrottled":      code = 403; break;
      case "ServiceUnavailable":    code = 503; break;
      default:                      code = 400; break;
    }
    raise(code, svc, oneTrueId, error, error);
  }
}
| 1,560 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaContext.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.util.concurrent.atomic.AtomicReference;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConfigurationManager;
import com.netflix.iep.http.RxHttp;
/**
 * Process-wide holder for the runtime dependencies of the Edda client
 * (currently the shared RxHttp instance). Constructed once via injection;
 * the static getContext() accessor lets non-injected code (e.g.
 * EddaAwsClient.doGet) reach the same instance.
 */
@Singleton
public class EddaContext {
  private static final Logger LOGGER = LoggerFactory.getLogger(EddaContext.class);
  private static final String ENABLED_PROP = "edda-client.nflx.enabled";
  private static final String CONFIG_FILE = "edda-client.properties";
  /**
   * Bundle of context state. Constructing one also loads the
   * edda-client.properties overrides, unless disabled via the enabled prop.
   */
  public class EddaContextInstance {
    private final RxHttp rxHttp;
    protected EddaContextInstance(RxHttp rxHttp) {
      this.rxHttp = rxHttp;
      try {
        if (ConfigurationManager.getConfigInstance().getBoolean(ENABLED_PROP, true)) {
          LOGGER.debug("loading properties: " + CONFIG_FILE);
          ConfigurationManager.loadPropertiesFromResources(CONFIG_FILE);
        }
        else {
          LOGGER.debug("context not enabled, set " + ENABLED_PROP + "=true to enable");
        }
      }
      catch (java.io.IOException e) {
        // Failing to load the properties resource is fatal for the context.
        LOGGER.debug("context creation failed", e);
        throw new RuntimeException(e);
      }
    }
    public RxHttp getRxHttp() {
      return rxHttp;
    }
  }
  // Written once by the injected constructor; read by getContext().
  private static final AtomicReference<EddaContextInstance> CONTEXT =
    new AtomicReference<EddaContextInstance>(null);
  /** Returns the singleton context; fails fast if injection never ran. */
  protected static EddaContextInstance getContext() {
    EddaContextInstance ctx = CONTEXT.get();
    if (ctx == null) throw new IllegalStateException("EddaContext not initialized");
    return ctx;
  }
  @Inject
  public EddaContext(RxHttp rxHttp) {
    CONTEXT.set(new EddaContextInstance(rxHttp));
  }
}
| 1,561 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/AwsClientFactory.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.util.concurrent.atomic.AtomicReference;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.Regions;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing;
import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancingClient;
import com.amazonaws.services.route53.AmazonRoute53;
import com.amazonaws.services.route53.AmazonRoute53Client;
import com.netflix.iep.config.Configuration;
import com.netflix.iep.NetflixEnvironment;
/**
 * Factory for AWS service clients that can be served from Edda's cached
 * collections instead of (or wrapped around) the real AWS endpoints.
 * Depending on configuration, each factory method returns either a pure
 * Edda read-only client, a real AWS client, or a real client wrapped so
 * supported reads go to Edda.
 */
public class AwsClientFactory {
  private AwsClientFactory() {}

  private static AwsConfiguration config() {
    return Configuration.newProxy(AwsConfiguration.class, "netflix.edda.aws");
  }

  private static final AtomicReference<AWSCredentialsProvider> DEFAULT_PROVIDER =
    new AtomicReference<AWSCredentialsProvider>(new DefaultAWSCredentialsProviderChain());

  private static final AtomicReference<String> DEFAULT_VIP =
    new AtomicReference<String>("edda-main:7001");

  /** Overrides the credentials provider used by the no-arg factory methods. */
  public static void setDefaultCredentialsProvider(AWSCredentialsProvider p) {
    DEFAULT_PROVIDER.set(p);
  }

  /** Overrides the Edda vip used by the no-arg factory methods. */
  public static void setDefaultVip(String vip) {
    DEFAULT_VIP.set(vip);
  }

  /** Builds the shared AWS ClientConfiguration (timeouts, retry policy). */
  private static ClientConfiguration clientConfig(AwsConfiguration config) {
    return new ClientConfiguration()
      .withConnectionTimeout((int) config.connectionTimeout().getMillis())
      .withMaxConnections(config.maxConnections())
      .withMaxErrorRetry(config.maxErrorRetry())
      .withSocketTimeout((int) config.socketTimeout().getMillis())
      .withRetryPolicy(
        new RetryPolicy(
          new RetryPolicy.RetryCondition() {
            private final int maxRetries = config.maxErrorRetry();
            @Override public boolean shouldRetry(
              AmazonWebServiceRequest r, AmazonClientException e, int retriesAttempted
            ) {
              if (e instanceof AmazonServiceException) {
                int code = ((AmazonServiceException) e).getStatusCode();
                // Retry only server-side (5xx) and throttling-style errors.
                // BUG FIX: was `code % 100 == 5`, which matches 105/205/...
                // but NOT 500/502/503 (500 % 100 == 0). The 5xx test is
                // `code / 100 == 5`.
                if (!(code / 100 == 5 || code == 400 || code == 403 || code == 429)) return false;
              }
              return retriesAttempted < maxRetries;
            }
          },
          new RetryPolicy.BackoffStrategy() {
            // Linear backoff: 0s, 1s, 2s, ... NOTE(review): the first retry
            // has no delay since retriesAttempted starts at 0 -- confirm
            // that an immediate first retry is intended.
            @Override public long delayBeforeNextRetry(
              AmazonWebServiceRequest r, AmazonClientException e, int retriesAttempted
            ) { return retriesAttempted * 1000L; }
          },
          config.maxErrorRetry(),
          true
        )
      );
  }

  public static AmazonAutoScaling newAutoScalingClient() {
    return newAutoScalingClient(DEFAULT_PROVIDER.get(), DEFAULT_VIP.get());
  }

  public static AmazonAutoScaling newAutoScalingClient(AWSCredentialsProvider provider, String vip) {
    AwsConfiguration config = config();
    return newAutoScalingClient(config, provider, vip, NetflixEnvironment.region());
  }

  /** AutoScaling client: Edda-only, AWS-only, or Edda-wrapped per config. */
  public static AmazonAutoScaling newAutoScalingClient(
    AwsConfiguration config,
    AWSCredentialsProvider provider,
    String vip,
    String region
  ) {
    if (config.useMock())
      throw new UnsupportedOperationException("AutoScaling mock not yet supported");
    EddaAutoScalingClient edda = new EddaAutoScalingClient(config, vip, region);
    if (config.useEdda() && !config.wrapAwsClient()) return edda.readOnly();
    AmazonAutoScaling client = AmazonAutoScalingClient.builder()
      .withCredentials(provider)
      .withClientConfiguration(clientConfig(config))
      .withRegion(region)
      .build();
    if (config.useEdda())
      client = edda.wrapAwsClient(client);
    return client;
  }

  public static AmazonCloudWatch newCloudWatchClient() {
    return newCloudWatchClient(DEFAULT_PROVIDER.get(), DEFAULT_VIP.get());
  }

  public static AmazonCloudWatch newCloudWatchClient(AWSCredentialsProvider provider, String vip) {
    AwsConfiguration config = config();
    return newCloudWatchClient(config, provider, vip, NetflixEnvironment.region());
  }

  /** CloudWatch client: Edda-only, AWS-only, or Edda-wrapped per config. */
  public static AmazonCloudWatch newCloudWatchClient(
    AwsConfiguration config,
    AWSCredentialsProvider provider,
    String vip,
    String region
  ) {
    if (config.useMock())
      throw new UnsupportedOperationException("CloudWatch mock not yet supported");
    EddaCloudWatchClient edda = new EddaCloudWatchClient(config, vip, region);
    if (config.useEdda() && !config.wrapAwsClient()) return edda.readOnly();
    AmazonCloudWatch client = AmazonCloudWatchClient.builder()
      .withCredentials(provider)
      .withClientConfiguration(clientConfig(config))
      .withRegion(region)
      .build();
    if (config.useEdda())
      client = edda.wrapAwsClient(client);
    return client;
  }

  public static AmazonEC2 newEc2Client() {
    return newEc2Client(DEFAULT_PROVIDER.get(), DEFAULT_VIP.get());
  }

  public static AmazonEC2 newEc2Client(AWSCredentialsProvider provider, String vip) {
    AwsConfiguration config = config();
    return newEc2Client(config, provider, vip, NetflixEnvironment.region());
  }

  /** EC2 client: Edda-only, AWS-only, or Edda-wrapped per config. */
  public static AmazonEC2 newEc2Client(
    AwsConfiguration config,
    AWSCredentialsProvider provider,
    String vip,
    String region
  ) {
    if (config.useMock())
      throw new UnsupportedOperationException("EC2 mock not yet supported");
    EddaEc2Client edda = new EddaEc2Client(config, vip, region);
    if (config.useEdda() && !config.wrapAwsClient()) return edda.readOnly();
    AmazonEC2 client = AmazonEC2Client.builder()
      .withCredentials(provider)
      .withClientConfiguration(clientConfig(config))
      .withRegion(region)
      .build();
    if (config.useEdda())
      client = edda.wrapAwsClient(client);
    return client;
  }

  public static AmazonElasticLoadBalancing newElasticLoadBalancingClient() {
    return newElasticLoadBalancingClient(DEFAULT_PROVIDER.get(), DEFAULT_VIP.get());
  }

  public static AmazonElasticLoadBalancing newElasticLoadBalancingClient(
    AWSCredentialsProvider provider,
    String vip
  ) {
    AwsConfiguration config = config();
    return newElasticLoadBalancingClient(config, provider, vip, NetflixEnvironment.region());
  }

  public static AmazonElasticLoadBalancing newElasticLoadBalancingClient(
    AwsConfiguration config
  ) {
    AWSCredentialsProvider provider = DEFAULT_PROVIDER.get();
    String vip = DEFAULT_VIP.get();
    return newElasticLoadBalancingClient(config, provider, vip, NetflixEnvironment.region());
  }

  /** ELB client: Edda-only, AWS-only, or Edda-wrapped per config. */
  public static AmazonElasticLoadBalancing newElasticLoadBalancingClient(
    AwsConfiguration config,
    AWSCredentialsProvider provider,
    String vip,
    String region
  ) {
    if (config.useMock())
      throw new UnsupportedOperationException("ElasticLoadBalancing mock not yet supported");
    EddaElasticLoadBalancingClient edda = new EddaElasticLoadBalancingClient(config, vip, region);
    if (config.useEdda() && !config.wrapAwsClient()) return edda.readOnly();
    AmazonElasticLoadBalancing client = AmazonElasticLoadBalancingClient.builder()
      .withCredentials(provider)
      .withClientConfiguration(clientConfig(config))
      .withRegion(region)
      .build();
    if (config.useEdda())
      client = edda.wrapAwsClient(client);
    return client;
  }

  public static AmazonRoute53 newRoute53Client() {
    return newRoute53Client(DEFAULT_PROVIDER.get(), DEFAULT_VIP.get());
  }

  public static AmazonRoute53 newRoute53Client(AWSCredentialsProvider provider, String vip) {
    AwsConfiguration config = config();
    return newRoute53Client(config, provider, vip, NetflixEnvironment.region());
  }

  /** Route53 client: Edda-only, AWS-only, or Edda-wrapped per config. */
  public static AmazonRoute53 newRoute53Client(
    AwsConfiguration config,
    AWSCredentialsProvider provider,
    String vip,
    String region
  ) {
    if (config.useMock())
      throw new UnsupportedOperationException("Route53 mock not yet supported");
    EddaRoute53Client edda = new EddaRoute53Client(config, vip, region);
    if (config.useEdda() && !config.wrapAwsClient()) return edda.readOnly();
    AmazonRoute53 client = AmazonRoute53Client.builder()
      .withCredentials(provider)
      .withClientConfiguration(clientConfig(config))
      .withRegion(Regions.US_EAST_1) // us-east-1 only and needs to be explicit
      .build();
    if (config.useEdda())
      client = edda.wrapAwsClient(client);
    return client;
  }
}
| 1,562 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/JsonHelper.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.io.StringReader;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MappingJsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer;
import com.netflix.edda.mapper.*;
/**
 * Shared Jackson plumbing for decoding Edda responses into AWS model
 * objects. The ObjectMapper and MappingJsonFactory are created once and
 * reused; both are thread-safe after configuration.
 */
public class JsonHelper {
  private JsonHelper() {}

  private static final ObjectMapper mapper;
  private static final MappingJsonFactory factory;

  static {
    mapper = AmazonObjectMapperConfigurer.createConfigured()
      .addMixIn(InstanceStateView.class, InstanceStateViewMixIn.class)
      .addMixIn(LoadBalancerAttributesView.class, LoadBalancerAttributesViewMixIn.class);
    factory = new MappingJsonFactory(mapper);
  }

  public static JsonParser createParser(InputStream input) throws IOException {
    return factory.createParser(input);
  }

  public static JsonParser createParser(Reader input) throws IOException {
    return factory.createParser(input);
  }

  /**
   * Decodes the stream as an instance of {@code c}, closing the stream.
   * BUG FIX: previously built an anonymous {@code TypeReference<T>} here,
   * which -- because T is erased at runtime -- carried no type information
   * and left the Class parameter unused, so Jackson fell back to generic
   * bindings (e.g. LinkedHashMap) instead of {@code c}. Passing the Class
   * directly gives Jackson the real target type.
   */
  public static <T> T decode(Class<T> c, InputStream input) throws IOException {
    try {
      return createParser(input).readValueAs(c);
    }
    finally {
      input.close();
    }
  }

  /** Decodes the reader as an instance of {@code c}, closing the reader. */
  public static <T> T decode(Class<T> c, Reader input) throws IOException {
    try {
      return createParser(input).readValueAs(c);
    }
    finally {
      input.close();
    }
  }

  /** Decodes a JSON string as an instance of {@code c}. */
  public static <T> T decode(Class<T> c, String json) throws IOException {
    return decode(c, new StringReader(json));
  }
}
| 1,563 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/EddaEc2Client.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import com.fasterxml.jackson.core.type.TypeReference;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.*;
public class EddaEc2Client extends EddaAwsClient {
public EddaEc2Client(AwsConfiguration config, String vip, String region) {
super(config, vip, region);
}
public AmazonEC2 readOnly() {
return readOnly(AmazonEC2.class);
}
public AmazonEC2 wrapAwsClient(AmazonEC2 delegate) {
return wrapAwsClient(AmazonEC2.class, delegate);
}
public DescribeClassicLinkInstancesResult describeClassicLinkInstances() {
return describeClassicLinkInstances(new DescribeClassicLinkInstancesRequest());
}
public DescribeClassicLinkInstancesResult describeClassicLinkInstances(DescribeClassicLinkInstancesRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<ClassicLinkInstance>> ref = new TypeReference<List<ClassicLinkInstance>>() {};
String url = config.url() + "/api/v2/aws/classicLinkInstances;_expand";
try {
List<ClassicLinkInstance> instances = parse(ref, doGet(url));
List<String> ids = request.getInstanceIds();
if (shouldFilter(ids)) {
List<ClassicLinkInstance> is = new ArrayList<ClassicLinkInstance>();
for (ClassicLinkInstance i : instances) {
if (matches(ids, i.getInstanceId()))
is.add(i);
}
instances = is;
}
return new DescribeClassicLinkInstancesResult()
.withInstances(instances);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeImagesResult describeImages() {
return describeImages(new DescribeImagesRequest());
}
public DescribeImagesResult describeImages(DescribeImagesRequest request) {
validateEmpty("ExecutableUsers", request.getExecutableUsers());
List<Filter> filters = request.getFilters();
String path = "aws/images";
if (filters != null && filters.size() > 0) {
if (
filters.size() == 1 &&
filters.get(0) != null &&
"is-public".equals(filters.get(0).getName()) &&
filters.get(0).getValues() != null &&
filters.get(0).getValues().size() == 1 &&
"false".equals(filters.get(0).getValues().get(0))
) {
path = "view/images";
}
else {
throw new UnsupportedOperationException("filters only support is-public=false");
}
}
TypeReference<List<Image>> ref = new TypeReference<List<Image>>() {};
String url = config.url() + "/api/v2/"+ path + ";_expand";
try {
List<Image> images = parse(ref, doGet(url));
List<String> owners = request.getOwners();
List<String> ids = request.getImageIds();
if (shouldFilter(owners) || shouldFilter(ids)) {
List<Image> is = new ArrayList<Image>();
for (Image i : images) {
if (matches(owners, i.getOwnerId()) && matches(ids, i.getImageId()))
is.add(i);
}
images = is;
}
return new DescribeImagesResult()
.withImages(images);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeInstancesResult describeInstances() {
return describeInstances(new DescribeInstancesRequest());
}
public DescribeInstancesResult describeInstances(DescribeInstancesRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<Reservation>> ref = new TypeReference<List<Reservation>>() {};
String url = config.url() + "/api/v2/aws/instances;_expand";
try {
List<Reservation> reservations = parse(ref, doGet(url));
List<String> ids = request.getInstanceIds();
if (shouldFilter(ids)) {
List<Reservation> rs = new ArrayList<Reservation>();
for (Reservation r : reservations) {
List<Instance> is = new ArrayList<Instance>();
for (Instance i : r.getInstances()) {
if (matches(ids, i.getInstanceId()))
is.add(i);
}
if (is.size() > 0)
rs.add(r.withInstances(is));
}
reservations = rs;
}
return new DescribeInstancesResult()
.withReservations(reservations);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() {
return describeReservedInstancesOfferings(new DescribeReservedInstancesOfferingsRequest());
}
public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings(DescribeReservedInstancesOfferingsRequest request) {
validateEmpty("Filter", request.getFilters());
validateEmpty("AvailabilityZone", request.getAvailabilityZone());
validateEmpty("IncludeMarketplace", request.getIncludeMarketplace());
validateEmpty("InstanceTenancy", request.getInstanceTenancy());
validateEmpty("InstanceType", request.getInstanceType());
validateEmpty("OfferingType", request.getOfferingType());
validateEmpty("ProductDescription", request.getProductDescription());
TypeReference<List<ReservedInstancesOffering>> ref = new TypeReference<List<ReservedInstancesOffering>>() {};
String url = config.url() + "/api/v2/aws/reservedInstancesOfferings;_expand";
try {
List<ReservedInstancesOffering> reservedInstancesOfferings = parse(ref, doGet(url));
List<String> ids = request.getReservedInstancesOfferingIds();
if (shouldFilter(ids)) {
List<ReservedInstancesOffering> rs = new ArrayList<ReservedInstancesOffering>();
for (ReservedInstancesOffering r : reservedInstancesOfferings) {
if (matches(ids, r.getReservedInstancesOfferingId()))
rs.add(r);
}
reservedInstancesOfferings = rs;
}
return new DescribeReservedInstancesOfferingsResult()
.withReservedInstancesOfferings(reservedInstancesOfferings);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeSecurityGroupsResult describeSecurityGroups() {
return describeSecurityGroups(new DescribeSecurityGroupsRequest());
}
public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroupsRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<SecurityGroup>> ref = new TypeReference<List<SecurityGroup>>() {};
String url = config.url() + "/api/v2/aws/securityGroups;_expand";
try {
List<SecurityGroup> securityGroups = parse(ref, doGet(url));
List<String> names = request.getGroupNames();
List<String> ids = request.getGroupIds();
if (shouldFilter(names) || shouldFilter(ids)) {
List<SecurityGroup> sgs = new ArrayList<SecurityGroup>();
for (SecurityGroup sg : securityGroups) {
if (matches(names, sg.getGroupName()) && matches(ids, sg.getGroupId()))
sgs.add(sg);
}
securityGroups = sgs;
}
return new DescribeSecurityGroupsResult()
.withSecurityGroups(securityGroups);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeSubnetsResult describeSubnets() {
return describeSubnets(new DescribeSubnetsRequest());
}
public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<Subnet>> ref = new TypeReference<List<Subnet>>() {};
String url = config.url() + "/api/v2/aws/subnets;_expand";
try {
List<Subnet> subnets = parse(ref, doGet(url));
List<String> ids = request.getSubnetIds();
if (shouldFilter(ids)) {
List<Subnet> ss = new ArrayList<Subnet>();
for (Subnet s : subnets) {
if (matches(ids, s.getSubnetId()))
ss.add(s);
}
subnets = ss;
}
return new DescribeSubnetsResult()
.withSubnets(subnets);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeVolumesResult describeVolumes() {
return describeVolumes(new DescribeVolumesRequest());
}
public DescribeVolumesResult describeVolumes(DescribeVolumesRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<Volume>> ref = new TypeReference<List<Volume>>() {};
String url = config.url() + "/api/v2/aws/volumes;_expand";
try {
List<Volume> volumes = parse(ref, doGet(url));
List<String> ids = request.getVolumeIds();
if (shouldFilter(ids)) {
List<Volume> vs = new ArrayList<Volume>();
for (Volume v : volumes) {
if (matches(ids, v.getVolumeId()))
vs.add(v);
}
volumes = vs;
}
return new DescribeVolumesResult()
.withVolumes(volumes);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() {
return describeVpcPeeringConnections(new DescribeVpcPeeringConnectionsRequest());
}
public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections(DescribeVpcPeeringConnectionsRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<VpcPeeringConnection>> ref = new TypeReference<List<VpcPeeringConnection>>() {};
String url = config.url() + "/api/v2/aws/vpcPeeringConnections;_expand";
try {
List<VpcPeeringConnection> vpcs = parse(ref, doGet(url));
List<String> ids = request.getVpcPeeringConnectionIds();
if (shouldFilter(ids)) {
List<VpcPeeringConnection> vs = new ArrayList<VpcPeeringConnection>();
for (VpcPeeringConnection v : vpcs) {
if (matches(ids, v.getVpcPeeringConnectionId()))
vs.add(v);
}
vpcs = vs;
}
return new DescribeVpcPeeringConnectionsResult()
.withVpcPeeringConnections(vpcs);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeVpcsResult describeVpcs() {
return describeVpcs(new DescribeVpcsRequest());
}
public DescribeVpcsResult describeVpcs(DescribeVpcsRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<Vpc>> ref = new TypeReference<List<Vpc>>() {};
String url = config.url() + "/api/v2/aws/vpcs;_expand";
try {
List<Vpc> vpcs = parse(ref, doGet(url));
List<String> ids = request.getVpcIds();
if (shouldFilter(ids)) {
List<Vpc> vs = new ArrayList<Vpc>();
for (Vpc v : vpcs) {
if (matches(ids, v.getVpcId()))
vs.add(v);
}
vpcs = vs;
}
return new DescribeVpcsResult()
.withVpcs(vpcs);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
public DescribeVpcClassicLinkResult describeVpcClassicLink() {
return describeVpcClassicLink(new DescribeVpcClassicLinkRequest());
}
public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLinkRequest request) {
validateEmpty("Filter", request.getFilters());
TypeReference<List<VpcClassicLink>> ref = new TypeReference<List<VpcClassicLink>>() {};
String url = config.url() + "/api/v2/aws/vpcClassicLinks;_expand";
try {
List<VpcClassicLink> vpcs = parse(ref, doGet(url));
List<String> ids = request.getVpcIds();
if (shouldFilter(ids)) {
List<VpcClassicLink> vs = new ArrayList<VpcClassicLink>();
for (VpcClassicLink v : vpcs) {
if (matches(ids, v.getVpcId()))
vs.add(v);
}
vpcs = vs;
}
return new DescribeVpcClassicLinkResult()
.withVpcs(vpcs);
}
catch (IOException e) {
throw new AmazonClientException("Faled to parse " + url, e);
}
}
}
| 1,564 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/util/Hash.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.util;
import java.io.UnsupportedEncodingException;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
 * Helpers for computing MD5 / SHA-1 digests of strings and byte arrays,
 * returned as positive BigIntegers (convenient for hex formatting and
 * comparison).
 */
public class Hash {
  private Hash() {}

  /** MD5 digest of the UTF-8 encoding of the input string. */
  public static BigInteger md5(String input) throws UnsupportedEncodingException {
    // StandardCharsets.UTF_8 avoids the charset-by-name lookup that forced
    // the checked exception; the throws clause is kept so existing callers
    // that catch it still compile.
    return md5(input.getBytes(StandardCharsets.UTF_8));
  }

  /** MD5 digest of the raw bytes. */
  public static BigInteger md5(byte[] input) {
    return computeHash("MD5", input);
  }

  /** SHA-1 digest of the UTF-8 encoding of the input string. */
  public static BigInteger sha1(String input) throws UnsupportedEncodingException {
    return sha1(input.getBytes(StandardCharsets.UTF_8));
  }

  /** SHA-1 digest of the raw bytes. */
  public static BigInteger sha1(byte[] input) {
    // "SHA-1" is the JCA standard algorithm name; "SHA1" only worked as a
    // provider-specific alias.
    return computeHash("SHA-1", input);
  }

  /**
   * Runs the named digest over the bytes and returns the result as a
   * positive BigInteger (signum forced to 1 so the digest is never
   * interpreted as negative).
   */
  private static BigInteger computeHash(String algorithm, byte[] bytes) {
    try {
      MessageDigest md = MessageDigest.getInstance(algorithm);
      md.update(bytes);
      return new BigInteger(1, md.digest());
    }
    catch (NoSuchAlgorithmException e) {
      throw new RuntimeException("algorithm " + algorithm + " not found", e);
    }
  }
}
| 1,565 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/util/ProxyHelper.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.util;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
/**
 * Dynamic-proxy utilities used to splice Edda-backed overrides in front of
 * real AWS client implementations, or to produce interface stubs whose
 * methods all fail with UnsupportedOperationException.
 */
public class ProxyHelper {
  private ProxyHelper() {}

  /**
   * Strips the reflection wrapper layers (InvocationTargetException etc.)
   * off an exception so callers see the original cause; falls back to the
   * input if unwrapping bottoms out at null.
   */
  private static Throwable findCause(Throwable e) {
    Throwable cause = e;
    while (cause != null && cause.getClass().getName().startsWith("java.lang.reflect."))
      cause = cause.getCause();
    return (cause == null) ? e : cause;
  }

  /**
   * Returns a proxy implementing {@code ctype} that dispatches each call to
   * a same-signature method on {@code overrides} when one exists, and to
   * {@code delegate} otherwise.
   */
  @SuppressWarnings("unchecked")
  public static <T> T wrapper(final Class<T> ctype, final T delegate, final Object overrides) {
    InvocationHandler dispatch = (proxy, method, args) -> {
      try {
        Method override = overrides.getClass().getMethod(method.getName(), method.getParameterTypes());
        return override.invoke(overrides, args);
      }
      catch (NoSuchMethodException notOverridden) {
        // No override declared: forward the call to the wrapped delegate.
        try {
          return method.invoke(delegate, args);
        }
        catch (Throwable t) {
          throw findCause(t);
        }
      }
      catch (Throwable t) {
        throw findCause(t);
      }
    };
    return (T) Proxy.newProxyInstance(ctype.getClassLoader(), new Class[]{ctype}, dispatch);
  }

  /** Returns a stub of {@code ctype} whose every method throws UnsupportedOperationException. */
  @SuppressWarnings("unchecked")
  public static <T> T unsupported(final Class<T> ctype) {
    InvocationHandler alwaysThrow = (proxy, method, args) -> {
      throw new UnsupportedOperationException(ctype.getName() + "." + method.getName());
    };
    return (T) Proxy.newProxyInstance(ctype.getClassLoader(), new Class[]{ctype}, alwaysThrow);
  }

  /**
   * Returns a proxy implementing {@code ctype} that dispatches to a
   * same-signature method on {@code overrides} when one exists and throws
   * UnsupportedOperationException for everything else.
   */
  @SuppressWarnings("unchecked")
  public static <T> T unsupported(final Class<T> ctype, final Object overrides) {
    InvocationHandler dispatch = (proxy, method, args) -> {
      try {
        Method override = overrides.getClass().getMethod(method.getName(), method.getParameterTypes());
        return override.invoke(overrides, args);
      }
      catch (NoSuchMethodException notOverridden) {
        throw new UnsupportedOperationException(ctype.getName() + "." + method.getName());
      }
      catch (Throwable t) {
        throw findCause(t);
      }
    };
    return (T) Proxy.newProxyInstance(ctype.getClassLoader(), new Class[]{ctype}, dispatch);
  }
}
| 1,566 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/mapper/InstanceStateView.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.mapper;
import java.util.Collections;
import java.util.List;
import java.util.ArrayList;
import com.amazonaws.services.elasticloadbalancing.model.InstanceState;
/**
 * Mutable view bean pairing a name with a list of ELB {@link InstanceState}
 * entries. The instance list is defensively copied on write and exposed
 * read-only, so callers cannot mutate internal state.
 */
public class InstanceStateView {
  private String name;
  private List<InstanceState> instances = new ArrayList<InstanceState>();

  /** Returns the name, or null if it has not been set. */
  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  /** Returns an unmodifiable view of the instance states. */
  public List<InstanceState> getInstances() {
    return Collections.unmodifiableList(instances);
  }

  /** Replaces the instance states with a defensive copy of the given list. */
  public void setInstances(List<InstanceState> instances) {
    this.instances = new ArrayList<InstanceState>(instances);
  }
}
| 1,567 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/mapper/LoadBalancerAttributesView.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.mapper;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes;
/**
 * Mutable view bean pairing a name with a {@link LoadBalancerAttributes}
 * payload. Both properties default to null until set.
 */
public class LoadBalancerAttributesView {
  private String name;
  private LoadBalancerAttributes attributes;

  /** Returns the name, or null if it has not been set. */
  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  /** Returns the attributes payload, or null if it has not been set. */
  public LoadBalancerAttributes getAttributes() {
    return attributes;
  }

  public void setAttributes(LoadBalancerAttributes attributes) {
    this.attributes = attributes;
  }
}
| 1,568 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/mapper/InstanceStateViewMixIn.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.mapper;
/**
 * Marker interface for {@code InstanceStateView}. It declares no members;
 * NOTE(review): the "MixIn" name suggests it is registered as a Jackson mix-in
 * so serialization annotations can be attached without touching the view
 * class itself — confirm against the mapper configuration.
 */
public interface InstanceStateViewMixIn {
}
| 1,569 |
0 | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda | Create_ds/edda-client/edda-client/src/main/java/com/netflix/edda/mapper/LoadBalancerAttributesViewMixIn.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.mapper;
/**
 * Marker interface for {@code LoadBalancerAttributesView}. It declares no
 * members; NOTE(review): the "MixIn" name suggests it is registered as a
 * Jackson mix-in so serialization annotations can be attached without touching
 * the view class itself — confirm against the mapper configuration.
 */
public interface LoadBalancerAttributesViewMixIn {
}
| 1,570 |
0 | Create_ds/edda-client/edda-client-module/src/test/java/com/netflix | Create_ds/edda-client/edda-client-module/src/test/java/com/netflix/edda/EddaModuleTest.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.iep.eureka.EurekaModule;
import com.netflix.iep.rxnetty.RxNettyModule;
import com.netflix.iep.http.RxHttp;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public class EddaModuleTest {
  /**
   * Wires EddaModule together with the RxNetty and Eureka modules and verifies
   * that an RxHttp instance is available both from the injector and via the
   * static EddaContext accessor (EddaContext is bound as an eager singleton by
   * EddaModule, so it should be populated once the injector exists).
   */
  @Test
  public void module() throws java.io.IOException {
    Injector injector = Guice.createInjector(
      new EddaModule(),
      new RxNettyModule(),
      new EurekaModule()
    );
    Assert.assertNotNull(injector.getInstance(RxHttp.class));
    Assert.assertNotNull(EddaContext.getContext().getRxHttp());
  }
}
| 1,571 |
0 | Create_ds/edda-client/edda-client-module/src/main/java/com/netflix | Create_ds/edda-client/edda-client-module/src/main/java/com/netflix/edda/EddaModule.java | /*
* Copyright 2014-2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda;
import com.google.inject.AbstractModule;
import com.netflix.iep.rxnetty.RxNettyModule;
/**
 * Guice module that installs RxNetty support and eagerly instantiates the
 * {@link EddaContext} singleton at injector-creation time.
 */
public final class EddaModule extends AbstractModule {

  @Override
  protected void configure() {
    install(new RxNettyModule());
    bind(EddaContext.class).asEagerSingleton();
  }

  // Instances compare equal by concrete class — presumably so Guice treats
  // repeated installs of this module as a single install; confirm if relied on.
  @Override
  public boolean equals(Object obj) {
    return obj != null && getClass().equals(obj.getClass());
  }

  @Override
  public int hashCode() {
    return getClass().hashCode();
  }
}
| 1,572 |
0 | Create_ds/aws-codeguru-cli/test-data/source-and-class/src/org/owasp/benchmark | Create_ds/aws-codeguru-cli/test-data/source-and-class/src/org/owasp/benchmark/testcode/BenchmarkTest00001.java | //{fact rule=path-traversal@v1.0 defects=1}
/**
* OWASP Benchmark v1.2
*
* <p>This file is part of the Open Web Application Security Project (OWASP) Benchmark Project. For
* details, please see <a
* href="https://owasp.org/www-project-benchmark/">https://owasp.org/www-project-benchmark/</a>.
*
* <p>The OWASP Benchmark is free software: you can redistribute it and/or modify it under the terms
* of the GNU General Public License as published by the Free Software Foundation, version 2.
*
* <p>The OWASP Benchmark is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* @author Dave Wichers
* @created 2015
*/
package org.owasp.benchmark.testcode;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
// NOTE(review): OWASP Benchmark test case. It INTENTIONALLY contains a
// path-traversal flaw (see the {fact rule=path-traversal defects=1} marker at
// the top of this file); do not "fix" the vulnerability — analysis tools are
// scored against it.
@WebServlet(value = "/pathtraver-00/BenchmarkTest00001")
public class BenchmarkTest00001 extends HttpServlet {

  private static final long serialVersionUID = 1L;

  /**
   * Seeds the "BenchmarkTest00001" cookie (whose value doPost later treats as
   * a file name) and renders the test-case HTML page.
   */
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    response.setContentType("text/html;charset=UTF-8");
    javax.servlet.http.Cookie userCookie =
        new javax.servlet.http.Cookie("BenchmarkTest00001", "FileName");
    userCookie.setMaxAge(60 * 3); // Store cookie for 3 minutes
    userCookie.setSecure(true);
    userCookie.setPath(request.getRequestURI());
    userCookie.setDomain(new java.net.URL(request.getRequestURL().toString()).getHost());
    response.addCookie(userCookie);
    javax.servlet.RequestDispatcher rd =
        request.getRequestDispatcher("/pathtraver-00/BenchmarkTest00001.html");
    rd.include(request, response);
  }

  /**
   * Reads the attacker-controlled cookie value and opens it as a file path
   * relative to the benchmark test-files directory — the intentional
   * path-traversal sink — echoing the file's first bytes, HTML-encoded.
   */
  @Override
  public void doPost(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // some code
    response.setContentType("text/html;charset=UTF-8");
    javax.servlet.http.Cookie[] theCookies = request.getCookies();
    String param = "noCookieValueSupplied";
    if (theCookies != null) {
      for (javax.servlet.http.Cookie theCookie : theCookies) {
        if (theCookie.getName().equals("BenchmarkTest00001")) {
          // Cookie value is URL-decoded but otherwise unvalidated.
          param = java.net.URLDecoder.decode(theCookie.getValue(), "UTF-8");
          break;
        }
      }
    }
    String fileName = null;
    java.io.FileInputStream fis = null;
    try {
      // Unsanitized concatenation: "../" sequences in 'param' can escape
      // TESTFILES_DIR (the deliberate defect in this test case).
      fileName = org.owasp.benchmark.helpers.Utils.TESTFILES_DIR + param;
      fis = new java.io.FileInputStream(new java.io.File(fileName));
      byte[] b = new byte[1000];
      int size = fis.read(b);
      // Output is HTML-encoded, so the flaw here is file disclosure, not XSS.
      response.getWriter()
          .println(
              "The beginning of file: '"
                  + org.owasp.esapi.ESAPI.encoder().encodeForHTML(fileName)
                  + "' is:\n\n"
                  + org.owasp.esapi.ESAPI.encoder().encodeForHTML(new String(b, 0, size)));
    } catch (Exception e) {
      System.out.println("Couldn't open FileInputStream on file: '" + fileName + "'");
      response.getWriter()
          .println(
              "Problem getting FileInputStream: "
                  + org.owasp.esapi.ESAPI.encoder().encodeForHTML(e.getMessage()));
    } finally {
      if (fis != null) {
        try {
          fis.close();
          fis = null;
        } catch (Exception e) {
          // we tried...
        }
      }
    }
  }
}
//{/fact}
| 1,573 |
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/util/ZipUtilsTest.java | package com.amazonaws.gurureviewercli.util;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import lombok.val;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests for ZipUtils.pack: zips fixture directories into a scratch directory
 * and verifies the resulting archive entries.
 */
public class ZipUtilsTest {
  // Scratch directory holding the generated zip; recreated for every test.
  private Path workDir;

  @BeforeEach
  void beforeEach() throws IOException {
    workDir = Files.createTempDirectory("zip-files");
  }

  @AfterEach
  void afterEach() throws IOException {
    // Delete bottom-up (reverse order walks files before their parent dirs).
    Files.walk(workDir)
        .sorted(Comparator.reverseOrder())
        .map(Path::toFile)
        .forEach(File::delete);
  }

  @Test
  void test_packUnpack() throws IOException {
    val testDir = Paths.get("test-data/fake-repo");
    val zipName = workDir.resolve("test.zip").toString();
    // Pack the whole fixture directory relative to itself.
    ZipUtils.pack(Arrays.asList(testDir), testDir, zipName);
    try (ZipFile zipFile = new ZipFile(zipName)) {
      Enumeration<? extends ZipEntry> entries = zipFile.entries();
      val expectedFileNames =
          new HashSet<String>(Arrays.asList("build-dir/should-not-be-included.txt",
                                            "build-dir/lib/included.txt",
                                            "should-not-be-included.txt"));
      while (entries.hasMoreElements()) {
        ZipEntry entry = entries.nextElement();
        // Entry names must use forward slashes regardless of host platform.
        Assertions.assertFalse(entry.toString().contains("\\"), "Unexpected zip entry " + entry);
        Assertions.assertTrue(expectedFileNames.contains(entry.toString()));
        expectedFileNames.remove(entry.toString());
      }
      // Every expected entry appeared exactly once, and nothing extra.
      Assertions.assertTrue(expectedFileNames.isEmpty());
    }
  }

  /*
  If a aws-codeguru-reviewer.yml file is present, it has to be included in the zip file even if the root folder
  is not mentioned in the list of source directories.
  */
  @Test
  void test_packUnpackWithConfig() throws IOException {
    val testDir = Paths.get("test-data/fake-repo-with-config");
    val zipName = workDir.resolve("test.zip").toString();
    // Only src-dir is requested, but the repo root holds aws-codeguru-reviewer.yml.
    ZipUtils.pack(Arrays.asList(testDir.resolve("src-dir")), testDir, zipName);
    try (ZipFile zipFile = new ZipFile(zipName)) {
      Enumeration<? extends ZipEntry> entries = zipFile.entries();
      val expectedFileNames =
          new HashSet<String>(Arrays.asList("src-dir/included-src.txt",
                                            "aws-codeguru-reviewer.yml"));
      while (entries.hasMoreElements()) {
        ZipEntry entry = entries.nextElement();
        Assertions.assertFalse(entry.toString().contains("\\"), "Unexpected zip entry " + entry);
        Assertions.assertTrue(expectedFileNames.contains(entry.toString()));
        expectedFileNames.remove(entry.toString());
      }
      Assertions.assertTrue(expectedFileNames.isEmpty());
    }
  }
}
| 1,574 |
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/util/CodeInsightExportTest.java | package com.amazonaws.gurureviewercli.util;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.json.JsonMapper;
import lombok.val;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.model.bitbucket.CodeInsightsAnnotation;
/**
 * Tests for CodeInsightExport, which writes Bitbucket Code Insights report
 * and annotation files for a set of recommendations.
 */
class CodeInsightExportTest {
  private static final Path TEST_DIR = Paths.get("test-data");
  private static final Path RECOMMENDATIONS_DIR = TEST_DIR.resolve("recommendations");
  // Mapper used to read back the generated annotations.json; configured to be
  // tolerant of unknown enum values and to keep ISO-style date/duration formats.
  private static final JsonMapper JSON_MAPPER =
      JsonMapper.builder()
          .serializationInclusion(JsonInclude.Include.NON_ABSENT)
          .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
          .disable(SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS)
          .disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE)
          .enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE)
          .build();

  /**
   * Exports a recorded recommendations fixture and verifies that report.json
   * exists and annotations.json contains one annotation per recommendation.
   */
  @Test
  void test_happyCase() throws Exception {
    val recommendations =
        JsonUtil.loadRecommendations(RECOMMENDATIONS_DIR.resolve("exclude01.json"));
    val outDir = Files.createTempDirectory("test-output");
    val scanMetaData = ScanMetaData.builder()
        .associationArn("asdf")
        .region("1234")
        .build();
    CodeInsightExport.report(recommendations, scanMetaData, outDir);
    Assertions.assertTrue(outDir.resolve("report.json").toFile().isFile());
    val annotations = JSON_MAPPER.readValue(outDir.resolve("annotations.json").toFile(),
                                            new TypeReference<List<CodeInsightsAnnotation>>() {});
    Assertions.assertEquals(recommendations.size(), annotations.size());
  }
}
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/util/RecommendationsFilterTest.java | package com.amazonaws.gurureviewercli.util;
import java.nio.file.Path;
import java.nio.file.Paths;
import lombok.val;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import com.amazonaws.gurureviewercli.model.configfile.CustomConfiguration;
class RecommendationsFilterTest {
private static final Path TEST_DIR = Paths.get("test-data");
private static final Path CONFIG_DIR = TEST_DIR.resolve("custom-configs");
private static final Path RECOMMENDATIONS_DIR = TEST_DIR.resolve("recommendations");
@Test
void test_filterRecommendations_byRecommendation() throws Exception {
val configFile = CustomConfiguration.load(CONFIG_DIR.resolve("exclude-recommendations.yml"));
val recommendations =
JsonUtil.loadRecommendations(RECOMMENDATIONS_DIR.resolve("exclude01.json"));
val output = RecommendationsFilter.filterRecommendations(recommendations, configFile);
Assertions.assertEquals(1, output.size());
}
@Test
void test_filterRecommendations_byTag() throws Exception {
val configFile = CustomConfiguration.load(CONFIG_DIR.resolve("exclude-tag.yml"));
val recommendations =
JsonUtil.loadRecommendations(RECOMMENDATIONS_DIR.resolve("exclude01.json"));
val output = RecommendationsFilter.filterRecommendations(recommendations, configFile);
Assertions.assertEquals(3, output.size());
}
@Test
void test_filterRecommendations_bySeverity() throws Exception {
val configFile = CustomConfiguration.load(CONFIG_DIR.resolve("exclude-severity.yml"));
val recommendations =
JsonUtil.loadRecommendations(RECOMMENDATIONS_DIR.resolve("exclude01.json"));
val output = RecommendationsFilter.filterRecommendations(recommendations, configFile);
Assertions.assertEquals(2, output.size());
}
@Test
void test_filterRecommendations_byId() throws Exception {
val configFile = CustomConfiguration.load(CONFIG_DIR.resolve("exclude-id.yml"));
val recommendations =
JsonUtil.loadRecommendations(RECOMMENDATIONS_DIR.resolve("exclude01.json"));
val output = RecommendationsFilter.filterRecommendations(recommendations, configFile);
Assertions.assertEquals(0, output.size());
}
} | 1,576 |
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/adapter/ScanAdapterTest.java | package com.amazonaws.gurureviewercli.adapter;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import lombok.val;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
import software.amazon.awssdk.services.codegurureviewer.model.CodeReview;
import software.amazon.awssdk.services.codegurureviewer.model.CreateCodeReviewRequest;
import software.amazon.awssdk.services.codegurureviewer.model.CreateCodeReviewResponse;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationResponse;
import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsRequest;
import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsResponse;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociation;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociationState;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociationSummary;
import software.amazon.awssdk.services.codegurureviewer.model.S3RepositoryDetails;
import software.amazon.awssdk.services.s3.S3Client;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.GitMetaData;
@ExtendWith(MockitoExtension.class)
class ScanAdapterTest {
  @Mock
  private CodeGuruReviewerClient guruFrontendService;

  @Mock
  private S3Client s3client;

  /**
   * Happy-path scan: the mocked frontend reports an already-ASSOCIATED
   * repository and accepts the code-review creation, so startScan over this
   * repo's own src directory completes without throwing.
   */
  @Test
  public void test_startScan_HappyCase() throws Exception {
    // skip the test if the test container stripped to the top level .git folder
    Assumptions.assumeTrue(Paths.get("./.git").toFile().isDirectory());
    val fakeArn = "123";
    val bucketName = "some-bucket";
    val repoDetails = S3RepositoryDetails.builder().bucketName(bucketName).build();
    val expected = RepositoryAssociation.builder().associationArn(fakeArn)
                                        .s3RepositoryDetails(repoDetails)
                                        .state(RepositoryAssociationState.ASSOCIATED)
                                        .build();
    val summary = RepositoryAssociationSummary.builder()
                                              .associationArn(fakeArn)
                                              .state(RepositoryAssociationState.ASSOCIATED)
                                              .build();
    // List and describe both resolve to the associated repository.
    val response = ListRepositoryAssociationsResponse.builder().repositoryAssociationSummaries(summary).build();
    when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
        .thenReturn(response);
    val describeResponse = DescribeRepositoryAssociationResponse.builder().repositoryAssociation(expected).build();
    when(guruFrontendService.describeRepositoryAssociation(any(DescribeRepositoryAssociationRequest.class)))
        .thenReturn(describeResponse);
    // Code-review creation succeeds with a minimal CodeReview payload.
    val review = CodeReview.builder().codeReviewArn(fakeArn).build();
    val crResponse = CreateCodeReviewResponse.builder().codeReview(review).build();
    when(guruFrontendService.createCodeReview(any(CreateCodeReviewRequest.class))).thenReturn(crResponse);
    val config = Configuration.builder()
                              .guruFrontendService(guruFrontendService)
                              .s3Client(s3client)
                              .build();
    val gitMetaData = GitMetaData.builder()
                                 .repoRoot(Paths.get("./"))
                                 .build();
    val sourceDirs = Arrays.asList(Paths.get("src"));
    List<Path> buildDirs = Arrays.asList();
    // Assertion-free smoke test: success means no exception is thrown.
    ScanAdapter.startScan(config, gitMetaData, sourceDirs, buildDirs);
  }
}
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/adapter/AssociationAdapterTest.java | package com.amazonaws.gurureviewercli.adapter;
import java.util.Collections;
import lombok.val;
import org.beryx.textio.TextIO;
import org.beryx.textio.mock.MockTextTerminal;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
import software.amazon.awssdk.services.codegurureviewer.model.AssociateRepositoryRequest;
import software.amazon.awssdk.services.codegurureviewer.model.AssociateRepositoryResponse;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationResponse;
import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsRequest;
import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsResponse;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociation;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociationState;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociationSummary;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
@ExtendWith(MockitoExtension.class)
class AssociationAdapterTest {
@Mock
private CodeGuruReviewerClient guruFrontendService;
@Mock
private S3Client s3client;
@Test
public void test_getAssociatedGuruRepo_associationExists() {
val fakeArn = "123";
val expected = RepositoryAssociation.builder()
.associationArn(fakeArn)
.state(RepositoryAssociationState.ASSOCIATED)
.build();
val summary = RepositoryAssociationSummary.builder()
.associationArn(fakeArn)
.state(RepositoryAssociationState.ASSOCIATED)
.build();
val response = ListRepositoryAssociationsResponse.builder().repositoryAssociationSummaries(summary).build();
when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
.thenReturn(response);
val describeResponse = DescribeRepositoryAssociationResponse.builder().repositoryAssociation(expected).build();
when(guruFrontendService.describeRepositoryAssociation(any(DescribeRepositoryAssociationRequest.class)))
.thenReturn(describeResponse);
val config = Configuration.builder()
.guruFrontendService(guruFrontendService)
.repoName("some-repo-name")
.build();
val association = AssociationAdapter.getAssociatedGuruRepo(config);
Assertions.assertEquals(expected.associationArn(), association.associationArn());
}
@Test
public void test_getAssociatedGuruRepo_createNewWithExistingBucket() {
val bucketName = "some-bucket";
val fakeArn = "123";
val expected = RepositoryAssociation.builder()
.associationArn(fakeArn)
.state(RepositoryAssociationState.ASSOCIATED)
.build();
val emptyListResponse =
ListRepositoryAssociationsResponse.builder()
.repositoryAssociationSummaries(Collections.emptyList())
.build();
when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
.thenReturn(emptyListResponse);
when(s3client.headBucket(any(HeadBucketRequest.class))).thenReturn(HeadBucketResponse.builder().build());
when(guruFrontendService.associateRepository(any(AssociateRepositoryRequest.class)))
.thenReturn(AssociateRepositoryResponse.builder().repositoryAssociation(expected).build());
when(guruFrontendService.describeRepositoryAssociation(any(DescribeRepositoryAssociationRequest.class)))
.thenReturn(DescribeRepositoryAssociationResponse.builder().repositoryAssociation(expected).build());
val config = Configuration.builder()
.guruFrontendService(guruFrontendService)
.interactiveMode(false)
.s3Client(s3client)
.repoName("some-repo-name")
.build();
val association = AssociationAdapter.getAssociatedGuruRepo(config);
Assertions.assertEquals(expected.associationArn(), association.associationArn());
}
@Test
public void test_getAssociatedGuruRepo_createNewWithCreateBucket() {
// Same test as test_getAssociatedGuruRepo_createNewWithExistingBucket since creating the bucket does not
// return anything
val bucketName = "some-bucket";
val fakeArn = "123";
val expected = RepositoryAssociation.builder()
.associationArn(fakeArn)
.state(RepositoryAssociationState.ASSOCIATED)
.build();
val emptyListResponse =
ListRepositoryAssociationsResponse.builder()
.repositoryAssociationSummaries(Collections.emptyList())
.build();
when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
.thenReturn(emptyListResponse);
when(s3client.headBucket(any(HeadBucketRequest.class))).thenThrow(NoSuchBucketException.class);
when(guruFrontendService.associateRepository(any(AssociateRepositoryRequest.class)))
.thenReturn(AssociateRepositoryResponse.builder().repositoryAssociation(expected).build());
when(guruFrontendService.describeRepositoryAssociation(any(DescribeRepositoryAssociationRequest.class)))
.thenReturn(DescribeRepositoryAssociationResponse.builder().repositoryAssociation(expected).build());
val config = Configuration.builder()
.guruFrontendService(guruFrontendService)
.interactiveMode(false)
.s3Client(s3client)
.repoName("some-repo-name")
.build();
val association = AssociationAdapter.getAssociatedGuruRepo(config);
Assertions.assertEquals(expected.associationArn(), association.associationArn());
}
@Test
public void test_getAssociatedGuruRepo_createNewWithCreateBucketInteractive() {
val bucketName = "some-bucket";
val fakeArn = "123";
val expected = RepositoryAssociation.builder()
.associationArn(fakeArn)
.state(RepositoryAssociationState.ASSOCIATED)
.build();
val emptyListResponse =
ListRepositoryAssociationsResponse.builder()
.repositoryAssociationSummaries(Collections.emptyList())
.build();
when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
.thenReturn(emptyListResponse);
when(s3client.headBucket(any(HeadBucketRequest.class))).thenThrow(NoSuchBucketException.class);
when(guruFrontendService.associateRepository(any(AssociateRepositoryRequest.class)))
.thenReturn(AssociateRepositoryResponse.builder().repositoryAssociation(expected).build());
when(guruFrontendService.describeRepositoryAssociation(any(DescribeRepositoryAssociationRequest.class)))
.thenReturn(DescribeRepositoryAssociationResponse.builder().repositoryAssociation(expected).build());
val mockTerminal = new MockTextTerminal();
mockTerminal.getInputs().add("y");
val config = Configuration.builder()
.guruFrontendService(guruFrontendService)
.interactiveMode(true)
.s3Client(s3client)
.repoName("some-repo-name")
.textIO(new TextIO(mockTerminal))
.build();
val association = AssociationAdapter.getAssociatedGuruRepo(config);
Assertions.assertEquals(expected.associationArn(), association.associationArn());
}
@Test
public void test_getAssociatedGuruRepo_createNewWithCreateBucketInteractiveAbort() {
val bucketName = "some-bucket";
val emptyListResponse =
ListRepositoryAssociationsResponse.builder()
.repositoryAssociationSummaries(Collections.emptyList())
.build();
when(guruFrontendService.listRepositoryAssociations(any(ListRepositoryAssociationsRequest.class)))
.thenReturn(emptyListResponse);
when(s3client.headBucket(any(HeadBucketRequest.class))).thenThrow(NoSuchBucketException.class);
val mockTerminal = new MockTextTerminal();
mockTerminal.getInputs().add("n");
val config = Configuration.builder()
.guruFrontendService(guruFrontendService)
.interactiveMode(true)
.s3Client(s3client)
.repoName("some-repo-name")
.textIO(new TextIO(mockTerminal))
.build();
GuruCliException ret = Assertions.assertThrows(GuruCliException.class, () ->
AssociationAdapter.getAssociatedGuruRepo(config));
Assertions.assertEquals(ErrorCodes.USER_ABORT, ret.getErrorCode());
}
} | 1,578 |
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/adapter/ResultsAdapterTest.java | package com.amazonaws.gurureviewercli.adapter;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Collections;
import lombok.val;
import org.junit.jupiter.api.Test;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.util.JsonUtil;
/** Smoke test for ResultsAdapter.saveResults. */
class ResultsAdapterTest {
  /**
   * Loads a recorded recommendations fixture and writes the result files to a
   * temp directory; success means no exception is thrown (no assertions).
   */
  @Test
  void saveResults() throws Exception {
    val recommendations =
        JsonUtil.loadRecommendations(Paths.get("test-data/recommendations/recommendations.json"));
    val scanMetaData = ScanMetaData.builder()
                                   .repositoryRoot(Paths.get("./").toRealPath())
                                   .associationArn("123")
                                   .codeReviewArn("456")
                                   .sourceDirectories(Collections.emptyList())
                                   .build();
    val outDir = Files.createTempDirectory("test-output");
    ResultsAdapter.saveResults(outDir, recommendations, scanMetaData);
  }
}
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/adapter/ArtifactAdapterTest.java | package com.amazonaws.gurureviewercli.adapter;
import com.amazonaws.gurureviewercli.model.Configuration;
import lombok.val;
import org.beryx.textio.TextIO;
import org.beryx.textio.mock.MockTextTerminal;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.stubbing.Answer;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.ZipFile;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
/**
 * Unit tests for ArtifactAdapter. The S3 client is mocked; each test intercepts the
 * putObject call and inspects the zip archive that would have been uploaded instead of
 * talking to AWS. Test fixtures live under ./test-data.
 */
@ExtendWith(MockitoExtension.class)
class ArtifactAdapterTest {

    // Mockito-injected fake; putObject behavior is stubbed per test via doAnswer.
    @Mock
    private S3Client s3client;

    @Test
    public void test_zipAndUpload_happyCaseSourceOnly() throws Exception {
        val repoDir = Paths.get("./");
        // skip the test if the test container stripped to the top level .git folder
        Assumptions.assumeTrue(repoDir.resolve(".git").toFile().isDirectory());
        val tempDir = Files.createTempDirectory("test_zipAndUpload_happyCase");
        val bucketName = "some-bucket";
        val sourceDirs = Arrays.asList(Paths.get("src"));
        final List<Path> buildDirs = Collections.emptyList();
        val config = Configuration.builder()
            .s3Client(s3client)
            .build();
        // Intercept the upload: every entry in the zip must resolve to a real file in the
        // repo and must not escape the repo root via "..".
        Answer<Object> answer = invocationOnMock -> {
            Path filePath = invocationOnMock.getArgument(1);
            Assertions.assertTrue(filePath.toFile().isFile());
            try (val zipFile = new ZipFile(filePath.toFile())) {
                val entries = zipFile.entries();
                while (entries.hasMoreElements()) {
                    val s = entries.nextElement().getName();
                    val original = repoDir.resolve(s).toFile();
                    Assertions.assertTrue(original.isFile(), "Not a valid file: " + original);
                    Assertions.assertFalse(s.startsWith(".."));
                }
            }
            return null;
        };
        doAnswer(answer).when(s3client).putObject(any(PutObjectRequest.class), any(Path.class));
        val metaData = ArtifactAdapter.zipAndUpload(config, tempDir, repoDir, sourceDirs, buildDirs, bucketName);
        // No build dirs were passed, so only a source artifact key is expected.
        Assertions.assertNull(metaData.getBuildKey());
        Assertions.assertNotNull(metaData.getSourceKey());
    }

    @Test
    public void test_zipAndUpload_happyCaseGitFilesOnly() throws Exception {
        val repoDir = Paths.get("./test-data/two-commits").toRealPath();
        val tempDir = Files.createTempDirectory("test_zipAndUpload_happyCaseGitFilesOnly");
        val bucketName = "some-bucket";
        // no build artifacts; source-only scan
        final List<Path> buildDirs = Collections.emptyList();
        val mockTerminal = new MockTextTerminal();
        // answer Yes to the question if only files under version control should be scanned.
        mockTerminal.getInputs().add("y");
        val config = Configuration.builder()
            .s3Client(s3client)
            .interactiveMode(true)
            .textIO(new TextIO(mockTerminal))
            .versionedFiles(Arrays.asList(repoDir.resolve("test.txt")))
            .build();
        // Since only versioned files are scanned, exactly one non-git file (test.txt)
        // should end up in the archive.
        Answer<Object> answer = invocationOnMock -> {
            Path filePath = invocationOnMock.getArgument(1);
            Assertions.assertTrue(filePath.toFile().isFile());
            try (val zipFile = new ZipFile(filePath.toFile())) {
                val entries = zipFile.entries();
                int count = 0;
                while (entries.hasMoreElements()) {
                    val s = entries.nextElement().getName();
                    val original = repoDir.resolve(s).toFile();
                    Assertions.assertTrue(original.isFile(), "Not a valid file: " + original);
                    Assertions.assertFalse(s.startsWith(".."));
                    if (!s.startsWith("git/")) {
                        count++; // count the files that are not in the git folder.
                    }
                }
                Assertions.assertEquals(1, count, "Unexpected number of files in zip.");
            }
            return null;
        };
        doAnswer(answer).when(s3client).putObject(any(PutObjectRequest.class), any(Path.class));
        val metaData =
            ArtifactAdapter.zipAndUpload(config, tempDir, repoDir, Arrays.asList(repoDir), buildDirs, bucketName);
        Assertions.assertNull(metaData.getBuildKey());
        Assertions.assertNotNull(metaData.getSourceKey());
    }

    @Test
    public void test_zipAndUpload_happyCaseAllFiles() throws Exception {
        val repoDir = Paths.get("./test-data/two-commits").toRealPath();
        val tempDir = Files.createTempDirectory("test_zipAndUpload_happyCaseGitFilesOnly");
        val bucketName = "some-bucket";
        // no build artifacts; source-only scan
        final List<Path> buildDirs = Collections.emptyList();
        val mockTerminal = new MockTextTerminal();
        // answer No to the question if only files under version control should be scanned.
        mockTerminal.getInputs().add("n");
        val config = Configuration.builder()
            .s3Client(s3client)
            .interactiveMode(true)
            .textIO(new TextIO(mockTerminal))
            .versionedFiles(Arrays.asList(repoDir.resolve("test.txt")))
            .build();
        // With "scan everything" chosen, both non-git files in the fixture are expected.
        Answer<Object> answer = invocationOnMock -> {
            Path filePath = invocationOnMock.getArgument(1);
            Assertions.assertTrue(filePath.toFile().isFile());
            try (val zipFile = new ZipFile(filePath.toFile())) {
                val entries = zipFile.entries();
                int count = 0;
                while (entries.hasMoreElements()) {
                    val s = entries.nextElement().getName();
                    val original = repoDir.resolve(s).toFile();
                    Assertions.assertTrue(original.isFile(), "Not a valid file: " + original);
                    if (!s.startsWith("git/")) {
                        count++; // count the files that are not in the git folder.
                    }
                }
                Assertions.assertEquals(2, count, "Unexpected number of files in zip: " + count);
            }
            return null;
        };
        doAnswer(answer).when(s3client).putObject(any(PutObjectRequest.class), any(Path.class));
        val metaData =
            ArtifactAdapter.zipAndUpload(config, tempDir, repoDir, Arrays.asList(repoDir), buildDirs, bucketName);
        Assertions.assertNull(metaData.getBuildKey());
        Assertions.assertNotNull(metaData.getSourceKey());
    }

    @Test
    public void test_zipAndUpload_happyCaseBuildDir() throws Exception {
        val tempDir = Files.createTempDirectory("test_zipAndUpload_happyCaseGitFilesOnly");
        val bucketName = "some-bucket";
        // only include build artifacts from the lib dir of the fake repo.
        val repoDir = Paths.get("./test-data/fake-repo");
        val buildArtifacts = repoDir.resolve("build-dir/lib");
        val config = Configuration.builder()
            .s3Client(s3client)
            .interactiveMode(false)
            .build();
        // Only inspect the build ("analysis-bin") archive; it should contain exactly the
        // one whitelisted artifact, included.txt.
        Answer<Object> answer = invocationOnMock -> {
            Path filePath = invocationOnMock.getArgument(1);
            if (!filePath.toString().contains("analysis-bin")) {
                return null; // only look at the artifacts.
            }
            Assertions.assertTrue(filePath.toFile().isFile());
            try (val zipFile = new ZipFile(filePath.toFile())) {
                val entries = zipFile.entries();
                int count = 0;
                while (entries.hasMoreElements()) {
                    val s = entries.nextElement().getName();
                    Assertions.assertTrue(s.endsWith("included.txt"));
                    count++; // count the files that are not in the git folder.
                }
                Assertions.assertEquals(1, count);
            }
            return null;
        };
        doAnswer(answer).when(s3client).putObject(any(PutObjectRequest.class), any(Path.class));
        val metaData =
            ArtifactAdapter.zipAndUpload(config, tempDir,
                repoDir,
                Arrays.asList(repoDir),
                Arrays.asList(buildArtifacts),
                bucketName);
        // Both a source and a build artifact must have been uploaded.
        Assertions.assertNotNull(metaData.getBuildKey());
        Assertions.assertNotNull(metaData.getSourceKey());
    }

    @Test
    public void test_zipAndUpload_regression01() throws Exception {
        val repoDir = Paths.get("./test-data/source-and-class");
        // If we analyze the entire repo without setting build artifacts, we should get 1 archive with 3 files.
        val archivedFiles = getArchivedFileNames(repoDir, Arrays.asList(repoDir), Collections.emptyList());
        Assertions.assertEquals(1, archivedFiles.keySet().size());
        val firstKey = archivedFiles.keySet().iterator().next();
        Assertions.assertEquals(3, archivedFiles.get(firstKey).size());
    }

    @Test
    public void test_zipAndUpload_regression02() throws Exception {
        val repoDir = Paths.get("./test-data/source-and-class");
        val srcDir = repoDir.resolve("src");
        // if we analyze only the src dir without setting build artifacts, we should get 1 archive with 1 file.
        val archivedFiles = getArchivedFileNames(repoDir, Arrays.asList(srcDir), Collections.emptyList());
        Assertions.assertEquals(1, archivedFiles.keySet().size());
        val firstKey = archivedFiles.keySet().iterator().next();
        Assertions.assertEquals(1, archivedFiles.get(firstKey).size());
    }

    @Test
    public void test_zipAndUpload_regression03() throws Exception {
        val repoDir = Paths.get("./test-data/source-and-class");
        val srcDir = repoDir.resolve("src");
        val buildDir = repoDir.resolve("target");
        // If we analyze the src and build dir, we should get 2 archives with 1 file each.
        val archivedFiles = getArchivedFileNames(repoDir, Arrays.asList(srcDir), Arrays.asList(buildDir));
        Assertions.assertEquals(2, archivedFiles.keySet().size());
        val keyIterator = archivedFiles.keySet().iterator();
        val firstKey = keyIterator.next();
        Assertions.assertEquals(1, archivedFiles.get(firstKey).size());
        val secondKey = keyIterator.next();
        Assertions.assertEquals(1, archivedFiles.get(secondKey).size());
    }

    @Test
    public void test_zipAndUpload_regression04() throws Exception {
        val repoDir = Paths.get("./test-data/source-and-class");
        val buildDir = repoDir.resolve("target");
        // If we analyze the root and build dir, we should get 2 archives. The source archive should contain 2,
        // and the build archive should contain 1.
        // Note that the source artifact would actually contain 3 files, but we remove the build artifact from it.
        val archivedFiles = getArchivedFileNames(repoDir, Arrays.asList(repoDir), Arrays.asList(buildDir));
        Assertions.assertEquals(2, archivedFiles.keySet().size());
        val keyIterator = archivedFiles.keySet().iterator();
        val firstKey = keyIterator.next();
        val secondKey = keyIterator.next();
        // HashMap iteration order is unspecified, so handle either archive coming first.
        if (firstKey.contains("analysis-src-")) {
            Assertions.assertEquals(2, archivedFiles.get(firstKey).size());
            Assertions.assertEquals(1, archivedFiles.get(secondKey).size());
        } else if (firstKey.contains("analysis-bin-")) {
            Assertions.assertEquals(1, archivedFiles.get(firstKey).size());
            Assertions.assertEquals(2, archivedFiles.get(secondKey).size());
        } else {
            // unexpected case
            Assertions.assertTrue(false);
        }
    }

    /**
     * Runs zipAndUpload against the mocked S3 client and records, per uploaded archive,
     * the names of the zip entries it contains.
     *
     * @param repoDir           root of the fixture repository to scan
     * @param relativeSrcDirs   source directories handed to the adapter
     * @param relativeBuildDirs build-artifact directories handed to the adapter
     * @return map from archive path to the list of entry names inside that archive
     * @throws Exception if the adapter or temp-dir setup fails
     */
    private Map<String, List<String>> getArchivedFileNames(final Path repoDir,
                                                           final List<Path> relativeSrcDirs,
                                                           final List<Path> relativeBuildDirs) throws Exception {
        val tempDir = Files.createTempDirectory("test_setupRegressionScan");
        val bucketName = "some-bucket";
        val config = Configuration.builder()
            .s3Client(s3client)
            .interactiveMode(false)
            .build();
        // maps archive name to the list of its files.
        val archiveFileMap = new HashMap<String, List<String>>();
        Answer<Object> answer = invocationOnMock -> {
            Path filePath = invocationOnMock.getArgument(1);
            Assertions.assertTrue(filePath.toFile().isFile());
            try (val zipFile = new ZipFile(filePath.toFile())) {
                archiveFileMap.putIfAbsent(filePath.toString(), new ArrayList<>());
                val entries = zipFile.entries();
                while (entries.hasMoreElements()) {
                    archiveFileMap.get(filePath.toString()).add(entries.nextElement().getName());
                }
            }
            return null;
        };
        doAnswer(answer).when(s3client).putObject(any(PutObjectRequest.class), any(Path.class));
        ArtifactAdapter.zipAndUpload(config, tempDir, repoDir, relativeSrcDirs, relativeBuildDirs, bucketName);
        return archiveFileMap;
    }
}
0 | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/test/java/com/amazonaws/gurureviewercli/adapter/GitAdapterTest.java | package com.amazonaws.gurureviewercli.adapter;
import java.nio.file.Path;
import java.nio.file.Paths;
import lombok.val;
import org.beryx.textio.TextIO;
import org.beryx.textio.mock.MockTextTerminal;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
class GitAdapterTest {
private static final Path RESOURCE_ROOT = Paths.get("test-data");
@Test
public void test_getGitMetaData_notARepo() {
val repo = RESOURCE_ROOT.resolve("fresh-repo-without-remote");
GuruCliException ret = Assertions.assertThrows(GuruCliException.class, () ->
GitAdapter.tryGetMetaData(configWithoutCommits(repo), repo.resolve("notgit")));
Assertions.assertEquals(ErrorCodes.GIT_INVALID_DIR, ret.getErrorCode());
}
@Test
public void test_getGitMetaData_noRemote() throws Exception {
val repo = RESOURCE_ROOT.resolve("fresh-repo-no-remote");
val metadata = GitAdapter.tryGetMetaData(configWithoutCommits(repo), repo.resolve("git"));
Assertions.assertNull(metadata.getRemoteUrl());
Assertions.assertNotNull(metadata.getCurrentBranch());
Assertions.assertEquals(repo, metadata.getRepoRoot());
}
@Test
public void test_getGitMetaData_oneCommit_packageScan() {
val repo = RESOURCE_ROOT.resolve("one-commit");
val mockTerminal = new MockTextTerminal();
mockTerminal.getInputs().add("y");
val config = Configuration.builder()
.textIO(new TextIO(mockTerminal))
.interactiveMode(true)
.build();
val gitMetaData = GitAdapter.tryGetMetaData(config, repo.resolve("git"));
Assertions.assertNotNull(gitMetaData);
Assertions.assertNull(gitMetaData.getBeforeCommit());
Assertions.assertNull(gitMetaData.getAfterCommit());
Assertions.assertEquals(1, gitMetaData.getVersionedFiles().size());
Assertions.assertEquals("master", gitMetaData.getCurrentBranch());
Assertions.assertEquals("git@amazon.com:username/new_repo", gitMetaData.getRemoteUrl());
}
@Test
public void test_getGitMetaData_oneCommit_packageScanAbort() {
val repo = RESOURCE_ROOT.resolve("one-commit");
val mockTerminal = new MockTextTerminal();
mockTerminal.getInputs().add("n");
val config = Configuration.builder()
.textIO(new TextIO(mockTerminal))
.interactiveMode(true)
.build();
GuruCliException ret = Assertions.assertThrows(GuruCliException.class, () ->
GitAdapter.tryGetMetaData(config, repo.resolve("git")));
Assertions.assertEquals(ErrorCodes.USER_ABORT, ret.getErrorCode());
}
@Test
public void test_getGitMetaData_twoCommits_validCommits() {
val repo = RESOURCE_ROOT.resolve("two-commits");
val config = configWithoutCommits(repo);
config.setBeforeCommit("cdb0fcad7400610b1d1797a326a89414525160fe");
config.setAfterCommit("8ece465b7ecf8337bf767c9602d21bb92f2fad8a");
val gitMetaData = GitAdapter.tryGetMetaData(config, repo.resolve("git"));
Assertions.assertNotNull(gitMetaData);
Assertions.assertNotNull(gitMetaData.getBeforeCommit());
Assertions.assertNotNull(gitMetaData.getAfterCommit());
Assertions.assertEquals(1, gitMetaData.getVersionedFiles().size());
Assertions.assertEquals("master", gitMetaData.getCurrentBranch());
Assertions.assertEquals("git@amazon.com:username/new_repo", gitMetaData.getRemoteUrl());
}
@Test
public void test_getGitMetaData_twoCommits_commitShortHand() {
val repo = RESOURCE_ROOT.resolve("two-commits");
val config = configWithoutCommits(repo);
config.setBeforeCommit("HEAD^");
config.setAfterCommit("HEAD");
val gitMetaData = GitAdapter.tryGetMetaData(config, repo.resolve("git"));
Assertions.assertNotNull(gitMetaData);
Assertions.assertNotNull(gitMetaData.getBeforeCommit());
Assertions.assertNotNull(gitMetaData.getAfterCommit());
Assertions.assertEquals("master", gitMetaData.getCurrentBranch());
Assertions.assertEquals("git@amazon.com:username/new_repo", gitMetaData.getRemoteUrl());
}
@Test
public void test_getGitMetaData_twoCommits_invalidCommits() {
val repo = RESOURCE_ROOT.resolve("two-commits");
val config = configWithoutCommits(repo);
config.setBeforeCommit("thisIsNotACommitHash");
config.setAfterCommit("8ece465b7ecf8337bf767c9602d21bb92f2fad8a");
Exception ret = Assertions.assertThrows(Exception.class, () ->
GitAdapter.tryGetMetaData(config, repo.resolve("git")));
Assertions.assertTrue(ret.getMessage().contains("Not a valid commit id "));
}
private Configuration configWithoutCommits(final Path workingDir) {
return Configuration.builder()
.textIO(new TextIO(new MockTextTerminal()))
.interactiveMode(false)
.build();
}
} | 1,581 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/Main.java | package com.amazonaws.gurureviewercli;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import lombok.val;
import org.beryx.textio.TextIO;
import org.beryx.textio.system.SystemTextTerminal;
import org.eclipse.jgit.util.FileUtils;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.sts.StsClient;
import com.amazonaws.gurureviewercli.adapter.GitAdapter;
import com.amazonaws.gurureviewercli.adapter.ResultsAdapter;
import com.amazonaws.gurureviewercli.adapter.ScanAdapter;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
import com.amazonaws.gurureviewercli.model.GitMetaData;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.model.configfile.CustomConfiguration;
import com.amazonaws.gurureviewercli.util.CodeInsightExport;
import com.amazonaws.gurureviewercli.util.Log;
import com.amazonaws.gurureviewercli.util.RecommendationPrinter;
import com.amazonaws.gurureviewercli.util.RecommendationsFilter;
/**
 * Command-line entry point for the CodeGuru Reviewer CLI.
 *
 * <p>Parses CLI arguments with JCommander, reads git metadata for the target repository,
 * uploads source/build artifacts to S3, starts a CodeGuru Reviewer scan, downloads the
 * recommendations, and writes reports to the output directory.
 *
 * <p>Exit codes: 0 success, 1 bad arguments, 2 unexpected error, 3 CLI-specific error,
 * 5 recommendations found while {@code --fail-on-recommendations} is set.
 */
public class Main {

    private static final String REVIEWER_ENDPOINT_PATTERN = "https://codeguru-reviewer.%s.amazonaws.com";

    @Parameter(names = {"--region"},
               description = "Region where CodeGuru Reviewer will run.",
               required = false)
    private String regionName = "us-east-1";

    @Parameter(names = {"--profile"},
               description = "Use a named profile to get AWS Credentials",
               required = false)
    private String profileName;

    @Parameter(names = {"--commit-range", "-c"},
               description = "Range of commits to analyze separated by ':'. For example HEAD^:HEAD ",
               required = false)
    private String commitRange;

    @Parameter(names = {"--no-prompt"},
               description = "Run in non-interactive mode.",
               required = false)
    private boolean noPrompt;

    @Parameter(names = {"--fail-on-recommendations"},
               description = "Return error code 5 if CodeGuru reports recommendations.",
               required = false)
    private boolean failOnRecommendations;

    @Parameter(names = {"--bitbucket-code-insights"},
               description = "Output directory for Bitbucket insights report and annotation files.",
               required = false)
    private String bitbucketCodeInsightsDirectory;

    @Parameter(names = {"--root-dir", "-r"},
               description = "The root directory of the project that should be analyzed.",
               required = true)
    private String repoDir;

    @Parameter(names = {"--src", "-s"},
               description = "Source directories to be analyzed. Can be used multiple times.")
    private List<String> sourceDirs;

    @Parameter(names = {"--build", "-b"},
               description = "Directory of all build artifacts. Can be used multiple times.")
    private List<String> buildDirs;

    @Parameter(names = {"--output", "-o"},
               description = "Output directory.")
    private String outputDir = "./code-guru";

    @Parameter(names = {"--bucket-name"},
               description = "Name of S3 bucket that source and build artifacts will be uploaded to for analysis."
                             + " The bucket name has to be prefixed with 'codeguru-reviewer-'. If no bucket name"
                             + " is provided, the CLI will create a bucket automatically.")
    private String bucketName;

    @Parameter(names = {"--kms-key-id", "-kms"},
               description = "KMS Key ID to encrypt source and build artifacts in S3")
    private String kmsKeyId;

    public static void main(String[] argv) {
        val textIO = new TextIO(new SystemTextTerminal());
        val main = new Main();
        val jCommander = JCommander.newBuilder()
            .addObject(main)
            .build();
        if (argv.length == 0) {
            jCommander.usage();
            return;
        }
        try {
            jCommander.parse(argv);
            val config = Configuration.builder()
                .textIO(textIO)
                .interactiveMode(!main.noPrompt)
                .bucketName(main.bucketName)
                .build();
            main.validateInitialConfig(config);
            // try to build the AWS client objects first.
            main.createAWSClients(config);
            String repoName = config.getRootDir().toFile().getName();
            config.setRepoName(repoName);
            // check if repo is valid git.
            val gitMetaData = main.readGitMetaData(config, Paths.get(main.repoDir).toRealPath());
            ScanMetaData scanMetaData = null;
            List<RecommendationSummary> results = new ArrayList<>();
            try {
                // Normalize all user-provided directories to absolute paths.
                val sourcePaths = main.sourceDirs.stream()
                                      .map(Paths::get).map(Path::toAbsolutePath).map(Path::normalize)
                                      .collect(Collectors.toList());
                List<Path> buildPaths = null;
                if (main.buildDirs != null) {
                    buildPaths = main.buildDirs.stream()
                                     .map(Paths::get).map(Path::toAbsolutePath).map(Path::normalize)
                                     .collect(Collectors.toList());
                }
                scanMetaData = ScanAdapter.startScan(config, gitMetaData, sourcePaths, buildPaths);
                results.addAll(ScanAdapter.fetchResults(config, scanMetaData));
            } finally {
                if (scanMetaData != null) {
                    // try to clean up objects from S3.
                    main.tryDeleteS3Object(config.getS3Client(),
                                           scanMetaData.getBucketName(),
                                           scanMetaData.getSourceKey());
                    main.tryDeleteS3Object(config.getS3Client(),
                                           scanMetaData.getBucketName(),
                                           scanMetaData.getBuildKey());
                }
            }
            // Apply the optional suppression file from the repo root, if present.
            val customConfigFile = config.getRootDir().resolve(".codeguru-ignore.yml");
            if (customConfigFile.toFile().isFile()) {
                Log.info("Using customer provided config: " + customConfigFile.toAbsolutePath());
                int originalResultsCount = results.size();
                results = RecommendationsFilter.filterRecommendations(results,
                                                                      CustomConfiguration.load(customConfigFile));
                Log.info("%d recommendations were suppressed.", originalResultsCount - results.size());
            }
            val outputPath = Paths.get(main.outputDir);
            if (!outputPath.toFile().exists()) {
                if (!outputPath.toFile().mkdirs()) {
                    Log.error("Failed to create output directory %s.", outputPath);
                }
            }
            ResultsAdapter.saveResults(outputPath, results, scanMetaData);
            Log.info("Analysis finished.");
            if (main.bitbucketCodeInsightsDirectory != null) {
                val bitBucketDir = new File(main.bitbucketCodeInsightsDirectory).getCanonicalFile();
                FileUtils.mkdirs(bitBucketDir, true);
                CodeInsightExport.report(results, scanMetaData, bitBucketDir.toPath());
            }
            if (main.failOnRecommendations && !results.isEmpty()) {
                RecommendationPrinter.print(results);
                Log.error("Exiting with code 5 because %d recommendations were found and --fail-on-recommendations"
                          + " is used.", results.size());
                System.exit(5);
            }
        } catch (GuruCliException e) {
            Log.error("%s: %s", e.getErrorCode(), e.getMessage());
            e.printStackTrace();
            System.exit(3);
        } catch (ParameterException e) {
            Log.error(e);
            jCommander.usage();
            System.exit(1);
        } catch (Exception e) {
            e.printStackTrace();
            Log.error(e);
            System.exit(2);
        }
        System.exit(0);
    }

    /**
     * Parses the optional {@code --commit-range} option into before/after commits and
     * delegates to {@link GitAdapter#getGitMetaData}.
     *
     * @param config   mutable CLI configuration; before/after commits are set here.
     * @param repoRoot real path of the repository root.
     * @return git metadata for the repository.
     * @throws IOException      if the repository cannot be read.
     * @throws GuruCliException if the commit range is not of the form "before:after".
     */
    protected GitMetaData readGitMetaData(final Configuration config, final Path repoRoot) throws IOException {
        if (commitRange != null) {
            val commits = commitRange.split(":");
            if (commits.length != 2) {
                throw new GuruCliException(ErrorCodes.GIT_INVALID_COMMITS,
                                           "Invalid value for --commit-range. Use '[before commit]:[after commit]'.");
            }
            config.setBeforeCommit(commits[0]);
            config.setAfterCommit(commits[1]);
        }
        return GitAdapter.getGitMetaData(config, repoRoot);
    }

    /**
     * Validates bucket name, root dir, source dirs, and build dirs, and copies the
     * validated values into {@code config}. Source dirs default to the repo root.
     *
     * @throws GuruCliException if any directory is missing or outside the repo root.
     */
    private void validateInitialConfig(final Configuration config) throws IOException {
        if (config.getBucketName() != null && !config.getBucketName().startsWith("codeguru-reviewer-")) {
            Log.warn("CodeGuru Reviewer has default settings only for buckets that are prefixed with "
                     + "codeguru-reviewer. If you choose a different name, read the instructions in the README.");
        }
        if (!Paths.get(repoDir).toFile().isDirectory()) {
            throw new GuruCliException(ErrorCodes.DIR_NOT_FOUND,
                                       repoDir + " is not a valid directory.");
        }
        config.setRootDir(Paths.get(repoDir).toRealPath());
        if (this.sourceDirs == null || this.sourceDirs.isEmpty()) {
            this.sourceDirs = Arrays.asList(config.getRootDir().toString());
        }
        sourceDirs.forEach(sourceDir -> {
            val path = Paths.get(sourceDir);
            if (!path.toFile().isDirectory()) {
                throw new GuruCliException(ErrorCodes.DIR_NOT_FOUND,
                                           sourceDir + " is not a valid directory.");
            }
            // Source dirs must be inside the repo root; otherwise they cannot be archived.
            if (!path.toAbsolutePath().normalize().startsWith(config.getRootDir())) {
                throw new GuruCliException(ErrorCodes.DIR_NOT_FOUND,
                                           sourceDir + " is not a sub-directory of " + config.getRootDir());
            }
        });
        if (this.buildDirs != null) {
            buildDirs.forEach(buildDir -> {
                if (!Paths.get(buildDir).toFile().isDirectory()) {
                    throw new GuruCliException(ErrorCodes.DIR_NOT_FOUND,
                                               buildDir + " is not a valid directory.");
                }
            });
        }
        config.setKeyId(this.kmsKeyId);
    }

    /**
     * Best-effort deletion of an uploaded artifact; failures are logged, never thrown,
     * because cleanup must not mask the scan result.
     */
    private void tryDeleteS3Object(final S3Client s3Client, final String s3Bucket, final String s3Key) {
        try {
            if (s3Key != null) {
                s3Client.deleteObject(DeleteObjectRequest.builder().bucket(s3Bucket).key(s3Key).build());
            }
        } catch (Exception e) {
            Log.warn("Failed to delete %s from %s. Please delete the object by hand.", s3Key, s3Bucket);
        }
    }

    /**
     * Resolves AWS credentials and initializes the STS, CodeGuru Reviewer, and S3 clients
     * on {@code config}.
     *
     * @throws GuruCliException if the profile is invalid or no credentials are found.
     */
    protected void createAWSClients(final Configuration config) {
        val credentials = getCredentials();
        try {
            config.setRegion(regionName);
            val callerIdentity =
                StsClient.builder()
                         .credentialsProvider(credentials)
                         .region(Region.of(regionName))
                         .build().getCallerIdentity();
            config.setAccountId(callerIdentity.account());
            config.setGuruFrontendService(getNewGuruClient(credentials));
            config.setS3Client(getS3Client(credentials));
        } catch (IllegalArgumentException e) {
            // profile could not be found
            throw new GuruCliException(ErrorCodes.AWS_INIT_ERROR,
                                       "Error accessing the provided profile " + this.profileName
                                       + ". Ensure that the spelling is correct and"
                                       + " that the role has access to CodeGuru and S3.");
        } catch (SdkClientException e) {
            throw new GuruCliException(ErrorCodes.AWS_INIT_ERROR,
                                       "No AWS credentials found. Use 'aws configure' to set them up.");
        }
    }

    /** Returns the named profile's credentials, or the default provider chain if no profile was given. */
    private AwsCredentialsProvider getCredentials() {
        if (profileName == null || profileName.replaceAll("\\s+", "").length() == 0) {
            return DefaultCredentialsProvider.create();
        }
        return ProfileCredentialsProvider.create(profileName);
    }

    /** Builds a CodeGuru Reviewer client pinned to the region-specific service endpoint. */
    private CodeGuruReviewerClient getNewGuruClient(AwsCredentialsProvider credentialsProvider) {
        final String endpoint = String.format(REVIEWER_ENDPOINT_PATTERN, regionName);
        return CodeGuruReviewerClient.builder()
                                     .credentialsProvider(credentialsProvider)
                                     .endpointOverride(URI.create(endpoint))
                                     .region(Region.of(regionName))
                                     .build();
    }

    /** Builds an S3 client for the configured region. */
    private S3Client getS3Client(AwsCredentialsProvider credentialsProvider) {
        return S3Client.builder()
                       .credentialsProvider(credentialsProvider)
                       .region(Region.of(regionName))
                       .build();
    }
}
| 1,582 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/CodeInsightExport.java | package com.amazonaws.gurureviewercli.util;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Locale;
import java.util.stream.Collectors;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.json.JsonMapper;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import software.amazon.awssdk.services.codegurureviewer.model.Severity;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.model.bitbucket.CodeInsightsAnnotation;
import com.amazonaws.gurureviewercli.model.bitbucket.CodeInsightsReport;
/**
 * Export Report and Annotations file for BitBucket CodeInsights.
 *
 * <p>Writes two JSON files into the given output directory: a summary report
 * ({@code report.json}) and one annotation per recommendation
 * ({@code annotations.json}).
 */
public final class CodeInsightExport {

    private static final String REPORT_FILE_NAME = "report.json";
    private static final String ANNOTATIONS_FILE_NAME = "annotations.json";
    // Bitbucket annotation type; fixed for all CodeGuru findings.
    private static final String ANNOTATION_TYPE = "VULNERABILITY";

    private static final JsonMapper JSON_MAPPER =
        JsonMapper.builder()
                  .serializationInclusion(JsonInclude.Include.NON_ABSENT)
                  .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
                  .disable(SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS)
                  .disable(DeserializationFeature.ADJUSTT_DATES_TO_CONTEXT_TIME_ZONE)
                  .enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE)
                  .build();

    private CodeInsightExport() {
        // utility class; static methods only
    }

    /**
     * Writes the CodeInsights report and annotations files for the given recommendations.
     *
     * @param recommendations CodeGuru Reviewer findings to export.
     * @param scanMetaData    metadata of the completed scan (region, code review ARN).
     * @param outputDir       directory the two JSON files are written to.
     * @throws IOException if either file cannot be written.
     */
    public static void report(final Collection<RecommendationSummary> recommendations,
                              final ScanMetaData scanMetaData,
                              final Path outputDir) throws IOException {
        val reportTitle = "CodeGuru Reviewer report";
        val url = String.format("https://console.aws.amazon.com/codeguru/reviewer?region=%s#/codereviews/details/%s",
                                scanMetaData.getRegion(), scanMetaData.getCodeReviewArn());
        val report = CodeInsightsReport.builder()
                                       .title(reportTitle)
                                       .reporter("CodeGuru Reviewer CLI")
                                       .details(String.format("CodeGuru Reviewer reported %d recommendations",
                                                              recommendations.size()))
                                       .result(recommendations.isEmpty() ? "PASSED" : "FAILED")
                                       .link(url)
                                       .data(new ArrayList<>())
                                       .build();
        val annotations = recommendations.stream().map(r -> convert(r, reportTitle, url))
                                         .collect(Collectors.toList());
        JSON_MAPPER.writeValue(outputDir.resolve(REPORT_FILE_NAME).toFile(), report);
        JSON_MAPPER.writeValue(outputDir.resolve(ANNOTATIONS_FILE_NAME).toFile(), annotations);
    }

    /** Converts one CodeGuru recommendation into a Bitbucket CodeInsights annotation. */
    private static CodeInsightsAnnotation convert(final RecommendationSummary recommendation,
                                                  final String reportTitle,
                                                  final String url) {
        String description = recommendation.recommendationCategoryAsString();
        if (recommendation.ruleMetadata() != null) {
            description = recommendation.ruleMetadata().shortDescription();
        }
        return CodeInsightsAnnotation.builder()
                                     .title(reportTitle)
                                     .externalId(recommendation.recommendationId())
                                     .path(recommendation.filePath())
                                     .line(recommendation.startLine())
                                     .summary(description)
                                     .details(recommendation.description())
                                     .link(url)
                                     .annotationType(ANNOTATION_TYPE)
                                     .severity(convertSeverity(recommendation.severity()))
                                     .build();
    }

    /**
     * Maps a CodeGuru severity to the Bitbucket severity string.
     * Uses Locale.ROOT so the result is stable regardless of the default locale
     * (e.g. "High" must never become "HİGH" under a Turkish locale).
     */
    private static String convertSeverity(Severity guruSeverity) {
        if (guruSeverity != null) {
            return guruSeverity.toString().toUpperCase(Locale.ROOT);
        }
        return "Unknown";
    }
}
| 1,583 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/JsonUtil.java | package com.amazonaws.gurureviewercli.util;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import com.contrastsecurity.sarif.SarifSchema210;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.json.JsonMapper;
import lombok.NonNull;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import com.amazonaws.gurureviewercli.model.Recommendation;
/**
 * JSON (de)serialization helpers for CodeGuru Reviewer recommendations and SARIF reports.
 */
public final class JsonUtil {

    private JsonUtil() {
        // static utility; never instantiated
    }

    // Shared, immutable mapper: pretty-printed output, case-insensitive and
    // field-based (de)serialization, empty values omitted.
    private static final ObjectMapper MAPPER =
        JsonMapper.builder()
                  .enable(SerializationFeature.INDENT_OUTPUT)
                  .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true)
                  .visibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY)
                  .serializationInclusion(JsonInclude.Include.NON_EMPTY)
                  .build();

    /**
     * Reads a JSON file of recommendations and converts each entry to the AWS SDK type.
     *
     * @param jsonFile path to the JSON file; must contain a list of recommendations.
     * @return the recommendations as {@link RecommendationSummary} objects.
     * @throws IOException if the file cannot be read or parsed.
     */
    public static List<RecommendationSummary> loadRecommendations(@NonNull final Path jsonFile) throws IOException {
        final List<Recommendation> parsed =
            MAPPER.readValue(jsonFile.toFile(), new TypeReference<List<Recommendation>>() {
            });
        return parsed.stream()
                     .map(Recommendation::toRecommendationSummary)
                     .collect(Collectors.toList());
    }

    /**
     * Serializes recommendations to the given file as pretty-printed JSON.
     *
     * @throws IOException if the file cannot be written.
     */
    public static void storeRecommendations(@NonNull final List<RecommendationSummary> recommendations,
                                            @NonNull final Path targetFile) throws IOException {
        MAPPER.writeValue(targetFile.toFile(), recommendations);
    }

    /**
     * Serializes a SARIF report to the given file as pretty-printed JSON.
     *
     * @throws IOException if the file cannot be written.
     */
    public static void writeSarif(@NonNull final SarifSchema210 sarif, @NonNull final Path targetFile)
        throws IOException {
        MAPPER.writeValue(targetFile.toFile(), sarif);
    }
}
| 1,584 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/SarifConverter.java | package com.amazonaws.gurureviewercli.util;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import com.contrastsecurity.sarif.ArtifactLocation;
import com.contrastsecurity.sarif.Location;
import com.contrastsecurity.sarif.Message;
import com.contrastsecurity.sarif.MultiformatMessageString;
import com.contrastsecurity.sarif.PhysicalLocation;
import com.contrastsecurity.sarif.PropertyBag;
import com.contrastsecurity.sarif.Region;
import com.contrastsecurity.sarif.ReportingConfiguration;
import com.contrastsecurity.sarif.ReportingDescriptor;
import com.contrastsecurity.sarif.Result;
import com.contrastsecurity.sarif.Run;
import com.contrastsecurity.sarif.SarifSchema210;
import com.contrastsecurity.sarif.Tool;
import com.contrastsecurity.sarif.ToolComponent;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
/**
 * Utility class to convert CodeGuru Reviewer recommendations to SARIF 2.1.0.
 */
public final class SarifConverter {

    private SarifConverter() {
        // do not instantiate
    }

    /**
     * Convert CodeGuru Reviewer recommendations into SARIF format.
     *
     * @param recommendations CodeGuru Reviewer recommendations.
     * @return Sarif report object.
     * @throws IOException If conversion fails.
     */
    public static SarifSchema210 createSarifReport(final List<RecommendationSummary> recommendations)
        throws IOException {
        val docUrl = "https://docs.aws.amazon.com/codeguru/latest/reviewer-ug/how-codeguru-reviewer-works.html";
        // One reporting descriptor per distinct CodeGuru rule id.
        val rulesMap = createSarifRuleDescriptions(recommendations);
        val driver = new ToolComponent().withName("CodeGuru Reviewer Scanner")
                                        .withInformationUri(URI.create(docUrl))
                                        .withRules(new HashSet<>(rulesMap.values()));
        val results = recommendations.stream().map(SarifConverter::convertToSarif)
                                     .collect(Collectors.toList());
        val run = new Run().withTool(new Tool().withDriver(driver)).withResults(results);
        return new SarifSchema210()
            .withVersion(SarifSchema210.Version._2_1_0)
            .with$schema(URI.create("http://json.schemastore.org/sarif-2.1.0-rtm.4"))
            .withRuns(Arrays.asList(run));
    }

    /**
     * Build one SARIF {@link ReportingDescriptor} per distinct rule id found in the
     * recommendations. Recommendations without rule metadata are skipped.
     *
     * @param recommendations The recommendations to extract rule descriptors from.
     * @return Map from rule id to its SARIF descriptor.
     */
    private static Map<String, ReportingDescriptor> createSarifRuleDescriptions(
        final List<RecommendationSummary> recommendations) {
        val rulesMap = new HashMap<String, ReportingDescriptor>();
        for (val recommendation : recommendations) {
            val metaData = recommendation.ruleMetadata();
            if (metaData != null && !rulesMap.containsKey(metaData.ruleId())) {
                val properties = new PropertyBag().withTags(new HashSet<>(metaData.ruleTags()));
                val descriptor = new ReportingDescriptor()
                    .withName(metaData.ruleName())
                    .withId(metaData.ruleId())
                    .withShortDescription(new MultiformatMessageString().withText(metaData.ruleName()))
                    .withFullDescription(new MultiformatMessageString().withText(metaData.shortDescription()))
                    .withHelp(new MultiformatMessageString().withText(metaData.longDescription()))
                    .withProperties(properties);
                // Only set a default level when the recommendation carries a severity.
                if (recommendation.severityAsString() != null) {
                    val level = ReportingConfiguration.Level.fromValue(getSarifSeverity(recommendation));
                    descriptor.setDefaultConfiguration(new ReportingConfiguration().withLevel(level));
                }
                rulesMap.put(metaData.ruleId(), descriptor);
            }
        }
        return rulesMap;
    }

    /**
     * Convert a single recommendation into a SARIF {@link Result}.
     * The description is emitted as markdown since CodeGuru descriptions use it.
     */
    private static Result convertToSarif(final RecommendationSummary recommendation) {
        List<Location> locations = Arrays.asList(getSarifLocation(recommendation));
        return new Result().withRuleId(recommendation.ruleMetadata().ruleId())
                           .withLevel(Result.Level.fromValue(getSarifSeverity(recommendation)))
                           .withMessage(new Message().withMarkdown(recommendation.description()))
                           .withLocations(locations);
    }

    /** Map a recommendation's file path and line range to a SARIF physical location. */
    private static Location getSarifLocation(final RecommendationSummary recommendation) {
        val loc = new PhysicalLocation()
            .withArtifactLocation(new ArtifactLocation().withUri(recommendation.filePath()))
            .withRegion(new Region().withStartLine(recommendation.startLine())
                                    .withEndLine(recommendation.endLine()));
        return new Location()
            .withPhysicalLocation(loc);
    }

    /**
     * Map CodeGuru severities onto the four SARIF levels:
     * INFO/LOW -> note, MEDIUM/HIGH -> warning, CRITICAL -> error, absent/unknown -> none.
     */
    private static String getSarifSeverity(RecommendationSummary recommendation) {
        if (recommendation.severity() == null) {
            return Result.Level.NONE.value(); // can happen for legacy rules
        }
        switch (recommendation.severity()) {
            case INFO:
            case LOW:
                return Result.Level.NOTE.value();
            case MEDIUM:
            case HIGH:
                return Result.Level.WARNING.value();
            case CRITICAL:
                return Result.Level.ERROR.value();
            default:
                return Result.Level.NONE.value();
        }
    }
}
| 1,585 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/RecommendationsFilter.java | package com.amazonaws.gurureviewercli.util;
import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import com.amazonaws.gurureviewercli.model.configfile.CustomConfiguration;
/**
 * Utility class to filter CodeGuru Reviewer recommendations based on the criteria in a
 * custom configuration file.
 */
public final class RecommendationsFilter {

    private static final String GLOB_PREFIX = "glob:";

    private RecommendationsFilter() {
        // do not instantiate.
    }

    /**
     * Filter excluded recommendations.
     *
     * @param recommendations List of recommendations.
     * @param configuration   Custom configuration file that defines the exclusion criteria.
     * @return The recommendations that are not excluded by the configuration.
     */
    public static List<RecommendationSummary> filterRecommendations(
        final Collection<RecommendationSummary> recommendations,
        final CustomConfiguration configuration) {
        val pathMatchers = compileExcludeFileMatchers(configuration);
        val kept = new ArrayList<RecommendationSummary>();
        for (val recommendation : recommendations) {
            if (!isExcluded(recommendation, configuration, pathMatchers)) {
                kept.add(recommendation);
            }
        }
        return kept;
    }

    /** Compile the configuration's exclude-file glob strings into {@link PathMatcher}s. */
    private static List<PathMatcher> compileExcludeFileMatchers(final CustomConfiguration configuration) {
        val matchers = new ArrayList<PathMatcher>();
        if (configuration.getExcludeFiles() != null) {
            for (val globString : configuration.getExcludeFiles()) {
                matchers.add(FileSystems.getDefault().getPathMatcher(GLOB_PREFIX + globString));
            }
        }
        return matchers;
    }

    /**
     * Apply all exclusion criteria to one recommendation; returns true when it should be dropped.
     * Checks run in the same order as the original filter loop.
     */
    private static boolean isExcluded(final RecommendationSummary recommendation,
                                      final CustomConfiguration configuration,
                                      final List<PathMatcher> pathMatchers) {
        if (configuration.getExcludeBelowSeverity() != null) {
            // Lower rank means more severe; drop anything ranked below the threshold.
            val threshold = RecommendationPrinter.severityToInt(configuration.getExcludeBelowSeverity());
            if (RecommendationPrinter.severityToInt(recommendation) > threshold) {
                return true;
            }
        }
        if (configuration.getExcludeById() != null
            && configuration.getExcludeById()
                            .stream()
                            .anyMatch(id -> id.equals(recommendation.recommendationId()))) {
            return true;
        }
        if (pathMatchers.stream().anyMatch(m -> m.matches(Paths.get(recommendation.filePath())))) {
            return true;
        }
        if (recommendation.ruleMetadata() == null || recommendation.filePath().equals(".")) {
            return true; // Always drop rules without metadata or the stats recommendation
        }
        val metaData = recommendation.ruleMetadata();
        if (metaData.ruleTags() != null && configuration.getExcludeTags() != null
            && configuration.getExcludeTags().stream().anyMatch(tag -> metaData.ruleTags().contains(tag))) {
            return true;
        }
        return excludeRecommendation(recommendation, configuration);
    }

    /**
     * Check the per-detector exclusion entries: an entry with a matching detector id excludes
     * the recommendation outright when it lists no locations, or when one of its location
     * globs matches the recommendation's file path.
     */
    private static boolean excludeRecommendation(final RecommendationSummary recommendationSummary,
                                                 final CustomConfiguration configuration) {
        val exclusions = configuration.getExcludeRecommendations();
        if (exclusions == null) {
            return false;
        }
        val metaData = recommendationSummary.ruleMetadata();
        for (val exclusion : exclusions) {
            if (!metaData.ruleId().equals(exclusion.getDetectorId())) {
                continue;
            }
            val locations = exclusion.getLocations();
            if (locations == null || locations.isEmpty()) {
                return true; // detector excluded everywhere
            }
            for (val globString : locations) {
                val matcher = FileSystems.getDefault().getPathMatcher(GLOB_PREFIX + globString);
                if (matcher.matches(Paths.get(recommendationSummary.filePath()))) {
                    return true;
                }
            }
        }
        return false;
    }
}
| 1,586 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/Log.java | package com.amazonaws.gurureviewercli.util;
import java.io.PrintWriter;
import java.io.StringWriter;
import org.beryx.textio.TextTerminal;
import org.beryx.textio.system.SystemTextTerminal;
/**
 * Console logger that writes color-coded messages to a text terminal.
 * All methods delegate to a {@link TextTerminal}, which can be replaced
 * (e.g. by tests) via {@link #setTerminal}.
 */
public final class Log {
    // ANSI color escape codes; the full palette is declared, not all are currently used.
    private static final String TEXT_RESET = "\u001B[0m";
    private static final String TEXT_BLACK = "\u001B[30m";
    private static final String TEXT_RED = "\u001B[31m";
    private static final String TEXT_GREEN = "\u001B[32m";
    private static final String TEXT_YELLOW = "\u001B[33m";
    private static final String TEXT_BLUE = "\u001B[34m";
    private static final String TEXT_PURPLE = "\u001B[35m";
    private static final String TEXT_CYAN = "\u001B[36m";
    private static final String TEXT_WHITE = "\u001B[37m";
    private static final String AWS_URL_PREFIX = "https://console.aws.amazon.com/codeguru/reviewer";
    // can be overridden, e.g. by tests that capture output
    private static TextTerminal<?> terminal = new SystemTextTerminal();

    /** Replace the terminal all log output is written to. */
    public static void setTerminal(final TextTerminal<?> t) {
        terminal = t;
    }

    /** Print without trailing newline; format and args follow printf conventions. */
    public static void print(final String format, final Object... args) {
        terminal.printf(format, args);
    }

    /** Print with a platform-specific trailing newline. */
    public static void println(final String format, final Object... args) {
        terminal.printf(format + "%n", args);
    }

    /** Print an informational (green) message with trailing newline. */
    public static void info(final String format, final Object... args) {
        terminal.printf(TEXT_GREEN + format + TEXT_RESET + "%n", args);
    }

    /** Print a warning (yellow) message with trailing newline. */
    public static void warn(final String format, final Object... args) {
        terminal.printf(TEXT_YELLOW + format + TEXT_RESET + "%n", args);
    }

    /** Print an error (red) message with trailing newline. */
    public static void error(final String format, final Object... args) {
        terminal.printf(TEXT_RED + format + TEXT_RESET + "%n", args);
    }

    /** Print an AWS console URL (cyan); format is appended to the console URL prefix. */
    public static void awsUrl(final String format, final Object... args) {
        terminal.printf(TEXT_CYAN + AWS_URL_PREFIX + format + TEXT_RESET + "%n", args);
    }

    /** Print a throwable's message in red followed by its full stack trace. */
    public static void error(final Throwable t) {
        terminal.println(TEXT_RED + t.getMessage() + TEXT_RESET);
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        t.printStackTrace(pw);
        terminal.println(sw.toString());
    }

    private Log() {
        // do not initialize
    }
}
| 1,587 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/RecommendationPrinter.java | package com.amazonaws.gurureviewercli.util;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import lombok.NonNull;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import software.amazon.awssdk.services.codegurureviewer.model.Severity;
/**
 * Utility class to print recommendations.
 */
public final class RecommendationPrinter {

    private RecommendationPrinter() {
        // do not instantiate
    }

    /**
     * Print recommendations to command line, most severe first.
     *
     * @param recommendations List of recommendations
     */
    public static void print(final Collection<RecommendationSummary> recommendations) {
        val ordered = new ArrayList<>(recommendations);
        ordered.sort(Comparator.comparing(RecommendationPrinter::severityToInt));
        for (val recommendation : ordered) {
            val text = "-----\n"
                       + String.format("ID: %s, rule %s with severity %s%n",
                                       recommendation.recommendationId(),
                                       recommendation.ruleMetadata().ruleId(),
                                       recommendation.severity())
                       + String.format("In %s line %d%n",
                                       recommendation.filePath(), recommendation.startLine())
                       + recommendation.description()
                       + "\n";
            Log.info(text);
        }
    }

    /**
     * Convert the severity of a {@link RecommendationSummary} to integer, where lower number
     * means higher severity.
     *
     * @param rs A {@link RecommendationSummary}.
     * @return Integer value for severity, where 0 is the highest.
     */
    public static Integer severityToInt(final RecommendationSummary rs) {
        return (rs == null || rs.severity() == null) ? 5 : severityToInt(rs.severity().toString());
    }

    /**
     * Convert a severity string to integer, where lower number means higher severity.
     *
     * @param severity Severity as String (case-insensitive).
     * @return Integer value for severity, where 0 is the highest; 5 for INFO or unknown values.
     */
    public static Integer severityToInt(final @NonNull String severity) {
        final Severity[] ranked = {Severity.CRITICAL, Severity.HIGH, Severity.MEDIUM, Severity.LOW};
        for (int rank = 0; rank < ranked.length; rank++) {
            if (ranked[rank].toString().equalsIgnoreCase(severity)) {
                return rank;
            }
        }
        return 5; // INFO and anything unrecognized rank below LOW
    }
}
| 1,588 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/util/ZipUtils.java | package com.amazonaws.gurureviewercli.util;
import lombok.extern.log4j.Log4j2;
import lombok.val;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
 * Util class for ZipFile. Packs directories or explicit file lists into zip
 * archives, always emitting forward-slash entry names so archives built on
 * Windows stay readable everywhere.
 */
@Log4j2
public final class ZipUtils {
    /**
     * Zip source directory to destination path.
     *
     * @param sourceDirPaths source dir paths
     * @param zipFilePath destination zip file
     * @throws IOException io exception
     */
    public static void pack(final List<Path> sourceDirPaths, final String zipFilePath) throws IOException {
        pack(sourceDirPaths, Collections.emptyList(), zipFilePath);
    }
    /**
     * Zip the given directories, skipping anything under the exclude directories.
     * Each directory is relativized against itself, so entry names are relative
     * to the directory that contained them.
     *
     * @param sourceDirPaths source dir paths
     * @param excludeDirs directories whose contents are skipped
     * @param zipFilePath destination zip file; must not already exist
     * @throws IOException io exception
     */
    public static void pack(final List<Path> sourceDirPaths,
                            final List<Path> excludeDirs,
                            final String zipFilePath) throws IOException {
        // createFile fails if the zip already exists, preventing accidental overwrite.
        Path p = Files.createFile(Paths.get(zipFilePath).normalize().toAbsolutePath());
        try (ZipOutputStream zs = new ZipOutputStream(Files.newOutputStream(p))) {
            for (val sourceDirPath : sourceDirPaths) {
                Path pp = sourceDirPath.toRealPath();
                try (val walk = Files.walk(pp)) {
                    walk.filter(path -> !Files.isDirectory(path))
                        .filter(path -> isIncluded(path, excludeDirs))
                        .forEach(path -> {
                            val relativePath = pp.relativize(path.normalize().toAbsolutePath());
                            // in case we run on Windows
                            ZipEntry zipEntry = new ZipEntry(getUnixStylePathName(relativePath));
                            try {
                                zs.putNextEntry(zipEntry);
                                zs.write(Files.readAllBytes(path));
                                zs.closeEntry();
                            } catch (Exception e) {
                                // Best effort: an unreadable file is skipped, not fatal.
                                log.error("Skipping file {} because of error: {}", path, e.getMessage());
                            }
                        });
                }
            }
        }
    }
    /**
     * Zip source directory to destination path.
     *
     * @param sourceDirPaths source dir paths
     * @param relativeRoot The a shared parent of the sourceDirPaths that should be used for all entries.
     * @param zipFilePath destination zip file
     * @throws IOException io exception
     */
    public static void pack(final List<Path> sourceDirPaths,
                            final Path relativeRoot,
                            final String zipFilePath) throws IOException {
        pack(sourceDirPaths, Collections.emptyList(), relativeRoot, zipFilePath);
    }
    /**
     * Zip the given directories relative to a shared root, skipping excluded
     * directories. If an aws-codeguru-reviewer.yml config file exists directly
     * under the root, it is added to the archive as well.
     *
     * @param sourceDirPaths source dir paths
     * @param excludeDirs directories whose contents are skipped
     * @param relativeRoot shared parent used to compute entry names
     * @param zipFilePath destination zip file; must not already exist
     * @throws IOException io exception
     */
    public static void pack(final List<Path> sourceDirPaths,
                            final List<Path> excludeDirs,
                            final Path relativeRoot,
                            final String zipFilePath) throws IOException {
        val files = getFilesInDirectories(sourceDirPaths);
        // NOTE(review): resolve() never returns null, so the null check below is redundant.
        val codeGuruConfigFile = relativeRoot.resolve("aws-codeguru-reviewer.yml");
        if (codeGuruConfigFile != null && codeGuruConfigFile.toFile().isFile()) {
            files.add(codeGuruConfigFile);
        }
        packFiles(files, excludeDirs, relativeRoot, Paths.get(zipFilePath));
    }
    /**
     * Zip source directory to destination path.
     *
     * @param files source file paths
     * @param excludeDirs directories whose contents are skipped
     * @param relativeRoot The shared parent of the sourceDirPaths that should be used for all entries.
     * @param zipFilePath destination zip file; must not already exist
     * @throws IOException io exception, or RuntimeException if a file lies outside relativeRoot
     */
    public static void packFiles(final Collection<Path> files,
                                 final List<Path> excludeDirs,
                                 final Path relativeRoot,
                                 final Path zipFilePath) throws IOException {
        val normalizedRoot = relativeRoot.toRealPath();
        val normalizedFiles = files.stream()
                                   .map(Path::toAbsolutePath)
                                   .map(Path::normalize)
                                   .filter(p -> isIncluded(p, excludeDirs))
                                   .collect(Collectors.toList());
        // Fail fast if any file cannot be relativized against the root.
        normalizedFiles.forEach(file -> {
            if (!file.startsWith(normalizedRoot)) {
                val msg = String.format("%s is not a parent directory of %s", normalizedRoot, file);
                throw new RuntimeException(msg);
            }
        });
        Path zipFile = Files.createFile(zipFilePath);
        try (ZipOutputStream zs = new ZipOutputStream(Files.newOutputStream(zipFile))) {
            for (val file : normalizedFiles) {
                val relPath = normalizedRoot.relativize(file);
                // replace Windows file separators
                ZipEntry zipEntry = new ZipEntry(getUnixStylePathName(relPath));
                try {
                    zs.putNextEntry(zipEntry);
                    zs.write(Files.readAllBytes(file));
                    zs.closeEntry();
                } catch (Exception e) {
                    // Best effort: an unreadable file is skipped, not fatal.
                    log.error("Skipping file {} because of error: {}", file, e.getMessage());
                }
            }
        }
    }
    /**
     * Get files under directory recursively.
     *
     * @param directories Root directory.
     * @return All files under the root directory.
     * @throws IOException If reading the file system fails.
     */
    public static List<Path> getFilesInDirectories(Collection<Path> directories) throws IOException {
        val files = new ArrayList<Path>();
        for (val directory : directories) {
            Path pp = directory.toRealPath();
            files.addAll(getFilesInDirectory(pp));
        }
        return files;
    }
    /**
     * Get files under directory recursively.
     *
     * @param directory Root directory.
     * @return All files under the root directory; empty list for null or non-directories.
     * @throws IOException If reading the file system fails.
     */
    public static List<Path> getFilesInDirectory(final Path directory) throws IOException {
        if (directory == null || !directory.toFile().isDirectory()) {
            return Collections.emptyList();
        }
        try (val walk = Files.walk(directory.toRealPath())) {
            return walk.filter(path -> !Files.isDirectory(path)).collect(Collectors.toList());
        }
    }
    /** Normalize a path and convert any Windows backslashes to zip-style forward slashes. */
    private static String getUnixStylePathName(final Path path) {
        return path.normalize().toString().replace('\\', '/');
    }
    /** Returns false (and logs) when the path lies under one of the excluded directories. */
    private static boolean isIncluded(final Path p, final Collection<Path> excludes) {
        if (excludes != null) {
            if (excludes.stream().anyMatch(ex -> p.startsWith(ex.toAbsolutePath().normalize()))) {
                log.warn("File excluded from source zip because it is part of the build zip already: {}", p);
                return false;
            }
        }
        return true;
    }
    /**
     * private construct.
     */
    private ZipUtils() {
    }
}
| 1,589 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/adapter/ArtifactAdapter.java | package com.amazonaws.gurureviewercli.adapter;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.util.Log;
import com.amazonaws.gurureviewercli.util.ZipUtils;
import lombok.val;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;
/**
 * Utility class class to Zip and upload source and build artifacts to S3.
 */
public final class ArtifactAdapter {
    /**
     * Zip and upload source and build artifacts to S3.
     *
     * @param config The current {@link Configuration}
     * @param tempDir A temp directory where files can be copied to and zipped. Will be deleted after completion.
     * @param repositoryDir The root directory of the repo to analyze
     * @param sourceDirs The list of source directories under repositoryDir.
     * @param buildDirs The list of build directories (can be empty).
     * @param bucketName The name of the S3 bucket that should be used for the upload.
     * @return Metadata about what was zipped and uploaded.
     * @throws IOException If writing to tempDir fails.
     */
    public static ScanMetaData zipAndUpload(final Configuration config,
                                            final Path tempDir,
                                            final Path repositoryDir,
                                            final List<Path> sourceDirs,
                                            final List<Path> buildDirs,
                                            final String bucketName) throws IOException {
        try {
            // Decide whether to restrict the scan to files under version control.
            // In non-interactive mode this is always true when versioned files are known.
            boolean scanVersionedFilesOnly = false;
            if (config.getVersionedFiles() != null && !config.getVersionedFiles().isEmpty()) {
                scanVersionedFilesOnly =
                    !config.isInteractiveMode() ||
                    config.getTextIO()
                          .newBooleanInputReader()
                          .withTrueInput("y")
                          .withFalseInput("n")
                          .read("Only analyze files under version control?");
            }
            final String sourceKey;
            if (scanVersionedFilesOnly) {
                val filesToScan = new ArrayList<Path>(ZipUtils.getFilesInDirectories(sourceDirs));
                val totalFiles = filesToScan.size();
                filesToScan.retainAll(config.getVersionedFiles()); // only keep versioned files.
                val versionedFiles = filesToScan.size();
                if (versionedFiles == 0) {
                    Log.error(sourceDirs.toString());
                    Log.error(config.getVersionedFiles().toString());
                    throw new GuruCliException(ErrorCodes.GIT_EMPTY_DIFF,
                                               "No versioned files to analyze in directories: " + sourceDirs);
                }
                Log.info("Adding %d out of %d files under version control in %s",
                         versionedFiles, totalFiles, repositoryDir.toAbsolutePath());
                // Include the .git directory so the service can resolve commit information.
                filesToScan.addAll(ZipUtils.getFilesInDirectory(repositoryDir.resolve(".git")));
                sourceKey = zipAndUploadFiles("analysis-src-" + UUID.randomUUID(), filesToScan, buildDirs,
                                              repositoryDir, bucketName, tempDir, config.getAccountId(), config.getS3Client());
            } else {
                val sourceDirsAndGit = new ArrayList<Path>(sourceDirs);
                if (config.getBeforeCommit() != null && config.getAfterCommit() != null) {
                    // only add the git folder if a commit range is provided.
                    sourceDirsAndGit.add(repositoryDir.resolve(".git"));
                }
                sourceKey = zipAndUploadDir("analysis-src-" + UUID.randomUUID(), sourceDirsAndGit,
                                            buildDirs, repositoryDir, bucketName, tempDir, config.getAccountId(), config.getS3Client());
            }
            final String buildKey;
            if (buildDirs != null && !buildDirs.isEmpty()) {
                for (val buildDir : buildDirs) {
                    if (!buildDir.toFile().isDirectory()) {
                        throw new FileNotFoundException("Provided build directory not found " + buildDir);
                    }
                }
                // Build artifacts are zipped without a shared root (7-arg overload).
                buildKey =
                    zipAndUploadDir("analysis-bin-" + UUID.randomUUID(), buildDirs,
                                    Collections.emptyList(), bucketName, tempDir, config.getAccountId(), config.getS3Client());
            } else {
                buildKey = null;
            }
            return ScanMetaData.builder()
                               .bucketName(bucketName)
                               .repositoryRoot(repositoryDir)
                               .sourceDirectories(sourceDirs)
                               .sourceKey(sourceKey)
                               .buildKey(buildKey)
                               .build();
        } finally {
            // Delete the temp dir.
            try (val walker = Files.walk(tempDir)) {
                walker.sorted(Comparator.reverseOrder())
                      .map(Path::toFile)
                      .forEach(File::delete);
            }
        }
    }
    /**
     * Zip directories without a shared root and upload the archive to S3.
     * Entry names are relative to each source directory itself.
     *
     * @return The S3 key of the uploaded zip, or null if dirNames is null.
     */
    private static String zipAndUploadDir(final String artifactName,
                                          final List<Path> dirNames,
                                          final List<Path> excludeList,
                                          final String bucketName,
                                          final Path tempDir,
                                          final String accountId,
                                          final S3Client s3Client) throws IOException {
        return zipAndUploadDir(artifactName, dirNames, excludeList, null, bucketName, tempDir, accountId, s3Client);
    }
    /**
     * Zip directories (relative to rootDir when given) and upload the archive to S3.
     * The expectedBucketOwner guard prevents writing to a bucket not owned by the caller.
     *
     * @return The S3 key of the uploaded zip, or null if dirNames is null.
     */
    private static String zipAndUploadDir(final String artifactName,
                                          final List<Path> dirNames,
                                          final List<Path> excludeList,
                                          final Path rootDir,
                                          final String bucketName,
                                          final Path tempDir,
                                          final String accountId,
                                          final S3Client s3Client) throws IOException {
        if (dirNames != null) {
            val zipFileName = artifactName + ".zip";
            val zipFile = tempDir.resolve(zipFileName).toAbsolutePath();
            val s3Key = zipFileName;
            if (!zipFile.toFile().isFile()) {
                if (rootDir != null) {
                    ZipUtils.pack(dirNames, excludeList, rootDir, zipFile.toString());
                } else {
                    ZipUtils.pack(dirNames, excludeList, zipFile.toString());
                }
            }
            val putObjectRequest = PutObjectRequest.builder()
                                                   .bucket(bucketName)
                                                   .key(s3Key)
                                                   .expectedBucketOwner(accountId)
                                                   .build();
            s3Client.putObject(putObjectRequest, zipFile);
            return s3Key;
        }
        return null;
    }
    /**
     * Zip an explicit list of files (relative to rootDir) and upload the archive to S3.
     *
     * @return The S3 key of the uploaded zip, or null if files or rootDir is null.
     */
    private static String zipAndUploadFiles(final String artifactName,
                                            final List<Path> files,
                                            final List<Path> excludeDirs,
                                            final Path rootDir,
                                            final String bucketName,
                                            final Path tempDir,
                                            final String accountId,
                                            final S3Client s3Client) throws IOException {
        if (files != null && rootDir != null) {
            val zipFileName = artifactName + ".zip";
            val zipFile = tempDir.resolve(zipFileName).toAbsolutePath();
            val s3Key = zipFileName;
            if (!zipFile.toFile().isFile()) {
                ZipUtils.packFiles(files, excludeDirs, rootDir, zipFile);
            }
            val putObjectRequest = PutObjectRequest.builder()
                                                   .bucket(bucketName)
                                                   .key(s3Key)
                                                   .expectedBucketOwner(accountId)
                                                   .build();
            s3Client.putObject(putObjectRequest, zipFile);
            return s3Key;
        }
        return null;
    }
    // NOTE(review): this method is never called. Its predicate KEEPS paths that start with
    // an exclude dir, which looks inverted for a method named "filterAgainst..." — confirm
    // intent before reusing; consider removing as dead code.
    private static List<Path> filterAgainstExcludeDirs(final List<Path> original, final List<Path> exclude) {
        return original.stream().filter(path -> exclude.stream().anyMatch(ex -> path.startsWith(ex)))
                       .collect(Collectors.toList());
    }
    private ArtifactAdapter() {
        // do not instantiate
    }
}
| 1,590 |
0 | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli | Create_ds/aws-codeguru-cli/src/main/java/com/amazonaws/gurureviewercli/adapter/ScanAdapter.java | package com.amazonaws.gurureviewercli.adapter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.beust.jcommander.internal.Nullable;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
import software.amazon.awssdk.services.codegurureviewer.model.AnalysisType;
import software.amazon.awssdk.services.codegurureviewer.model.CodeArtifacts;
import software.amazon.awssdk.services.codegurureviewer.model.CodeReviewType;
import software.amazon.awssdk.services.codegurureviewer.model.CommitDiffSourceCodeType;
import software.amazon.awssdk.services.codegurureviewer.model.CreateCodeReviewRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeCodeReviewRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeCodeReviewResponse;
import software.amazon.awssdk.services.codegurureviewer.model.EventInfo;
import software.amazon.awssdk.services.codegurureviewer.model.JobState;
import software.amazon.awssdk.services.codegurureviewer.model.ListRecommendationsRequest;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAnalysis;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociation;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryHeadSourceCodeType;
import software.amazon.awssdk.services.codegurureviewer.model.RequestMetadata;
import software.amazon.awssdk.services.codegurureviewer.model.S3BucketRepository;
import software.amazon.awssdk.services.codegurureviewer.model.S3RepositoryDetails;
import software.amazon.awssdk.services.codegurureviewer.model.SourceCodeType;
import software.amazon.awssdk.services.codegurureviewer.model.ValidationException;
import software.amazon.awssdk.services.codegurureviewer.model.VendorName;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.GitMetaData;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.util.Log;
/**
* Wraps the commands to start a code-review and to poll and download the results.
*/
public final class ScanAdapter {
private static final String SCAN_PREFIX_NAME = "codeguru-reviewer-cli-";
private static final long WAIT_TIME_IN_SECONDS = 2L;
public static ScanMetaData startScan(final Configuration config,
final GitMetaData gitMetaData,
final List<Path> sourceDirs,
final List<Path> buildDirs) throws IOException {
val association = AssociationAdapter.getAssociatedGuruRepo(config);
val bucketName = association.s3RepositoryDetails().bucketName();
Log.info("Starting analysis of %s with association %s and S3 bucket %s",
config.getRootDir(), association.associationArn(), bucketName);
try {
val tempDir = Files.createTempDirectory("artifact-packing-dir");
val metadata = ArtifactAdapter.zipAndUpload(config, tempDir, config.getRootDir(),
sourceDirs, buildDirs, bucketName);
val request = createRepoAnalysisRequest(gitMetaData, metadata.getSourceKey(),
metadata.getBuildKey(), association);
val response = config.getGuruFrontendService().createCodeReview(request);
if (response == null) {
throw new RuntimeException("Failed to start scan: " + request);
}
Log.print("Started new CodeGuru Reviewer scan: ");
Log.awsUrl("?region=%s#/codereviews/details/%s", config.getRegion(),
response.codeReview().codeReviewArn());
metadata.setCodeReviewArn(response.codeReview().codeReviewArn());
metadata.setAssociationArn(association.associationArn());
metadata.setRegion(config.getRegion());
return metadata;
} catch (ValidationException e) {
throw new RuntimeException(e);
}
}
public static List<RecommendationSummary> fetchResults(final Configuration config,
final ScanMetaData scanMetaData) {
val reviewARN = scanMetaData.getCodeReviewArn();
val describeReviewRequest = DescribeCodeReviewRequest.builder().codeReviewArn(reviewARN).build();
DescribeCodeReviewResponse response = config.getGuruFrontendService().describeCodeReview(describeReviewRequest);
while (response != null) {
val state = response.codeReview().state();
if (JobState.COMPLETED.equals(state)) {
Log.println(":)");
return downloadResults(config.getGuruFrontendService(), reviewARN);
} else if (JobState.PENDING.equals(state)) {
Log.print(".");
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(WAIT_TIME_IN_SECONDS));
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} else if (JobState.FAILED.equals(state)) {
val msg = String.format("CodeGuru scan failed for ARN %s: %s%nCheck the AWS Console for more detail",
reviewARN, response.codeReview().stateReason());
throw new RuntimeException(msg);
} else {
val msg = String.format("CodeGuru scan is in an unexpected state %s: %s%n"
+ "Check the AWS Console for more detail",
state, response.codeReview().stateReason());
throw new RuntimeException(msg);
}
response = config.getGuruFrontendService().describeCodeReview(describeReviewRequest);
}
throw new RuntimeException("Unable to find information for scan " + reviewARN);
}
private static List<RecommendationSummary> downloadResults(final CodeGuruReviewerClient guruFrontendService,
final String reviewARN) {
val recommendations = new ArrayList<RecommendationSummary>();
val listRequest = ListRecommendationsRequest.builder().codeReviewArn(reviewARN).build();
guruFrontendService.listRecommendationsPaginator(listRequest)
.forEach(resp -> recommendations.addAll(resp.recommendationSummaries()));
return recommendations;
}
private static CreateCodeReviewRequest createRepoAnalysisRequest(final GitMetaData gitMetaData,
final String sourceKey,
final @Nullable String buildArtifactKey,
final RepositoryAssociation association) {
final CodeArtifacts codeArtifacts;
final AnalysisType[] analysisTypes;
if (buildArtifactKey == null) {
codeArtifacts = CodeArtifacts.builder().sourceCodeArtifactsObjectKey(sourceKey).build();
analysisTypes = new AnalysisType[]{AnalysisType.CODE_QUALITY};
} else {
codeArtifacts = CodeArtifacts.builder().sourceCodeArtifactsObjectKey(sourceKey)
.buildArtifactsObjectKey(buildArtifactKey)
.build();
analysisTypes = new AnalysisType[]{AnalysisType.SECURITY, AnalysisType.CODE_QUALITY};
}
val s3repoDetails = S3RepositoryDetails.builder().bucketName(association.s3RepositoryDetails()
.bucketName())
.codeArtifacts(codeArtifacts).build();
val s3repo = S3BucketRepository.builder().name(association.name())
.details(s3repoDetails).build();
val sourceCodeType = getSourceCodeType(s3repo, gitMetaData);
val repoAnalysis = RepositoryAnalysis.builder().sourceCodeType(sourceCodeType).build();
val reviewType = CodeReviewType.builder().repositoryAnalysis(repoAnalysis)
.analysisTypes(analysisTypes)
.build();
return CreateCodeReviewRequest.builder().type(reviewType)
.name(SCAN_PREFIX_NAME + UUID.randomUUID().toString())
.repositoryAssociationArn(association.associationArn())
.build();
}
private static SourceCodeType getSourceCodeType(final S3BucketRepository s3BucketRepository,
final GitMetaData gitMetaData) {
val hasDiff = gitMetaData.getBeforeCommit() != null && gitMetaData.getAfterCommit() != null;
val eventInfo = hasDiff ? EventInfo.builder().name("push").build() :
EventInfo.builder().name("schedule").build();
val requestMetaData = RequestMetadata.builder().requestId(gitMetaData.getPullRequestId())
.eventInfo(eventInfo)
.requester(gitMetaData.getUserName())
.vendorName(VendorName.GIT_HUB)
.build();
if (hasDiff) {
val commitDiff = CommitDiffSourceCodeType.builder().sourceCommit(gitMetaData.getAfterCommit())
.destinationCommit(gitMetaData.getBeforeCommit())
.build();
val repoHead =
RepositoryHeadSourceCodeType.builder().branchName(gitMetaData.getCurrentBranch()).build();
return SourceCodeType.builder().s3BucketRepository(s3BucketRepository)
.commitDiff(commitDiff)
.repositoryHead(repoHead)
.requestMetadata(requestMetaData)
.build();
} else {
val repoHead =
RepositoryHeadSourceCodeType.builder().branchName(gitMetaData.getCurrentBranch()).build();
return SourceCodeType.builder().s3BucketRepository(s3BucketRepository)
.repositoryHead(repoHead)
.requestMetadata(requestMetaData)
.build();
}
}
    /** Utility class; all members are static. */
    private ScanAdapter() {
        // do not instantiate
    }
}
| 1,591 |
package com.amazonaws.gurureviewercli.adapter;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import lombok.val;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.treewalk.TreeWalk;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
import com.amazonaws.gurureviewercli.model.GitMetaData;
import com.amazonaws.gurureviewercli.util.Log;
/**
 * Util to sanity-check if a repo is a valid git repository that can be analyzed by CodeGuru,
 * and to collect the metadata (branch, commit range, versioned files) the scan needs.
 */
public final class GitAdapter {

    /** Sha GitHub reports when there is no previous commit (e.g. first push). */
    private static final String GITHUB_UNKNOWN_COMMIT = "0000000000000000000000000000000000000000";

    // this is the sha for an empty commit, so any diff against this will return the full repo content.
    private static final String GITHUB_EMPTY_COMMIT_SHA = "4b825dc642cb6eb9a060e54bf8d69288fbee4904";

    /**
     * Collects git metadata for the given directory.
     *
     * @param config     Shared CLI configuration; commit range and versioned files are stored on it.
     * @param pathToRepo Directory that may contain a {@code .git} folder.
     * @return Metadata for the repo; a placeholder object if the directory is not under version control.
     * @throws IOException If the path cannot be resolved on disk.
     */
    @Nonnull
    public static GitMetaData getGitMetaData(final Configuration config, final Path pathToRepo) throws IOException {
        val gitDir = pathToRepo.toRealPath().resolve(".git");
        if (!gitDir.toFile().isDirectory()) {
            // if the directory is not under version control, return a dummy object.
            return GitMetaData.builder()
                              .repoRoot(pathToRepo)
                              .userName("nobody")
                              .currentBranch("unknown")
                              .build();
        }
        return tryGetMetaData(config, gitDir);
    }

    /**
     * Reads branch, user, remote, and commit-range information from the given {@code .git} directory.
     * Falls back to asking the user (in interactive mode) whether a full-repository scan is acceptable
     * when no commit range was provided.
     *
     * @throws GuruCliException If the directory is not a git repo, the branch is missing, the diff
     *                          is empty, or the user aborts.
     */
    @Nonnull
    protected static GitMetaData tryGetMetaData(final Configuration config, final Path gitDir) {
        if (!gitDir.toFile().isDirectory()) {
            throw new GuruCliException(ErrorCodes.GIT_INVALID_DIR);
        }
        val builder = new FileRepositoryBuilder();
        try (val repository = builder.setGitDir(gitDir.toFile()).findGitDir().build()) {
            val userName = repository.getConfig().getString("user", null, "email");
            val urlString = repository.getConfig().getString("remote", "origin", "url");
            val branchName = repository.getBranch();
            if (branchName == null) {
                throw new GuruCliException(ErrorCodes.GIT_BRANCH_MISSING);
            }
            val metadata = GitMetaData.builder()
                                      .currentBranch(branchName)
                                      .userName(userName)
                                      .repoRoot(gitDir.getParent())
                                      .remoteUrl(urlString)
                                      .build();
            metadata.setVersionedFiles(getChangedFiles(repository));
            config.setVersionedFiles(metadata.getVersionedFiles());
            if (config.getBeforeCommit() == null || config.getAfterCommit() == null) {
                // ask if commits should be inferred or if the entire repo should be scanned.
                Log.warn("CodeGuru will perform a full repository analysis if you do not provide a commit range.");
                Log.warn("For pricing details see: https://aws.amazon.com/codeguru/pricing/");
                val doPackageScan =
                    !config.isInteractiveMode() ||
                    config.getTextIO()
                          .newBooleanInputReader()
                          .withTrueInput("y")
                          .withFalseInput("n")
                          .read("Do you want to perform a full repository analysis?");
                if (doPackageScan) {
                    return metadata;
                } else {
                    throw new GuruCliException(ErrorCodes.USER_ABORT, "Use --commit-range to set a commit range");
                }
            }
            validateCommits(config, repository);
            metadata.setBeforeCommit(config.getBeforeCommit());
            metadata.setAfterCommit(config.getAfterCommit());
            return metadata;
        } catch (IOException | GitAPIException e) {
            throw new GuruCliException(ErrorCodes.GIT_INVALID_DIR, "Cannot read " + gitDir, e);
        }
    }

    /**
     * Returns the canonical paths of all files tracked at HEAD, or an empty set for an empty repo.
     */
    private static Collection<Path> getChangedFiles(final Repository repository) throws IOException {
        val headCommitId = repository.resolve(Constants.HEAD);
        if (headCommitId == null) {
            // Repo without any commits yet.
            return Collections.emptySet();
        }
        val rootDir = repository.getWorkTree().toPath();
        val allFiles = new HashSet<Path>();
        // RevWalk and TreeWalk hold object-database readers and must be closed (the original leaked both).
        try (RevWalk revWalk = new RevWalk(repository);
             TreeWalk treeWalk = new TreeWalk(repository)) {
            RevCommit commit = revWalk.parseCommit(headCommitId);
            treeWalk.addTree(commit.getTree());
            treeWalk.setRecursive(false);
            while (treeWalk.next()) {
                if (treeWalk.isSubtree()) {
                    treeWalk.enterSubtree();
                } else {
                    val normalizedFile = rootDir.resolve(treeWalk.getPathString()).toFile().getCanonicalFile();
                    if (normalizedFile.isFile()) {
                        allFiles.add(normalizedFile.toPath());
                    }
                }
            }
        }
        return allFiles;
    }

    /**
     * Validates the configured commit range: both commits must resolve and their diff must be non-empty.
     * Also rewrites symbolic names (e.g. {@code HEAD^^}) on the config into concrete shas.
     *
     * @return {@code true} if the range is valid (throws otherwise).
     */
    private static boolean validateCommits(final Configuration config, final Repository repo)
        throws GitAPIException {
        String beforeCommitSha = config.getBeforeCommit();
        if (GITHUB_UNKNOWN_COMMIT.equals(config.getBeforeCommit())) {
            // GitHub's all-zero sha means "no previous commit"; diff against the empty tree instead.
            beforeCommitSha = GITHUB_EMPTY_COMMIT_SHA;
        }
        val beforeTreeIter = treeForCommitId(repo, beforeCommitSha);
        val afterTreeIter = treeForCommitId(repo, config.getAfterCommit());
        // Resolve git constants, such as HEAD^^ to the actual commit hash
        config.setBeforeCommit(resolveSha(repo, beforeCommitSha));
        config.setAfterCommit(resolveSha(repo, config.getAfterCommit()));
        val diffEntries = new Git(repo).diff().setOldTree(beforeTreeIter).setNewTree(afterTreeIter).call();
        if (diffEntries.isEmpty()) {
            // Bug fix: the original used SLF4J-style "{}" placeholders with String.format,
            // so the message was never interpolated.
            throw new GuruCliException(ErrorCodes.GIT_EMPTY_DIFF,
                                       String.format("No difference between %s and %s",
                                                     config.getBeforeCommit(), config.getAfterCommit()));
        }
        return true;
    }

    /**
     * Resolves a symbolic commit name (e.g. {@code HEAD~1}) to its full sha.
     *
     * @throws GuruCliException If the name does not resolve to a commit.
     */
    private static String resolveSha(final Repository repo, final String commitName) {
        try {
            val objectId = repo.resolve(commitName);
            if (objectId == null) {
                // resolve() returns null for unknown refs; do not rely on an NPE.
                throw new GuruCliException(ErrorCodes.GIT_INVALID_COMMITS, "Invalid commit " + commitName);
            }
            return objectId.getName();
        } catch (IOException | IllegalArgumentException e) {
            // IllegalArgumentException covers jgit's RevisionSyntaxException for malformed names.
            throw new GuruCliException(ErrorCodes.GIT_INVALID_COMMITS, "Invalid commit " + commitName, e);
        }
    }

    /**
     * Builds a tree parser for the tree of the given commit, for use in a diff command.
     *
     * @throws GuruCliException If the id does not resolve or the commit cannot be parsed.
     */
    private static CanonicalTreeParser treeForCommitId(final Repository repo, final String commitId) {
        try (RevWalk walk = new RevWalk(repo)) {
            val objectId = repo.resolve(commitId);
            if (objectId == null) {
                // Explicit check instead of the original catch(NullPointerException) control flow.
                throw new GuruCliException(ErrorCodes.GIT_INVALID_COMMITS, "Not a valid commit id " + commitId);
            }
            val commit = walk.parseCommit(objectId);
            val treeId = commit.getTree().getId();
            try (ObjectReader reader = repo.newObjectReader()) {
                return new CanonicalTreeParser(null, reader, treeId);
            }
        } catch (IOException e) {
            throw new GuruCliException(ErrorCodes.GIT_INVALID_COMMITS, "Cannot parse commit id " + commitId, e);
        }
    }

    /** Utility class; all members are static. */
    private GitAdapter() {
        // do not instantiate
    }
}
| 1,592 |
package com.amazonaws.gurureviewercli.adapter;
import java.util.concurrent.TimeUnit;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.AssociateRepositoryRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationRequest;
import software.amazon.awssdk.services.codegurureviewer.model.DescribeRepositoryAssociationResponse;
import software.amazon.awssdk.services.codegurureviewer.model.EncryptionOption;
import software.amazon.awssdk.services.codegurureviewer.model.KMSKeyDetails;
import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsRequest;
import software.amazon.awssdk.services.codegurureviewer.model.ProviderType;
import software.amazon.awssdk.services.codegurureviewer.model.Repository;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociation;
import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAssociationState;
import software.amazon.awssdk.services.codegurureviewer.model.S3Repository;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import com.amazonaws.gurureviewercli.exceptions.GuruCliException;
import com.amazonaws.gurureviewercli.model.Configuration;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
import com.amazonaws.gurureviewercli.util.Log;
/**
 * Utility class to get or create a CodeGuru Reviewer Repository association.
 */
public final class AssociationAdapter {

    /** Pattern for the default artifact bucket name: codeguru-reviewer-cli-{account}-{region}. */
    private static final String BUCKET_NAME_PATTERN = "codeguru-reviewer-cli-%s-%s";

    /** Poll interval while waiting for an association to complete. */
    private static final long WAIT_TIME_IN_SECONDS = 1L;

    /**
     * Get or create a CodeGuru Repository Association (and, if necessary an S3 bucket).
     *
     * @param config The {@link Configuration} with name of repo, account, and region.
     * @return A CodeGuru Repository association.
     * @throws GuruCliException If the association is in a bad state, or an existing association's
     *                          KMS key or bucket does not match the requested one.
     */
    public static RepositoryAssociation getAssociatedGuruRepo(final Configuration config) {
        val guruFrontendService = config.getGuruFrontendService();
        val repositoryAssociationsRequest =
            ListRepositoryAssociationsRequest.builder()
                                             .providerTypes(ProviderType.S3_BUCKET)
                                             .names(config.getRepoName())
                                             .build();
        val associationResults = guruFrontendService.listRepositoryAssociations(repositoryAssociationsRequest);
        if (associationResults.repositoryAssociationSummaries().size() == 1) {
            val summary = associationResults.repositoryAssociationSummaries().get(0);
            val describeAssociationRequest =
                DescribeRepositoryAssociationRequest.builder().associationArn(summary.associationArn()).build();
            val association =
                guruFrontendService.describeRepositoryAssociation(describeAssociationRequest)
                                   .repositoryAssociation();
            if (!RepositoryAssociationState.ASSOCIATED.equals(association.state())) {
                val msg = String.format("Repository association in unexpected state %s: %s",
                                        association.state(),
                                        association.stateReason());
                throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED, msg);
            }
            // An existing association must match the explicitly requested KMS key, if any.
            if (config.getKeyId() != null &&
                !config.getKeyId().equals(association.kmsKeyDetails().kmsKeyId())) {
                val msg = String.format("Provided KMS Key alias %s for repository %s does "
                                        + "not match existing key: %s",
                                        config.getKeyId(),
                                        association.name(),
                                        association.kmsKeyDetails().kmsKeyId());
                throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED, msg);
            }
            // Likewise for an explicitly requested bucket name.
            if (config.getBucketName() != null &&
                !config.getBucketName().equals(association.s3RepositoryDetails().bucketName())) {
                val msg = String.format("Provided Bucket name %s for repository %s does "
                                        + "not match existing key: %s",
                                        config.getBucketName(),
                                        association.name(),
                                        association.s3RepositoryDetails().bucketName());
                throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED, msg);
            }
            return association;
        } else if (associationResults.repositoryAssociationSummaries().isEmpty()) {
            return createBucketAndAssociation(config);
        } else {
            // Use the project exception type (still a RuntimeException) for consistency.
            throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED,
                                       "Found more than one matching association: " + associationResults);
        }
    }

    /**
     * Creates the artifact S3 bucket (prompting the user in interactive mode) and a new repository
     * association, then polls until the association reaches the ASSOCIATED state.
     */
    private static RepositoryAssociation createBucketAndAssociation(final Configuration config) {
        final String bucketName;
        if (config.getBucketName() != null) {
            bucketName = config.getBucketName();
        } else {
            bucketName = String.format(BUCKET_NAME_PATTERN, config.getAccountId(), config.getRegion());
        }
        try {
            config.getS3Client().headBucket(HeadBucketRequest.builder().bucket(bucketName).build());
        } catch (NoSuchBucketException e) {
            Log.info("CodeGuru Reviewer requires an S3 bucket to upload the analysis artifacts to.");
            val createBucket =
                !config.isInteractiveMode() ||
                config.getTextIO()
                      .newBooleanInputReader()
                      .withTrueInput("y")
                      .withFalseInput("n")
                      .read("Do you want to create a new S3 bucket: " + bucketName, bucketName);
            if (createBucket) {
                Log.info("Creating new bucket: %s", bucketName);
                config.getS3Client().createBucket(CreateBucketRequest.builder().bucket(bucketName).build());
            } else {
                throw new GuruCliException(ErrorCodes.USER_ABORT, "CodeGuru needs an S3 bucket to continue.");
            }
        }
        val repository = Repository.builder()
                                   .s3Bucket(S3Repository.builder()
                                                         .bucketName(bucketName)
                                                         .name(config.getRepoName())
                                                         .build())
                                   .build();
        AssociateRepositoryRequest associateRequest;
        if (config.getKeyId() != null) {
            // Customer-managed key requested; otherwise CodeGuru uses its default encryption.
            val keyDetails = KMSKeyDetails.builder()
                                          .encryptionOption(EncryptionOption.CUSTOMER_MANAGED_CMK)
                                          .kmsKeyId(config.getKeyId())
                                          .build();
            associateRequest = AssociateRepositoryRequest.builder()
                                                         .repository(repository)
                                                         .kmsKeyDetails(keyDetails)
                                                         .build();
        } else {
            associateRequest = AssociateRepositoryRequest.builder().repository(repository).build();
        }
        val associateResponse = config.getGuruFrontendService().associateRepository(associateRequest);
        val associationArn = associateResponse.repositoryAssociation().associationArn();
        Log.print("Creating association ");
        DescribeRepositoryAssociationRequest associationRequest =
            DescribeRepositoryAssociationRequest.builder().associationArn(associationArn).build();
        DescribeRepositoryAssociationResponse associationResponse =
            config.getGuruFrontendService().describeRepositoryAssociation(associationRequest);
        // Poll until the association leaves the ASSOCIATING state.
        while (associationResponse != null) {
            val association = associationResponse.repositoryAssociation();
            if (RepositoryAssociationState.ASSOCIATED.equals(association.state())) {
                Log.println(" done");
                Log.print("Created new repository association: ");
                Log.awsUrl("?region=%s#/ciworkflows/associationdetails/%s", config.getRegion(),
                           association.associationArn());
                return association;
            } else if (RepositoryAssociationState.ASSOCIATING.equals(association.state())) {
                Log.print(".");
                try {
                    Thread.sleep(TimeUnit.SECONDS.toMillis(WAIT_TIME_IN_SECONDS));
                } catch (InterruptedException e) {
                    // Restore the interrupt flag before propagating (the original swallowed it).
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            } else {
                val msg = String.format("Repository association in unexpected state %s: %s",
                                        association.state(),
                                        association.stateReason());
                throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED, msg);
            }
            associationResponse = config.getGuruFrontendService().describeRepositoryAssociation(associationRequest);
        }
        throw new GuruCliException(ErrorCodes.ASSOCIATION_FAILED, "Unexpected error during association");
    }

    /** Utility class; all members are static. */
    private AssociationAdapter() {
        // do not instantiate
    }
}
| 1,593 |
package com.amazonaws.gurureviewercli.adapter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import lombok.val;
import org.commonmark.node.Node;
import org.commonmark.parser.Parser;
import org.commonmark.renderer.html.HtmlRenderer;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
import com.amazonaws.gurureviewercli.model.ScanMetaData;
import com.amazonaws.gurureviewercli.util.JsonUtil;
import com.amazonaws.gurureviewercli.util.Log;
import com.amazonaws.gurureviewercli.util.SarifConverter;
/**
 * Util to save Guru recommendations to disk and convert them to HTML.
 */
public final class ResultsAdapter {

    /**
     * Writes the recommendations as JSON, SARIF, and an HTML report into {@code outputDir}.
     *
     * @param outputDir    Directory the three report files are written to.
     * @param results      Recommendations returned by CodeGuru Reviewer.
     * @param scanMetaData Metadata (ARNs, region, repo root) used for links in the HTML report.
     * @throws IOException If any of the report files cannot be written.
     */
    public static void saveResults(final Path outputDir,
                                   final List<RecommendationSummary> results,
                                   final ScanMetaData scanMetaData) throws IOException {
        val jsonFile = outputDir.resolve("recommendations.json");
        JsonUtil.storeRecommendations(results, jsonFile);
        Log.info("Recommendations in Json format written to:%n%s", jsonFile.normalize().toUri());
        val sarifFile = outputDir.resolve("recommendations.sarif.json");
        JsonUtil.writeSarif(SarifConverter.createSarifReport(results), sarifFile);
        Log.info("Recommendations in SARIF format written to:%n%s", sarifFile.normalize().toUri());
        createHtmlReport(outputDir, scanMetaData, results);
    }

    /**
     * Renders the recommendations (markdown descriptions converted to HTML) into codeguru-report.html.
     * Findings whose file no longer exists on disk are dropped with a warning.
     */
    private static void createHtmlReport(final Path outputDir,
                                         final ScanMetaData scanMetaData,
                                         final List<RecommendationSummary> recommendations) throws IOException {
        int validFindings = 0;
        // sort by file name and line number
        sortByFileName(recommendations);
        Parser parser = Parser.builder().build();
        HtmlRenderer renderer = HtmlRenderer.builder().build();
        val htmlFile = outputDir.resolve("codeguru-report.html");
        try (OutputStreamWriter writer =
                 new OutputStreamWriter(new FileOutputStream(htmlFile.toFile()), StandardCharsets.UTF_8)) {
            writer.write("<!DOCTYPE html>\n<html lang=\"en\">\n");
            writer.write("<body>\n");
            writer.write("<h2>CodeGuru Reviewer Recommendations</h2>\n");
            val awsUrlPrefix = "https://console.aws.amazon.com/codeguru/reviewer";
            val associationUrl = String.format("%s?region=%s#/ciworkflows/associationdetails/%s",
                                               awsUrlPrefix, scanMetaData.getRegion(), scanMetaData.getAssociationArn());
            val scanUrl = String.format("%s?region=%s#/codereviews/details/%s",
                                        awsUrlPrefix, scanMetaData.getRegion(), scanMetaData.getCodeReviewArn());
            writer.write(renderer.render(parser.parse(String.format("**CodeGuru Repository ARN**: [%s](%s)%n",
                                                                    scanMetaData.getAssociationArn(),
                                                                    associationUrl))));
            writer.write(renderer.render(parser.parse(String.format("**CodeGuru Scan ARN**: [%s](%s)%n",
                                                                    scanMetaData.getCodeReviewArn(),
                                                                    scanUrl))));
            writer.write("\n<br/><hr style=\"width:90%\"><br/>\n");
            for (val recommendation : recommendations) {
                val filePath = scanMetaData.getRepositoryRoot().resolve(recommendation.filePath()).toAbsolutePath();
                if (filePath == null || !filePath.toFile().isFile()) {
                    if (filePath != null && !(filePath.endsWith(".") || filePath.endsWith("/"))) {
                        Log.warn("Dropping finding because file not found on disk: %s", filePath);
                    }
                    continue;
                }
                validFindings++;
                // Bug fix: the original computed lineMsg with the branches swapped and then never
                // used it, so a range finding was always rendered as a single line.
                String lineMsg;
                if (recommendation.endLine() != null
                    && !recommendation.endLine().equals(recommendation.startLine())) {
                    lineMsg = String.format("### In: [%s](%s) L%d - L%d %n",
                                            filePath, filePath.toUri(),
                                            recommendation.startLine(),
                                            recommendation.endLine());
                } else {
                    lineMsg = String.format("### In: [%s](%s) L%d %n",
                                            filePath, filePath.toUri(),
                                            recommendation.startLine());
                }
                Node document = parser.parse(lineMsg);
                writer.write(renderer.render(document));
                document = parser.parse("**Issue:** " + recommendation.description());
                writer.write(renderer.render(document));
                writer.write(String.format("<p><strong>Severity:</strong> %s<p/>", recommendation.severity()));
                if (recommendation.ruleMetadata() != null && recommendation.ruleMetadata().ruleId() != null) {
                    val manifest = recommendation.ruleMetadata();
                    writer.write(String.format("<p><strong>Rule ID:</strong> %s<p/>", manifest.ruleId()));
                    writer.write(String.format("<p><strong>Rule Name:</strong> %s<p/>", manifest.ruleName()));
                    document = parser.parse("**Description:** " + manifest.longDescription());
                    writer.write(renderer.render(document));
                    if (manifest.ruleTags() != null && !manifest.ruleTags().isEmpty()) {
                        val mdList = manifest.ruleTags().stream()
                                             .map(s -> String.format("- %s%n", s))
                                             .collect(Collectors.joining());
                        document = parser.parse("**Tags:**\n" + mdList);
                        writer.write(renderer.render(document));
                    }
                }
                writer.write("\n<hr style=\"width:80%\">\n");
            }
            writer.write("</body>\n");
            writer.write("</html>\n");
        }
        Log.info("Report with %d recommendations written to:%n%s", validFindings, htmlFile.normalize().toUri());
    }

    /** Sorts in place by file path, then by start line within the same file. */
    private static void sortByFileName(final List<RecommendationSummary> recommendations) {
        recommendations.sort((o1, o2) -> {
            int pathComp = o1.filePath().compareTo(o2.filePath());
            if (pathComp == 0) {
                return o1.startLine().compareTo(o2.startLine());
            }
            return pathComp;
        });
    }

    /** Utility class; all members are static. */
    private ResultsAdapter() {
        // do not instantiate
    }
}
| 1,594 |
package com.amazonaws.gurureviewercli.exceptions;
import lombok.Getter;
import com.amazonaws.gurureviewercli.model.ErrorCodes;
/**
 * Runtime exception used throughout the CLI; carries a machine-readable {@link ErrorCodes} value
 * in addition to the optional message and cause.
 */
public class GuruCliException extends RuntimeException {

    /** Error category for this failure; set once at construction (now final). */
    @Getter
    private final ErrorCodes errorCode;

    public GuruCliException(final ErrorCodes errorCode) {
        this.errorCode = errorCode;
    }

    public GuruCliException(final ErrorCodes errorCode, final String msg) {
        super(msg);
        this.errorCode = errorCode;
    }

    public GuruCliException(final ErrorCodes errorCode, final String msg, final Throwable cause) {
        super(msg, cause);
        this.errorCode = errorCode;
    }
}
| 1,595 |
package com.amazonaws.gurureviewercli.model;
import java.util.List;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.val;
import software.amazon.awssdk.services.codegurureviewer.model.RecommendationSummary;
/**
 * Serializable recommendation class; mirrors the AWS SDK {@link RecommendationSummary} so results
 * can be round-tripped through JSON.
 */
@Data
@NoArgsConstructor
public class Recommendation {

    private String filePath;
    private String recommendationId;
    private Integer startLine;
    private Integer endLine;
    private String description;
    private String recommendationCategory;
    private RuleMetadata ruleMetadata;
    private String severity;

    /** Serializable mirror of the SDK rule-metadata type. */
    @Data
    public static final class RuleMetadata {
        private String ruleId;
        private String ruleName;
        private String shortDescription;
        private String longDescription;
        private List<String> ruleTags;
    }

    /**
     * Converts this POJO back into the AWS SDK {@link RecommendationSummary}.
     * Robustness fix: tolerates a missing {@code ruleMetadata} (the original threw an NPE),
     * since every field of a deserialized recommendation may be absent.
     */
    public RecommendationSummary toRecommendationSummary() {
        val builder = RecommendationSummary.builder()
                                           .description(description)
                                           .recommendationId(recommendationId)
                                           .recommendationCategory(recommendationCategory)
                                           .filePath(filePath)
                                           .startLine(startLine)
                                           .endLine(endLine)
                                           .severity(severity);
        if (ruleMetadata != null) {
            val rm = software.amazon.awssdk.services.codegurureviewer.model.
                RuleMetadata.builder()
                            .ruleId(ruleMetadata.ruleId)
                            .longDescription(ruleMetadata.longDescription)
                            .shortDescription(ruleMetadata.shortDescription)
                            .ruleName(ruleMetadata.ruleName)
                            .ruleTags(ruleMetadata.ruleTags)
                            .build();
            builder.ruleMetadata(rm);
        }
        return builder.build();
    }
}
| 1,596 |
package com.amazonaws.gurureviewercli.model;
import javax.annotation.Nullable;
import java.nio.file.Path;
import java.util.Collection;
import lombok.Builder;
import lombok.Data;
import org.beryx.textio.TextIO;
import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
import software.amazon.awssdk.services.s3.S3Client;
/**
 * Class to hold all shared configuration data. This object is mutable and information is added as it becomes
 * available.
 */
@Data
@Builder
public class Configuration {
    // True when the CLI may prompt the user via textIO; false in CI-style runs.
    private boolean interactiveMode;
    // AWS service clients, created during startup.
    private CodeGuruReviewerClient guruFrontendService;
    private S3Client s3Client;
    // AWS account and region the scan runs in.
    private String accountId;
    private String region;
    // Name of the CodeGuru repository association to use or create.
    private String repoName;
    // Optional customer-managed KMS key id for the association.
    private String keyId;
    // Root directory of the code under analysis.
    private Path rootDir;
    // Console abstraction used for interactive prompts.
    private TextIO textIO;
    // Optional explicit S3 bucket for analysis artifacts.
    private String bucketName;
    // Commit range for a diff-based review; both null for a full-repository scan.
    private @Nullable
    String beforeCommit;
    private @Nullable
    String afterCommit;
    // Files tracked by git at HEAD; populated by GitAdapter.
    private @Nullable
    Collection<Path> versionedFiles;
}
| 1,597 |
package com.amazonaws.gurureviewercli.model;
import javax.annotation.Nullable;
import java.nio.file.Path;
import java.util.Collection;
import lombok.Builder;
import lombok.Data;
/**
 * Metadata collected about the analyzed git repo.
 */
@Builder
@Data
public class GitMetaData {
    // Directory containing the working tree (parent of .git).
    private Path repoRoot;
    // user.email from the git config; "nobody" when the dir is not under version control.
    private String userName;
    private String currentBranch;
    // Pull-request id forwarded to CodeGuru; defaults to "0" when not triggered by a PR.
    @Builder.Default
    private String pullRequestId = "0";
    // remote.origin.url from the git config, if any.
    private @Nullable String remoteUrl;
    // Commit range for a diff-based review; both null for a full-repository scan.
    private @Nullable String beforeCommit;
    private @Nullable String afterCommit;
    // Files tracked by git at HEAD.
    private @Nullable Collection<Path> versionedFiles;
}
| 1,598 |
package com.amazonaws.gurureviewercli.model;
import lombok.Getter;
/**
* Error Codes for the CLI.
*/
public enum ErrorCodes {
ASSOCIATION_FAILED("Failed to associate with CodeGuru"),
GIT_INVALID_DIR("Invalid Git Directory"),
GIT_BRANCH_MISSING("Cannot determine Git branch"),
DIR_NOT_FOUND("Provided path is not a valid directory"),
GIT_INVALID_COMMITS("Not a valid commit"),
GIT_EMPTY_DIFF("Git Diff is empty"),
AWS_INIT_ERROR("Failed to initialize AWS API"),
BAD_BUCKET_NAME("CodeGuru Reviewer expects bucket names to start with codeguru-reviewer-"),
USER_ABORT("Abort");
@Getter
final String errorMessage;
ErrorCodes(String msg) {
this.errorMessage = msg;
}
@Override
public String toString() {
return errorMessage;
}
}
| 1,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.