index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/AggregatingJobManagementServiceHelper.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service;
import java.util.function.BiConsumer;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.federation.startup.GrpcConfiguration;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import io.grpc.stub.AbstractStub;
import io.grpc.stub.StreamObserver;
import reactor.core.publisher.Mono;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createWrappedStub;
@Singleton
public class AggregatingJobManagementServiceHelper {

    private final AggregatingCellClient aggregatingCellClient;
    private final GrpcConfiguration grpcConfiguration;

    @Inject
    public AggregatingJobManagementServiceHelper(AggregatingCellClient aggregatingCellClient,
                                                 GrpcConfiguration grpcConfiguration) {
        this.aggregatingCellClient = aggregatingCellClient;
        this.grpcConfiguration = grpcConfiguration;
    }

    // Decorates a cell stub with the caller's metadata and the configured per-request timeout.
    private <STUB extends AbstractStub<STUB>> STUB wrap(STUB stub, CallMetadata callMetadata) {
        return createWrappedStub(stub, callMetadata, grpcConfiguration.getRequestTimeoutMs());
    }

    /**
     * Looks up a job by id in every cell and merges the per-cell responses into a single one.
     * The call goes through {@code callExpectingErrors}, so individual cell failures are carried
     * inside each response's result rather than failing the stream outright; after merging, a
     * value is emitted only if the winning response holds one, otherwise its captured error is
     * re-raised.
     *
     * @param jobId        id of the job to find
     * @param callMetadata caller identity propagated to each cell
     */
    public Observable<CellResponse<JobManagementServiceStub, Job>> findJobInAllCells(String jobId, CallMetadata callMetadata) {
        return aggregatingCellClient.callExpectingErrors(JobManagementServiceGrpc::newStub, findJobInCell(jobId, callMetadata))
                // collapse the per-cell responses into a single authoritative response
                .reduce(ResponseMerger.singleValue())
                .flatMap(response -> response.getResult()
                        .map(v -> Observable.just(CellResponse.ofValue(response)))
                        // no value: surface the error captured from the cells
                        .onErrorGet(Observable::error)
                );
    }

    /** Reactor variant of {@link #findJobInAllCells(String, CallMetadata)} (single emission). */
    public Mono<CellResponse<JobManagementServiceStub, Job>> findJobInAllCellsReact(String jobId, CallMetadata callMetadata) {
        return ReactorExt.toMono(findJobInAllCells(jobId, callMetadata).toSingle());
    }

    /**
     * Builds the per-cell call that invokes {@code findJob} on a (metadata- and timeout-wrapped)
     * stub for the given job id.
     */
    public ClientCall<Job> findJobInCell(String jobId, CallMetadata callMetadata) {
        JobId id = JobId.newBuilder().setId(jobId).build();
        return (client, streamObserver) -> wrap(client, callMetadata).findJob(id, streamObserver);
    }

    // Named alias for the (stub, observer) consumer shape used by the aggregating client.
    public interface ClientCall<T> extends BiConsumer<JobManagementServiceStub, StreamObserver<T>> {
        // generics sanity
    }
}
| 9,900 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/RoutingRuleSelector.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.federation.service.CellInfoResolver;
import com.netflix.titus.federation.service.CellInfoUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class RoutingRuleSelector {
    private static final Logger logger = LoggerFactory.getLogger(RoutingRuleSelector.class);

    // Memoized compiler: patterns are re-compiled only when the spec string actually changes.
    private final Function<String, Map<Cell, Pattern>> compileRoutingPatterns;
    // Dynamic source of the routing-rule spec (callers pass a configuration getter).
    private final Supplier<String> routingRulesSupplier;

    RoutingRuleSelector(CellInfoResolver cellInfoResolver, Supplier<String> routingRulesSupplier) {
        compileRoutingPatterns = Evaluators.memoizeLast((spec, lastCompiledPatterns) -> {
            logger.info("Detected new routing rules, compiling them: {}", spec);
            try {
                List<Cell> cells = cellInfoResolver.resolve();
                Map<Cell, String> cellRoutingRules = CellInfoUtil.extractCellRoutingFromCellSpecification(cells, spec);
                // LinkedHashMap preserves rule declaration order, which select() depends on
                return CollectionsExt.mapValues(cellRoutingRules, Pattern::compile, LinkedHashMap::new);
            } catch (RuntimeException e) {
                // a malformed updated spec is ignored and the last good patterns keep serving
                logger.error("Bad cell routing spec, ignoring: {}", spec);
                return lastCompiledPatterns.orElseThrow(() -> e /* there is nothing to do if the first spec is bad */);
            }
        });
        this.routingRulesSupplier = routingRulesSupplier;
        // ensure the initial spec can be compiled or fail fast
        compileRoutingPatterns.apply(routingRulesSupplier.get());
    }

    /**
     * Returns the first cell, in rule declaration order, that passes the filter and whose routing
     * pattern fully matches the given route key; empty when no rule matches.
     */
    Optional<Cell> select(String routeKey, Predicate<Cell> filter) {
        Map<Cell, Pattern> cellRoutingPatterns = compileRoutingPatterns.apply(routingRulesSupplier.get());
        return cellRoutingPatterns.entrySet().stream()
                .filter(entry -> filter.test(entry.getKey()))
                .filter(entry -> entry.getValue().matcher(routeKey).matches())
                .findFirst()
                .map(Map.Entry::getKey);
    }
}
| 9,901 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/FallbackCellRouter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.Optional;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.federation.service.CellInfoResolver;
import com.netflix.titus.grpc.protogen.JobDescriptor;
/**
 * Terminal router that unconditionally resolves to the default cell; place it last in a
 * {@link ChainCellRouter} so routing always produces a result.
 */
public class FallbackCellRouter implements CellRouter {

    private final CellInfoResolver cellInfoResolver;

    public FallbackCellRouter(CellInfoResolver cellInfoResolver) {
        this.cellInfoResolver = cellInfoResolver;
    }

    /** Ignores the job descriptor and always returns the resolver's default cell. */
    @Override
    public Optional<Cell> routeKey(JobDescriptor jobDescriptor) {
        Cell defaultCell = cellInfoResolver.getDefault();
        return Optional.of(defaultCell);
    }
}
| 9,902 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/CellRouter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.Optional;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.grpc.protogen.JobDescriptor;
/**
 * CellRouter determines which cell to route a request to amongst a set of Cells.
 */
public interface CellRouter {
    /**
     * Selects the cell a job should be routed to based on its {@link JobDescriptor}. What part of
     * the descriptor drives the decision (e.g. application name, capacity group, resource
     * requirements) is up to the implementation.
     *
     * @param jobDescriptor descriptor of the job being routed
     * @return the chosen cell, or {@link java.util.Optional#empty()} when this router has no
     *         opinion and the decision should fall through (see {@code ChainCellRouter})
     */
    Optional<Cell> routeKey(JobDescriptor jobDescriptor);
}
| 9,903 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/ChainCellRouter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.List;
import java.util.Optional;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.grpc.protogen.JobDescriptor;
public class ChainCellRouter implements CellRouter {
private final List<CellRouter> cellRouters;
public ChainCellRouter(List<CellRouter> cellRouters) {
this.cellRouters = cellRouters;
}
@Override
public Optional<Cell> routeKey(JobDescriptor jobDescriptor) {
for (CellRouter cellRouter : cellRouters) {
Optional<Cell> result = cellRouter.routeKey(jobDescriptor);
if (result.isPresent()) {
return result;
}
}
return Optional.empty();
}
}
| 9,904 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/SpecialInstanceTypeRouter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.Optional;
import java.util.function.Function;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.federation.service.CellInfoResolver;
import com.netflix.titus.federation.startup.TitusFederationConfiguration;
import com.netflix.titus.grpc.protogen.JobDescriptor;
/**
 * {@link CellRouter} for jobs needing special instance types (e.g. GPU machines). It lets an
 * operator concentrate scarce, expensive hardware in one cell and route every job requiring it
 * there, regardless of which application owns the job.
 * <br/>
 * Returns a cell only when the job's derived route key matches an instance-type routing rule;
 * otherwise (no special resources needed, or no cell provides them) the result is
 * {@link Optional#empty()}.
 */
public class SpecialInstanceTypeRouter implements CellRouter {

    private static final String REGULAR = "regularInstanceType";
    private static final String SPECIAL_INSTANCE_GPU = "gpu";

    private final Function<JobDescriptor, String> instanceTypeRouteKeyResolver;
    private final RoutingRuleSelector selector;

    public SpecialInstanceTypeRouter(CellInfoResolver cellInfoResolver,
                                     Function<JobDescriptor, String> instanceTypeRouteKeyResolver,
                                     TitusFederationConfiguration federationConfiguration) {
        this.instanceTypeRouteKeyResolver = instanceTypeRouteKeyResolver;
        this.selector = new RoutingRuleSelector(cellInfoResolver, federationConfiguration::getInstanceTypeRoutingRules);
    }

    @Override
    public Optional<Cell> routeKey(JobDescriptor jobDescriptor) {
        String instanceTypeKey = instanceTypeRouteKeyResolver.apply(jobDescriptor);
        // no cell filter here: any cell whose rule matches the key is acceptable
        return selector.select(instanceTypeKey, cell -> true);
    }

    /** Factory for a router that maps GPU-requesting jobs to the "gpu" route key. */
    public static SpecialInstanceTypeRouter getGpuInstanceTypeRouter(CellInfoResolver cellInfoResolver,
                                                                     TitusFederationConfiguration federationConfiguration) {
        Function<JobDescriptor, String> gpuKeyResolver = jobDescriptor -> {
            boolean needsGpu = jobDescriptor.getContainer().getResources().getGpu() > 0;
            return needsGpu ? SPECIAL_INSTANCE_GPU : REGULAR;
        };
        return new SpecialInstanceTypeRouter(cellInfoResolver, gpuKeyResolver, federationConfiguration);
    }
}
| 9,905 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/service/router/ApplicationCellRouter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.service.router;
import java.util.Optional;
import java.util.Set;
import com.netflix.titus.api.federation.model.Cell;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.federation.service.CellInfoResolver;
import com.netflix.titus.federation.startup.TitusFederationConfiguration;
import com.netflix.titus.grpc.protogen.JobDescriptor;
/**
 * Routes a job by its capacity group or application name against the configured routing rules,
 * honoring per-job cell pinning and anti-affinity attributes.
 */
public class ApplicationCellRouter implements CellRouter {

    // Route key used when the job carries neither a capacity group nor an application name.
    private static final String DEFAULT_ROUTE_KEY = "DEFAULT_FEDERATION_ROUTE_KEY";

    private final CellInfoResolver cellInfoResolver;
    private final RoutingRuleSelector selector;

    public ApplicationCellRouter(CellInfoResolver cellInfoResolver, TitusFederationConfiguration federationConfiguration) {
        this.cellInfoResolver = cellInfoResolver;
        this.selector = new RoutingRuleSelector(cellInfoResolver, federationConfiguration::getRoutingRules);
    }

    /**
     * Derives the routing key from the descriptor: capacity group first, then application name,
     * then a fixed default.
     */
    private static String routeKeyFor(JobDescriptor jobDescriptor) {
        String capacityGroup = jobDescriptor.getCapacityGroup();
        if (!capacityGroup.isEmpty()) {
            return capacityGroup;
        }
        String applicationName = jobDescriptor.getApplicationName();
        return applicationName.isEmpty() ? DEFAULT_ROUTE_KEY : applicationName;
    }

    /**
     * Resolution order: (1) a cell explicitly requested by the job, (2) the first routing rule
     * matching the route key among non-avoided cells, (3) any known cell not on the job's
     * avoid list. Empty only when every available cell is rejected by the avoid list.
     */
    @Override
    public Optional<Cell> routeKey(JobDescriptor jobDescriptor) {
        Optional<Cell> pinned = getCellPinnedToJob(jobDescriptor);
        if (pinned.isPresent()) {
            return pinned;
        }
        Set<String> avoidedCells = StringExt.splitByCommaIntoSet(
                jobDescriptor.getAttributesMap().get(JobAttributes.JOB_PARAMETER_ATTRIBUTES_CELL_AVOID)
        );
        Optional<Cell> ruleMatch = selector.select(
                routeKeyFor(jobDescriptor),
                cell -> !avoidedCells.contains(cell.getName())
        );
        if (ruleMatch.isPresent()) {
            return ruleMatch;
        }
        // no rule matched: fall back to any cell that is not on the avoid list
        return cellInfoResolver.resolve().stream()
                .filter(cell -> !avoidedCells.contains(cell.getName()))
                .findAny();
    }

    /** Resolves the cell the job is pinned to via its cell-request attribute, if any exists. */
    private Optional<Cell> getCellPinnedToJob(JobDescriptor jobDescriptor) {
        String requestedCell = jobDescriptor.getAttributesMap().get(JobAttributes.JOB_PARAMETER_ATTRIBUTES_CELL_REQUEST);
        if (StringExt.isEmpty(requestedCell)) {
            return Optional.empty();
        }
        return cellInfoResolver.resolve().stream()
                .filter(cell -> requestedCell.equals(cell.getName()))
                .findAny();
    }
}
| 9,906 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/TitusFederationRuntimeComponent.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.common.jhiccup.JHiccupComponent;
import com.netflix.titus.common.environment.MyEnvironment;
import com.netflix.titus.common.environment.MyEnvironments;
import com.netflix.titus.common.runtime.SystemAbortListener;
import com.netflix.titus.common.runtime.SystemLogService;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.internal.DefaultTitusRuntime;
import com.netflix.titus.common.runtime.internal.LoggingSystemAbortListener;
import com.netflix.titus.common.runtime.internal.LoggingSystemLogService;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.code.CodeInvariants;
import com.netflix.titus.common.util.code.CompositeCodeInvariants;
import com.netflix.titus.common.util.code.LoggingCodeInvariants;
import com.netflix.titus.common.util.code.SpectatorCodeInvariants;
import com.netflix.titus.runtime.connector.common.reactor.GrpcToReactorClientFactoryComponent;
import com.netflix.titus.runtime.connector.common.reactor.GrpcToReactorServerFactoryComponent;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolveComponent;
import com.netflix.titus.runtime.endpoint.rest.RestAddOnsComponent;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.core.env.Environment;
@Configuration
@Import({
        JHiccupComponent.class,
        CallMetadataResolveComponent.class,
        GrpcToReactorClientFactoryComponent.class,
        GrpcToReactorServerFactoryComponent.class,
        RestAddOnsComponent.class,
})
public class TitusFederationRuntimeComponent {

    /** Spectator metrics registry shared across the federation server. */
    @Bean
    public Registry getRegistry() {
        return new DefaultRegistry();
    }

    /** Logging-only system log sink. */
    @Bean
    public SystemLogService getSystemLogService() {
        return LoggingSystemLogService.getInstance();
    }

    /** Abort listener that only logs; no process-level shutdown hooks. */
    @Bean
    public SystemAbortListener getSystemAbortListener() {
        return LoggingSystemAbortListener.getDefault();
    }

    /**
     * Core runtime wiring: invariant violations are both logged and counted under the
     * "titus.runtime.invariant.violations" metric. The boolean flag passed to
     * DefaultTitusRuntime differs from the Guice module's wiring (true here, false there)
     * — NOTE(review): presumably a fit-injection/mode toggle; confirm against DefaultTitusRuntime.
     */
    @Bean
    public TitusRuntime getTitusRuntime(Environment environment, SystemLogService systemLogService, SystemAbortListener systemAbortListener, Registry registry) {
        CodeInvariants codeInvariants = new CompositeCodeInvariants(
                LoggingCodeInvariants.getDefault(),
                new SpectatorCodeInvariants(registry.createId("titus.runtime.invariant.violations"), registry)
        );
        return new DefaultTitusRuntime(MyEnvironments.newSpring(environment), codeInvariants, systemLogService, true, systemAbortListener, registry);
    }

    /** Archaius-style proxy over "titus.federation.*" properties. */
    @Bean
    public TitusFederationConfiguration getTitusFederationConfiguration(MyEnvironment environment) {
        return Archaius2Ext.newConfiguration(TitusFederationConfiguration.class, environment);
    }

    /** gRPC client settings (timeouts etc.) sourced from the environment. */
    @Bean
    public GrpcConfiguration getGrpcConfiguration(MyEnvironment environment) {
        return Archaius2Ext.newConfiguration(GrpcConfiguration.class, environment);
    }
}
| 9,907 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/TitusFederationConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
@Configuration(prefix = "titus.federation")
public interface TitusFederationConfiguration {

    /** host:port of the remote federation endpoint (used with {@link #isRemoteFederationEnabled()}). */
    @DefaultValue("hostName1:7001")
    String getRemoteFederation();

    /** host:port of the VPC service endpoint. */
    @DefaultValue("vpcService:7001")
    String getVpcService();

    /** Cell membership spec: semicolon-separated {@code name=host:port} entries. */
    @DefaultValue("cell1=hostName1:7001;cell2=hostName2:7002")
    String getCells();

    /** Deployment stack name (e.g. "dev"). */
    @DefaultValue("dev")
    String getStack();

    /** Application/capacity-group routing rules: semicolon-separated {@code cell=regex} entries. */
    @DefaultValue("cell1=(app1.*|app2.*);cell2=(.*)")
    String getRoutingRules();

    /** Instance-type routing rules (same {@code cell=regex} format), e.g. routing GPU jobs. */
    @DefaultValue("cell1=(gpu.*)")
    String getInstanceTypeRoutingRules();

    /** When true, job ids are created at the federation layer — TODO confirm exact semantics. */
    @DefaultValue("false")
    boolean isFederationJobIdCreationEnabled();

    /** When true, calls are delegated to the remote federation at {@link #getRemoteFederation()}. */
    @DefaultValue("false")
    boolean isRemoteFederationEnabled();
}
| 9,908 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/TitusFederationMain.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.governator.InjectorBuilder;
import com.netflix.governator.guice.jetty.Archaius2JettyModule;
/**
 * Entry point for the Titus federation server: builds a Governator injector from the federation
 * module, an embedded Jetty module, and laptop configuration overrides, then blocks until
 * shutdown.
 */
public class TitusFederationMain {

    public static void main(String[] args) throws Exception {
        // overlay "laptop" property overrides on top of the default configuration
        ArchaiusModule configOverrides = new ArchaiusModule() {
            @Override
            protected void configureArchaius() {
                bindApplicationConfigurationOverrideResource("laptop");
            }
        };
        InjectorBuilder
                .fromModules(new TitusFederationModule(), new Archaius2JettyModule(), configOverrides)
                .createInjector()
                .awaitTermination();
    }
}
| 9,909 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/TitusFederationModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import java.util.Arrays;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.archaius.api.Config;
import com.netflix.governator.guice.jersey.GovernatorJerseySupportModule;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.CallMetadataConstants;
import com.netflix.titus.common.environment.MyEnvironments;
import com.netflix.titus.common.runtime.SystemAbortListener;
import com.netflix.titus.common.runtime.SystemLogService;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.internal.DefaultTitusRuntime;
import com.netflix.titus.common.runtime.internal.LoggingSystemAbortListener;
import com.netflix.titus.common.runtime.internal.LoggingSystemLogService;
import com.netflix.titus.common.util.archaius2.Archaius2ConfigurationLogger;
import com.netflix.titus.common.util.code.CodeInvariants;
import com.netflix.titus.common.util.code.CompositeCodeInvariants;
import com.netflix.titus.common.util.code.LoggingCodeInvariants;
import com.netflix.titus.common.util.code.SpectatorCodeInvariants;
import com.netflix.titus.common.util.grpc.reactor.GrpcToReactorServerFactory;
import com.netflix.titus.common.util.grpc.reactor.server.DefaultGrpcToReactorServerFactory;
import com.netflix.titus.common.util.guice.ContainerEventBusModule;
import com.netflix.titus.federation.endpoint.FederationEndpointModule;
import com.netflix.titus.federation.service.*;
import com.netflix.titus.federation.service.router.ApplicationCellRouter;
import com.netflix.titus.federation.service.router.CellRouter;
import com.netflix.titus.federation.service.router.ChainCellRouter;
import com.netflix.titus.federation.service.router.FallbackCellRouter;
import com.netflix.titus.federation.service.router.SpecialInstanceTypeRouter;
import com.netflix.titus.runtime.TitusEntitySanitizerModule;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import com.netflix.titus.runtime.endpoint.resolver.HostCallerIdResolver;
import com.netflix.titus.runtime.endpoint.resolver.NoOpHostCallerIdResolver;
public class TitusFederationModule extends AbstractModule {
    @Override
    protected void configure() {
        // core runtime/observability wiring
        bind(Archaius2ConfigurationLogger.class).asEagerSingleton();
        bind(Registry.class).toInstance(new DefaultRegistry());
        bind(SystemLogService.class).to(LoggingSystemLogService.class);
        bind(SystemAbortListener.class).to(LoggingSystemAbortListener.class);
        install(new GovernatorJerseySupportModule());
        install(new ContainerEventBusModule());
        install(new TitusEntitySanitizerModule());
        bind(HostCallerIdResolver.class).to(NoOpHostCallerIdResolver.class);
        // cell / remote-federation connectivity
        bind(CellConnector.class).to(DefaultCellConnector.class);
        bind(RemoteFederationConnector.class).to(DefaultRemoteFederationConnector.class);
        bind(CellWebClientConnector.class).to(DefaultCellWebClientConnector.class);
        bind(WebClientFactory.class).toInstance(SimpleWebClientFactory.getInstance());
        bind(CellInfoResolver.class).to(DefaultCellInfoResolver.class);
        bind(RemoteFederationInfoResolver.class).to(DefaultRemoteFederationInfoResolver.class);
        bind(VpcServiceConnector.class).to(DefaultVpcServiceConnector.class);
        // endpoint and service layers
        install(new FederationEndpointModule());
        install(new ServiceModule());
    }

    /** Archaius proxy over "titus.federation.*" properties. */
    @Provides
    @Singleton
    public TitusFederationConfiguration getConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(TitusFederationConfiguration.class);
    }

    /**
     * Core runtime: invariant violations are logged and counted under
     * "titus.runtime.invariant.violations". Note the boolean flag passed to DefaultTitusRuntime
     * is false here but true in the Spring component — NOTE(review): confirm the intended
     * difference against DefaultTitusRuntime's constructor.
     */
    @Provides
    @Singleton
    public TitusRuntime getTitusRuntime(Config config, SystemLogService systemLogService, SystemAbortListener systemAbortListener, Registry registry) {
        CodeInvariants codeInvariants = new CompositeCodeInvariants(
                LoggingCodeInvariants.getDefault(),
                new SpectatorCodeInvariants(registry.createId("titus.runtime.invariant.violations"), registry)
        );
        return new DefaultTitusRuntime(MyEnvironments.newArchaius(config), codeInvariants, systemLogService, false, systemAbortListener, registry);
    }

    /** gRPC client settings (timeouts etc.). */
    @Provides
    @Singleton
    public GrpcConfiguration getGrpcConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(GrpcConfiguration.class);
    }

    /** Reactor server factory resolving per-call metadata, defaulting when unresolved. */
    @Provides
    @Singleton
    public GrpcToReactorServerFactory getGrpcToReactorServerFactory(CallMetadataResolver callMetadataResolver) {
        return new DefaultGrpcToReactorServerFactory<>(
                CallMetadata.class,
                () -> callMetadataResolver.resolve().orElse(CallMetadataConstants.UNDEFINED_CALL_METADATA)
        );
    }

    /**
     * Routing chain, consulted in order: GPU/special-instance rules first, then application
     * rules, then the default-cell fallback (so routing always yields a cell).
     */
    @Provides
    @Singleton
    public CellRouter getCellRouter(CellInfoResolver cellInfoResolver, TitusFederationConfiguration federationConfiguration) {
        return new ChainCellRouter(Arrays.asList(
                SpecialInstanceTypeRouter.getGpuInstanceTypeRouter(cellInfoResolver, federationConfiguration),
                new ApplicationCellRouter(cellInfoResolver, federationConfiguration),
                new FallbackCellRouter(cellInfoResolver)
        ));
    }
}
| 9,910 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/TitusFederationComponent.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import java.util.Arrays;
import com.netflix.titus.federation.endpoint.FederationEndpointComponent;
import com.netflix.titus.federation.service.AggregatingJobServiceGateway;
import com.netflix.titus.federation.service.CellInfoResolver;
import com.netflix.titus.federation.service.DefaultCellConnector;
import com.netflix.titus.federation.service.DefaultCellInfoResolver;
import com.netflix.titus.federation.service.DefaultCellWebClientConnector;
import com.netflix.titus.federation.service.FallbackJobServiceGateway;
import com.netflix.titus.federation.service.JobActivityServiceComponent;
import com.netflix.titus.federation.service.RemoteJobServiceGateway;
import com.netflix.titus.federation.service.ServiceComponent;
import com.netflix.titus.federation.service.SimpleWebClientFactory;
import com.netflix.titus.federation.service.WebClientFactory;
import com.netflix.titus.federation.service.router.ApplicationCellRouter;
import com.netflix.titus.federation.service.router.CellRouter;
import com.netflix.titus.federation.service.router.ChainCellRouter;
import com.netflix.titus.federation.service.router.FallbackCellRouter;
import com.netflix.titus.federation.service.router.SpecialInstanceTypeRouter;
import com.netflix.titus.runtime.TitusEntitySanitizerComponent;
import com.netflix.titus.runtime.endpoint.resolver.HostCallerIdResolver;
import com.netflix.titus.runtime.endpoint.resolver.NoOpHostCallerIdResolver;
import com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
@Configuration
@Import({
TitusEntitySanitizerComponent.class,
DefaultCellInfoResolver.class,
DefaultCellConnector.class,
DefaultCellWebClientConnector.class,
ServiceComponent.class,
FederationEndpointComponent.class,
JobActivityServiceComponent.class
})
/**
 * Spring bean wiring for the Titus federation server. Provides caller-id
 * resolution, the web client factory, and the cell routing chain.
 */
public class TitusFederationComponent {

    /** Caller id resolution is not used in federation, so a no-op resolver is wired in. */
    @Bean
    public HostCallerIdResolver getHostCallerIdResolver() {
        return NoOpHostCallerIdResolver.getInstance();
    }

    /** Plain {@link WebClientFactory} without any custom instrumentation. */
    @Bean
    public WebClientFactory getWebClientFactory() {
        return SimpleWebClientFactory.getInstance();
    }

    /**
     * Builds the cell routing chain. Routers are consulted in order:
     * GPU instance-type routing first, then application-name based rules,
     * and finally a fallback that always resolves a cell.
     */
    @Bean
    public CellRouter getCellRouter(CellInfoResolver cellInfoResolver, TitusFederationConfiguration federationConfiguration) {
        CellRouter gpuRouter = SpecialInstanceTypeRouter.getGpuInstanceTypeRouter(cellInfoResolver, federationConfiguration);
        CellRouter applicationRouter = new ApplicationCellRouter(cellInfoResolver, federationConfiguration);
        CellRouter fallbackRouter = new FallbackCellRouter(cellInfoResolver);
        return new ChainCellRouter(Arrays.asList(gpuRouter, applicationRouter, fallbackRouter));
    }
}
| 9,911 |
0 | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation | Create_ds/titus-control-plane/titus-server-federation/src/main/java/com/netflix/titus/federation/startup/GrpcConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation.startup;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Archaius-backed GRPC tuning knobs for the federation server, read from the
 * {@code titus.federation.grpc} property prefix.
 */
@Configuration(prefix = "titus.federation.grpc")
public interface GrpcConfiguration {
/** Per-request timeout (milliseconds) applied to GRPC calls. Defaults to 10s. */
@DefaultValue("10000")
long getRequestTimeoutMs();
/**
 * Timeout (milliseconds) before falling back from the primary path. Defaults to 1s.
 * NOTE(review): presumably consumed by the fallback job service gateway — confirm against its usage.
 */
@DefaultValue("1000")
long getPrimaryFallbackTimeoutMs();
}
| 9,912 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/SampleTitusChangeActions.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.jobmanager.service.V3JobOperations.Trigger;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.framework.reconciler.ModelActionHolder;
import com.netflix.titus.master.jobmanager.service.common.action.TitusChangeAction;
import rx.Observable;
/**
* A collection of {@link TitusChangeAction}s, and methods for testing.
*/
public final class SampleTitusChangeActions {
private static final CallMetadata TEST_CALLMETADATA = CallMetadata.newBuilder().withCallerId("junit").build();
public static TitusChangeAction successfulJob() {
return new SuccessfulChangeAction(V3JobOperations.Trigger.API, "jobId");
}
public static TitusChangeAction failingJob(int failureCount) {
return new FailingChangeAction(V3JobOperations.Trigger.API, "jobId", failureCount);
}
private static class SuccessfulChangeAction extends TitusChangeAction {
private SuccessfulChangeAction(Trigger trigger, String id) {
super(trigger, id, null, "simulatedChangeAction", "Simulated successful action", TEST_CALLMETADATA);
}
@Override
public Observable<List<ModelActionHolder>> apply() {
return Observable.just(Collections.emptyList());
}
}
private static class FailingChangeAction extends TitusChangeAction {
private final AtomicInteger failureCounter;
protected FailingChangeAction(Trigger trigger, String id, int failureCount) {
super(trigger, id, null, "simulatedFailingAction", "Simulated initial failure repeated " + failureCount + " times", TEST_CALLMETADATA);
this.failureCounter = new AtomicInteger(failureCount);
}
@Override
public Observable<List<ModelActionHolder>> apply() {
if (failureCounter.decrementAndGet() >= 0) {
return Observable.error(new RuntimeException("Simulated failure; remaining failures=" + failureCounter.get()));
}
return Observable.just(Collections.emptyList());
}
}
}
| 9,913 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/endpoint/v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/endpoint/v3/grpc/ObserveJobsSubscriptionTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.endpoint.v3.grpc;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobChangeNotification.JobUpdate;
import com.netflix.titus.grpc.protogen.JobChangeNotification.TaskUpdate;
import com.netflix.titus.grpc.protogen.KeepAliveRequest;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.ObserveJobsWithKeepAliveRequest;
import com.netflix.titus.runtime.endpoint.metadata.AnonymousCallMetadataResolver;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import com.netflix.titus.testkit.model.job.NoOpGrpcObjectsCache;
import io.grpc.stub.ServerCallStreamObserver;
import io.grpc.stub.StreamObserver;
import org.junit.Before;
import org.junit.Test;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static org.assertj.core.api.Assertions.assertThat;
public class ObserveJobsSubscriptionTest {
private static final String SERVICE_JOB_WITH_ONE_TASK = "serviceJobWithOneTask";
private static final ObserveJobsQuery QUERY = ObserveJobsQuery.newBuilder().putFilteringCriteria("jobType", "service").build();
private static final ObserveJobsWithKeepAliveRequest QUERY_REQUEST = ObserveJobsWithKeepAliveRequest.newBuilder()
.setQuery(QUERY)
.build();
private final TitusRuntime titusRuntime = TitusRuntimes.test();
private final TestScheduler testScheduler = Schedulers.test();
private final JobComponentStub jobComponentStub = new JobComponentStub(titusRuntime);
private final ObserveJobsContext context = new ObserveJobsContext(
jobComponentStub.getJobOperations(),
AnonymousCallMetadataResolver.getInstance(),
new NoOpGrpcObjectsCache(),
testScheduler,
new DefaultJobManagementServiceGrpcMetrics(titusRuntime),
titusRuntime
);
private final ObserveJobsSubscription jobsSubscription = new ObserveJobsSubscription(context, true);
private StreamObserver<JobChangeNotification> responseStreamObserver;
private final BlockingQueue<JobChangeNotification> responseEvents = new LinkedBlockingDeque<>();
private Throwable responseError;
private boolean responseCompleted;
@Before
public void setUp() throws Exception {
// Register a job template whose generated service jobs are resized to exactly one task.
jobComponentStub.addJobTemplate(SERVICE_JOB_WITH_ONE_TASK,
JobDescriptorGenerator.serviceJobDescriptors()
.map(jd -> JobFunctions.changeServiceJobCapacity(jd, 1))
.cast(JobDescriptor.class)
);
// Minimal ServerCallStreamObserver stub: emitted events, errors and completion
// are recorded into test fields for later assertions; all flow-control hooks
// are no-ops and the stream always reports ready and not cancelled.
this.responseStreamObserver = new ServerCallStreamObserver<JobChangeNotification>() {
@Override
public boolean isCancelled() {
return false;
}
@Override
public void setOnCancelHandler(Runnable onCancelHandler) {
}
@Override
public void setCompression(String compression) {
}
@Override
public boolean isReady() {
return true;
}
@Override
public void setOnReadyHandler(Runnable onReadyHandler) {
}
@Override
public void disableAutoInboundFlowControl() {
}
@Override
public void request(int count) {
}
@Override
public void setMessageCompression(boolean enable) {
}
@Override
public void onNext(JobChangeNotification event) {
responseEvents.add(event);
}
@Override
public void onError(Throwable error) {
responseError = error;
}
@Override
public void onCompleted() {
responseCompleted = true;
}
};
}
@Test
public void testObserveJobsSnapshot() {
// Create a job with one task before subscribing, so it is part of the initial snapshot.
Job<?> job1 = jobComponentStub.createJob(SERVICE_JOB_WITH_ONE_TASK);
Task task1 = jobComponentStub.createDesiredTasks(job1).get(0);
// First snapshot
jobsSubscription.observeJobs(QUERY, responseStreamObserver);
assertThat(expectJobUpdateEvent().getJob().getId()).isEqualTo(job1.getId());
assertThat(expectTaskUpdateEvent().getTask().getId()).isEqualTo(task1.getId());
expectSnapshotEvent();
triggerActions(1);
// Now changes
Job<?> job2 = jobComponentStub.createJob(SERVICE_JOB_WITH_ONE_TASK);
triggerActions(1);
assertThat(expectJobUpdateEvent().getJob().getId()).isEqualTo(job2.getId());
}
@Test
public void testObserveJobsWithKeepAliveSnapshot() {
Job<?> job1 = jobComponentStub.createJob(SERVICE_JOB_WITH_ONE_TASK);
Task task1 = jobComponentStub.createDesiredTasks(job1).get(0);
StreamObserver<ObserveJobsWithKeepAliveRequest> request = jobsSubscription.observeJobsWithKeepAlive(responseStreamObserver);
// Check that nothing is emitted until we send the query request
request.onNext(newKeepAliveRequest(1));
triggerActions(5);
assertThat(responseEvents.poll()).isNull();
// Now send the request and read the snapshot
request.onNext(QUERY_REQUEST);
request.onNext(newKeepAliveRequest(2));
assertThat(expectJobUpdateEvent().getJob().getId()).isEqualTo(job1.getId());
assertThat(expectTaskUpdateEvent().getTask().getId()).isEqualTo(task1.getId());
expectSnapshotEvent();
triggerActions(1);
// A keep-alive response is only emitted after a checkpoint flows through the job stream.
jobComponentStub.emitCheckpoint();
triggerActions(1);
expectKeepAlive(2);
triggerActions(1);
// Now changes
Job<?> job2 = jobComponentStub.createJob(SERVICE_JOB_WITH_ONE_TASK);
triggerActions(1);
assertThat(expectJobUpdateEvent().getJob().getId()).isEqualTo(job2.getId());
// Now keep alive
request.onNext(newKeepAliveRequest(3));
jobComponentStub.emitCheckpoint();
triggerActions(1);
expectKeepAlive(3);
}
@Test
public void testClientRequestError() {
StreamObserver<ObserveJobsWithKeepAliveRequest> request = jobsSubscription.observeJobsWithKeepAlive(responseStreamObserver);
request.onNext(QUERY_REQUEST);
// Subscription to the job service stream is active once the query is sent.
assertThat(jobsSubscription.jobServiceSubscription.isUnsubscribed()).isFalse();
request.onError(new RuntimeException("simulated client error"));
jobComponentStub.createJob(SERVICE_JOB_WITH_ONE_TASK);
// Check that job service event stream subscription is closed.
triggerActions(5);
assertThat(jobsSubscription.jobServiceSubscription.isUnsubscribed()).isTrue();
}
/**
 * Verifies that a keep-alive request alone produces no response, and that it is
 * acknowledged only after a checkpoint event flows through the job event stream.
 */
@Test
public void testKeepAlive() {
    StreamObserver<ObserveJobsWithKeepAliveRequest> request = jobsSubscription.observeJobsWithKeepAlive(responseStreamObserver);
    // Query first
    request.onNext(QUERY_REQUEST);
    expectSnapshotEvent();
    // Now keep alive; nothing may be emitted before a checkpoint arrives.
    request.onNext(newKeepAliveRequest(123));
    triggerActions(5);
    assertThat(responseEvents.poll()).isNull();
    jobComponentStub.emitCheckpoint();
    triggerActions(5);
    // Consistent with the other tests: reuse the shared assertion helper instead
    // of duplicating its poll/assert sequence inline.
    expectKeepAlive(123);
}
/** Wraps a {@link KeepAliveRequest} with the given id into a stream request message. */
private ObserveJobsWithKeepAliveRequest newKeepAliveRequest(long id) {
    KeepAliveRequest keepAlive = KeepAliveRequest.newBuilder().setRequestId(id).build();
    return ObserveJobsWithKeepAliveRequest.newBuilder().setKeepAliveRequest(keepAlive).build();
}
/** Triggers the test scheduler the given number of times to flush queued actions. */
private void triggerActions(int count) {
    for (int remaining = count; remaining > 0; remaining--) {
        testScheduler.triggerActions();
    }
}
/** Polls the next notification, asserts it is a job update, and returns its payload. */
private JobUpdate expectJobUpdateEvent() {
    JobChangeNotification event = responseEvents.poll();
    assertThat(event).isNotNull();
    assertThat(event.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.JOBUPDATE);
    return event.getJobUpdate();
}
/** Polls the next notification, asserts it is a task update, and returns its payload. */
private TaskUpdate expectTaskUpdateEvent() {
    JobChangeNotification event = responseEvents.poll();
    assertThat(event).isNotNull();
    assertThat(event.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.TASKUPDATE);
    return event.getTaskUpdate();
}
/** Polls the next notification and asserts it marks the end of the snapshot. */
private void expectSnapshotEvent() {
    JobChangeNotification event = responseEvents.poll();
    assertThat(event).isNotNull();
    assertThat(event.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.SNAPSHOTEND);
}
/**
 * Polls the next notification and asserts it is a keep-alive response acknowledging
 * the given request id. The parameter is widened to {@code long} for consistency
 * with {@link #newKeepAliveRequest(long)} and {@code getRequestId()}; existing
 * {@code int} callers still compile unchanged.
 */
private void expectKeepAlive(long keepAliveRequestId) {
    JobChangeNotification nextEvent = responseEvents.poll();
    assertThat(nextEvent).isNotNull();
    assertThat(nextEvent.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.KEEPALIVERESPONSE);
    assertThat(nextEvent.getKeepAliveResponse().getRequest().getRequestId()).isEqualTo(keepAliveRequestId);
}
} | 9,914 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/KubeNotificationProcessorTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.ExecutableStatus;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.TwoLevelResource;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.kubernetes.ContainerResultCodeResolver;
import com.netflix.titus.master.kubernetes.KubernetesConfiguration;
import com.netflix.titus.master.kubernetes.client.DirectKubeApiServerIntegrator;
import com.netflix.titus.master.kubernetes.client.model.PodEvent;
import com.netflix.titus.master.kubernetes.client.model.PodWrapper;
import com.netflix.titus.master.kubernetes.controller.KubeJobManagementReconciler;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.kubernetes.client.openapi.models.V1Node;
import io.kubernetes.client.openapi.models.V1Pod;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import reactor.core.publisher.DirectProcessor;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import rx.Completable;
import static com.netflix.titus.common.kube.Annotations.AnnotationKeyIPAddress;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andIpAddress;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andNodeAnnotations;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.newNode;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andPhase;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andRunning;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.newPod;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.TITUS_NODE_DOMAIN;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class KubeNotificationProcessorTest {
private static final Job<BatchJobExt> JOB = JobGenerator.oneBatchJob();
private static final BatchJobTask TASK = JobGenerator.oneBatchTask();
private final TitusRuntime titusRuntime = TitusRuntimes.test();
private DirectProcessor<PodEvent> podEvents;
private DirectProcessor<PodEvent> reconcilerPodEvents;
private KubeNotificationProcessor processor;
private final KubernetesConfiguration configuration = Archaius2Ext.newConfiguration(KubernetesConfiguration.class);
@Mock
private V3JobOperations jobOperations;
@Mock
private ContainerResultCodeResolver containerResultCodeResolver;
@Captor
private ArgumentCaptor<Function<Task, Optional<Task>>> changeFunctionCaptor;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
podEvents = DirectProcessor.create();
reconcilerPodEvents = DirectProcessor.create();
// The notification scheduler is overridden with an immediate scheduler so that
// emitted pod events are processed synchronously within the tests.
processor = new KubeNotificationProcessor(configuration,
new FakeDirectKube(),
new FakeReconciler(),
jobOperations,
containerResultCodeResolver,
titusRuntime
) {
@Override
protected Scheduler initializeNotificationScheduler() {
return Schedulers.immediate();
}
};
processor.enterActiveMode();
// Default stubs shared by all tests: the well-known task resolves to (JOB, TASK),
// task updates succeed, and the result code resolver yields nothing.
when(jobOperations.findTaskById(eq(TASK.getId()))).thenReturn(Optional.of(Pair.of(JOB, TASK)));
when(jobOperations.updateTask(eq(TASK.getId()), any(), any(), anyString(), any())).thenReturn(Completable.complete());
when(containerResultCodeResolver.resolve(any(), any(), any())).thenReturn(Optional.empty());
}
@After
public void tearDown() {
// Complete both event sources before shutting the processor down so its
// subscriptions terminate cleanly.
reconcilerPodEvents.onComplete();
podEvents.onComplete();
processor.shutdown();
}
/**
 * Verifies that a running pod with an IP annotation produces a Started task whose
 * context carries the node host/AMI/stack data and the pod-level container IP.
 */
@Test
public void testUpdateTaskStatusVK() {
    V1Pod pod = newPod(TASK.getId(), andRunning());
    V1Node node = newNode(andIpAddress("2.2.2.2"), andNodeAnnotations(
            TITUS_NODE_DOMAIN + "ami", "ami123",
            TITUS_NODE_DOMAIN + "stack", "myStack"
    ));
    // Renamed from 'UpdatedAnnotations' to follow lowerCamelCase local naming.
    Map<String, String> updatedAnnotations = new HashMap<>();
    updatedAnnotations.put(AnnotationKeyIPAddress, "1.2.3.4");
    pod.getMetadata().setAnnotations(updatedAnnotations);
    Task updatedTask = processor.updateTaskStatus(
            new PodWrapper(pod),
            TaskStatus.newBuilder().withState(TaskState.Started).build(),
            Optional.of(node),
            TASK,
            false
    ).orElse(null);
    // The status history must contain the intermediate states preceding Started.
    Set<TaskState> pastStates = updatedTask.getStatusHistory().stream().map(ExecutableStatus::getState).collect(Collectors.toSet());
    assertThat(pastStates).contains(TaskState.Accepted, TaskState.Launched, TaskState.StartInitiated);
    assertThat(updatedTask.getTaskContext()).containsEntry(TaskAttributes.TASK_ATTRIBUTES_AGENT_HOST, "2.2.2.2");
    assertThat(updatedTask.getTaskContext()).containsEntry(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.2.3.4");
    assertThat(updatedTask.getTaskContext()).containsEntry(TaskAttributes.TASK_ATTRIBUTES_AGENT_AMI, "ami123");
    assertThat(updatedTask.getTaskContext()).containsEntry(TaskAttributes.TASK_ATTRIBUTES_AGENT_STACK, "myStack");
}
/**
 * A pod reported in the 'Failed' phase while the task is still Accepted must
 * trigger exactly one task update with the Kube trigger and the expected reason.
 */
@Test
public void testPodPhaseFailedNoContainerCreated() {
    V1Pod pod = newPod(TASK.getId(), andPhase("Failed"));
    // findTaskById/updateTask stubbing is inherited from setUp(); the previous
    // inline re-stubbing duplicated it verbatim and has been removed.
    podEvents.onNext(PodEvent.onAdd(pod));
    verify(jobOperations, times(1)).updateTask(eq(TASK.getId()), changeFunctionCaptor.capture(), eq(V3JobOperations.Trigger.Kube),
            eq("Pod status updated from kubernetes node (k8phase='Failed', taskState=Accepted)"), any());
}
/** A pod reporting Started must not rewind a task that already reached KillInitiated. */
@Test
public void testTaskStateDoesNotMoveBack() {
    V1Pod runningPod = newPod(TASK.getId(), andRunning());
    TaskStatus startedStatus = TaskStatus.newBuilder().withState(TaskState.Started).build();
    Task killInitiatedTask = JobFunctions.changeTaskStatus(
            TASK, TaskStatus.newBuilder().withState(TaskState.KillInitiated).build());
    Task updated = processor.updateTaskStatus(
            new PodWrapper(runningPod),
            startedStatus,
            Optional.of(newNode()),
            killInitiatedTask,
            false
    ).orElse(null);
    assertThat(updated).isNull();
}
/** A structural copy of a task must be reported as equivalent (empty difference). */
@Test
public void testAreTasksEquivalent_Same() {
    BatchJobTask original = JobGenerator.oneBatchTask();
    BatchJobTask copy = original.toBuilder().build();
    assertThat(KubeNotificationProcessor.areTasksEquivalent(original, copy)).isEmpty();
}
/** A differing status must be reported as a 'different task status' difference. */
@Test
public void testAreTasksEquivalent_DifferentStatus() {
    BatchJobTask original = JobGenerator.oneBatchTask();
    TaskStatus changedStatus = original.getStatus().toBuilder().withReasonMessage("my important change").build();
    BatchJobTask modified = original.toBuilder().withStatus(changedStatus).build();
    assertThat(KubeNotificationProcessor.areTasksEquivalent(original, modified)).contains("different task status");
}
/** Differing attributes must be reported as a 'different task attributes' difference. */
@Test
public void testAreTasksEquivalent_DifferentAttributes() {
    BatchJobTask original = JobGenerator.oneBatchTask();
    Map<String, String> markerAttributes = Collections.singletonMap("testAreTasksEquivalent_DifferentAttributes", "true");
    BatchJobTask modified = original.toBuilder().withAttributes(markerAttributes).build();
    assertThat(KubeNotificationProcessor.areTasksEquivalent(original, modified)).contains("different task attributes");
}
/** A differing task context must be reported as a 'different task context' difference. */
@Test
public void testAreTasksEquivalent_DifferentContext() {
    BatchJobTask first = JobGenerator.oneBatchTask();
    // Fixed copy-paste: the marker key previously read "...DifferentAttributes",
    // which was misleading; the assertion itself is unaffected (any key differs).
    BatchJobTask second = first.toBuilder()
            .withTaskContext(Collections.singletonMap("testAreTasksEquivalent_DifferentContext", "true"))
            .build();
    assertThat(KubeNotificationProcessor.areTasksEquivalent(first, second)).contains("different task context");
}
/** Differing two-level resources must be reported as such. */
@Test
public void testAreTasksEquivalent_DifferentTwoLevelResource() {
    BatchJobTask original = JobGenerator.oneBatchTask();
    TwoLevelResource extraResource = TwoLevelResource.newBuilder().withName("fakeResource").build();
    BatchJobTask modified = original.toBuilder().withTwoLevelResources(extraResource).build();
    assertThat(KubeNotificationProcessor.areTasksEquivalent(original, modified)).contains("different task two level resources");
}
// Test double feeding the processor from the outer 'podEvents' processor.
// Only events() and resolveReasonCode(...) are exercised by these tests; the
// remaining operations fail fast if touched.
private class FakeDirectKube implements DirectKubeApiServerIntegrator {
@Override
public Flux<PodEvent> events() {
return podEvents;
}
@Override
public Optional<V1Pod> findPod(String taskId) {
throw new UnsupportedOperationException("not needed");
}
@Override
public Mono<V1Pod> launchTask(Job job, Task task) {
throw new UnsupportedOperationException("not needed");
}
@Override
public Mono<Void> terminateTask(Task task) {
throw new UnsupportedOperationException("not needed");
}
@Override
public String resolveReasonCode(Throwable cause) {
return TaskStatus.REASON_UNKNOWN_SYSTEM_ERROR;
}
}
// Test double exposing the outer 'reconcilerPodEvents' processor as the
// reconciler's pod event source.
private class FakeReconciler implements KubeJobManagementReconciler {
@Override
public Flux<PodEvent> getPodEventSource() {
return reconcilerPodEvents;
}
}
}
| 9,915 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/JobTransactionLoggerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service;
import java.util.Optional;
import java.util.UUID;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.JobStatus;
import com.netflix.titus.api.jobmanager.service.V3JobOperations.Trigger;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.framework.reconciler.EntityHolder;
import com.netflix.titus.common.framework.reconciler.ModelActionHolder;
import com.netflix.titus.master.jobmanager.service.common.action.TitusChangeAction;
import com.netflix.titus.master.jobmanager.service.common.action.TitusModelAction;
import com.netflix.titus.master.jobmanager.service.event.JobManagerReconcilerEvent;
import com.netflix.titus.master.jobmanager.service.event.JobModelReconcilerEvent.JobModelUpdateReconcilerEvent;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.assertj.core.api.Assertions.assertThat;
public class JobTransactionLoggerTest {
private static final Logger logger = LoggerFactory.getLogger(JobTransactionLoggerTest.class);
/**
* Sole purpose of this test is visual inspection of the generated log line.
*/
/**
 * Builds a job model-update reconciler event and formats it, asserting only that
 * a non-empty log line is produced; the line is printed for visual inspection.
 * The redundant {@code throws Exception} clause was removed — nothing checked
 * is thrown in the body.
 */
@Test
public void testLogFormatting() {
    Job previousJob = createJob();
    Job currentJob = previousJob.toBuilder().withStatus(JobStatus.newBuilder().withState(JobState.Finished).build()).build();
    // Reference model update moving the job from its previous to the current state.
    ModelActionHolder modelActionHolder = ModelActionHolder.reference(
            TitusModelAction.newModelUpdate("testModelAction")
                    .job(previousJob)
                    .trigger(Trigger.API)
                    .summary("Job model update")
                    .jobUpdate(jobHolder -> jobHolder.setEntity(currentJob))
    );
    TitusChangeAction changeAction = TitusChangeAction.newAction("testChangeAction")
            .job(previousJob)
            .trigger(Trigger.API)
            .summary("Job update")
            .callMetadata(CallMetadata.newBuilder().withCallerId("LoggerTest").withCallReason("Testing logger transaction").build())
            .applyModelUpdate(self -> modelActionHolder);
    JobManagerReconcilerEvent jobReconcilerEvent = new JobModelUpdateReconcilerEvent(
            previousJob,
            changeAction,
            modelActionHolder,
            EntityHolder.newRoot(currentJob.getId(), currentJob),
            Optional.of(EntityHolder.newRoot(previousJob.getId(), previousJob)),
            "1"
    );
    String logLine = JobTransactionLogger.doFormat(jobReconcilerEvent);
    assertThat(logLine).isNotEmpty();
    logger.info("Job event: {}", logLine);
}
/** Builds a minimal Accepted job with a random id and an empty descriptor. */
private Job createJob() {
    JobStatus acceptedStatus = JobModel.newJobStatus().withState(JobState.Accepted).build();
    return JobModel.newJob()
            .withId(UUID.randomUUID().toString())
            .withStatus(acceptedStatus)
            .withJobDescriptor(JobModel.newJobDescriptor().build())
            .build();
}
} | 9,916 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/limiter/DefaultJobSubmitLimiterTest.java | package com.netflix.titus.master.jobmanager.service.limiter;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.master.jobmanager.service.JobManagerConfiguration;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Test;
import static com.netflix.titus.testkit.model.job.JobGenerator.serviceJobs;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class DefaultJobSubmitLimiterTest {
/**
 * Re-submitting a descriptor whose job group (stack, detail, sequence) already
 * exists among the running jobs must be rejected with InvalidSequenceId.
 */
@Test
public void checkDuplicateJobSequence() {
    JobManagerConfiguration config = mock(JobManagerConfiguration.class);
    when(config.getMaxActiveJobs()).thenReturn(2L);
    V3JobOperations v3JobOperations = mock(V3JobOperations.class);
    DefaultJobSubmitLimiter limiter = new DefaultJobSubmitLimiter(config, v3JobOperations);
    Job<ServiceJobExt> existingJob = serviceJobs(JobDescriptorGenerator.oneTaskServiceJobDescriptor()).getValue();
    List<Job> runningJobs = new ArrayList<>();
    runningJobs.add(existingJob);
    when(v3JobOperations.getJobs()).thenReturn(runningJobs);
    // Using the same job description containing the existing job group (stack, detail, sequence)
    Optional<JobManagerException> rejection = limiter.checkIfAllowed(existingJob.getJobDescriptor());
    assertThat(rejection).isNotNull();
    assertThat(rejection.isPresent()).isTrue();
    assertThat(rejection.get()).isNotNull();
    assertThat(rejection.get().getErrorCode()).isEqualTo(JobManagerException.ErrorCode.InvalidSequenceId);
}
/**
 * With the active-job limit already reached, any further submission must be
 * rejected with JobCreateLimited.
 */
@Test
public void checkMaxActiveJobsLimit() {
    JobManagerConfiguration config = mock(JobManagerConfiguration.class);
    when(config.getMaxActiveJobs()).thenReturn(1L);
    V3JobOperations v3JobOperations = mock(V3JobOperations.class);
    DefaultJobSubmitLimiter limiter = new DefaultJobSubmitLimiter(config, v3JobOperations);
    Job<ServiceJobExt> existingJob = serviceJobs(JobDescriptorGenerator.oneTaskServiceJobDescriptor()).getValue();
    List<Job> runningJobs = new ArrayList<>();
    runningJobs.add(existingJob);
    when(v3JobOperations.getJobs()).thenReturn(runningJobs);
    // Max active jobs already running
    Optional<JobManagerException> rejection = limiter.checkIfAllowed(existingJob.getJobDescriptor());
    assertThat(rejection).isNotNull();
    assertThat(rejection.isPresent()).isTrue();
    assertThat(rejection.get()).isNotNull();
    assertThat(rejection.get().getErrorCode()).isEqualTo(JobManagerException.ErrorCode.JobCreateLimited);
}
} | 9,917 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/ComputeProviderSchedulerTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeServiceJobCapacity;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Scenario tests of the job/task lifecycle driven through the compute provider (Kube)
 * scheduler integration: normal completion, explicit task kill, pod create failures and
 * scheduling gated on integration readiness.
 */
public class ComputeProviderSchedulerTest {
    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    /** Happy path: a one-task batch job is accepted, its task starts and finishes OK (exit code 0). */
    @Test
    public void testRunAndCompleteOkBatchJob() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.finishSingleTaskJob(0, 0, TaskStatus.REASON_NORMAL, 0))
        );
    }
    /**
     * Two-task service job: tasks that finish OK are replaced with new resubmits, and killing
     * the job terminates the replacement tasks and completes the job.
     */
    @Test
    public void testRunAndCompleteOkServiceJob() {
        JobDescriptor<ServiceJobExt> twoTaskJob = changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 2);
        jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .inActiveTasks(ScenarioTemplates::acceptTask)
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.triggerComputeProviderFinishedEvent(taskIdx, resubmit, 0))
                .advance().advance()
                // Service tasks are resubmitted after finishing; accept the replacements.
                .inActiveTasks(ScenarioTemplates::acceptTask)
                .ignoreAvailableEvents()
                .template(ScenarioTemplates.killJob())
                .advance()
                .inActiveTasks((taskIdx, resubmit) -> js -> js
                        .template(ScenarioTemplates.reconcilerTaskKill(taskIdx, resubmit))
                        .expectTaskUpdatedInStore(taskIdx, resubmit, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished))
                )
                .template(ScenarioTemplates.verifyJobWithFinishedTasksCompletes())
        );
    }
    /** A started batch task can be explicitly killed via the Kube integration. */
    @Test
    public void testBatchTaskTerminate() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.killKubeTask(0, 0))
        );
    }
    /**
     * Repeated pod create failures finish the batch task, and a fresh replacement is created for
     * each resubmit: the resubmit index grows while there is always exactly one task.
     */
    @Test
    public void testBatchPodCreateFailure() {
        JobScenarioBuilder js = jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .failNextPodCreate(new IllegalStateException("simulated pod create error"))
                .expectJobEvent()
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted)
                .allTasks(allTasks -> assertThat(allTasks).hasSize(1))
                .advance()
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
        ).getJobScenario(0);
        // Fail pod creation again on every retry; each replacement carries an incremented resubmit number.
        for (int resubmit = 1; resubmit < 10; resubmit++) {
            js.failNextPodCreate(new IllegalStateException("simulated pod create error"))
                    .advance()
                    .expectTaskStateChangeEvent(0, resubmit, TaskState.Accepted)
                    .allTasks(allTasks -> assertThat(allTasks).hasSize(1))
                    .advance()
                    .expectTaskStateChangeEvent(0, resubmit, TaskState.Finished)
                    .advance();
        }
    }
    /** Pod create failure on a service task finishes it and a replacement (resubmit 1) is created. */
    @Test
    public void testServicePodCreateFailure() {
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .failNextPodCreate(new IllegalStateException("simulated pod create error"))
                .expectJobEvent()
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted)
                .advance()
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
                .advance()
                .expectTaskStateChangeEvent(0, 1, TaskState.Accepted)
                .allTasks(allTasks -> assertThat(allTasks).hasSize(1))
        );
    }
    /** While the Kube integration is disabled no pod is created; scheduling resumes once it is enabled. */
    @Test
    public void testPodIsNotScheduledIfKubeIntegratorNotReady() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .enableKubeIntegration(false)
                .expectJobEvent()
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted)
                .advance()
                .expectNoTaskStateChangeEvent()
                .enableKubeIntegration(true)
                .advance()
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted, TaskStatus.REASON_POD_CREATED)
        );
    }
}
| 9,918 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/TaskRetryPolicyTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.retry.DelayedRetryPolicy;
import com.netflix.titus.api.jobmanager.model.job.retry.ExponentialBackoffRetryPolicy;
import com.netflix.titus.api.jobmanager.model.job.retry.ImmediateRetryPolicy;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeRetryPolicy;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
/**
 * Verifies that the task retry policies (immediate, delayed, exponential backoff) produce the
 * expected resubmit delays for batch and service jobs, that a long-running task resets its
 * retryer, and that system errors are retried even when the job requests no retries.
 */
public class TaskRetryPolicyTest {
    // Policies under test, each paired with the delay (seconds) expected before every resubmit.
    private static final ImmediateRetryPolicy NO_RETRIES = JobModel.newImmediateRetryPolicy().withRetries(0).build();
    private static final ImmediateRetryPolicy IMMEDIATE = JobModel.newImmediateRetryPolicy().withRetries(5).build();
    private static final int[] IMMEDIATE_DELAYS_SEC = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    private static final DelayedRetryPolicy DELAYED = JobModel.newDelayedRetryPolicy().withDelay(5, TimeUnit.SECONDS).withRetries(5).build();
    private static final int[] DELAYED_POLICY_DELAYS_SEC = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5};
    private static final ExponentialBackoffRetryPolicy EXPONENTIAL = JobModel.newExponentialBackoffRetryPolicy()
            .withInitialDelayMs(1_000)
            .withMaxDelayMs(10_000)
            .withRetries(5)
            .build();
    // 1s initial delay doubling per retry, capped at the 10s maximum.
    private static final int[] EXPONENTIAL_DELAYS_SEC = {1, 2, 4, 8, 10, 10, 10, 10, 10, 10};
    private JobsScenarioBuilder jobsScenarioBuilder;
    @Before
    public void setUp() throws Exception {
        this.jobsScenarioBuilder = new JobsScenarioBuilder();
    }
    @Test
    public void testBatchImmediateRetry() throws Exception {
        JobDescriptor<BatchJobExt> jobWithRetries = changeRetryPolicy(oneTaskBatchJobDescriptor(), IMMEDIATE);
        batchRunAndFail(jobWithRetries, IMMEDIATE_DELAYS_SEC, TimeUnit.SECONDS);
    }
    @Test
    public void testServiceImmediateRetry() throws Exception {
        JobDescriptor<ServiceJobExt> jobWithRetries = changeRetryPolicy(oneTaskServiceJobDescriptor(), IMMEDIATE);
        serviceRunAndFail(jobWithRetries, IMMEDIATE_DELAYS_SEC, TimeUnit.SECONDS);
    }
    @Test
    public void testBatchDelayedRetry() throws Exception {
        JobDescriptor<BatchJobExt> jobWithRetries = changeRetryPolicy(oneTaskBatchJobDescriptor(), DELAYED);
        batchRunAndFail(jobWithRetries, DELAYED_POLICY_DELAYS_SEC, TimeUnit.SECONDS);
    }
    @Test
    public void testServiceDelayedRetry() throws Exception {
        JobDescriptor<ServiceJobExt> jobWithRetries = changeRetryPolicy(oneTaskServiceJobDescriptor(), DELAYED);
        serviceRunAndFail(jobWithRetries, DELAYED_POLICY_DELAYS_SEC, TimeUnit.SECONDS);
    }
    @Test
    public void testBatchExponentialBackoffRetry() throws Exception {
        JobDescriptor<BatchJobExt> jobWithRetries = changeRetryPolicy(oneTaskBatchJobDescriptor(), EXPONENTIAL);
        batchRunAndFail(jobWithRetries, EXPONENTIAL_DELAYS_SEC, TimeUnit.SECONDS);
    }
    @Test
    public void testServiceExponentialBackoffRetry() throws Exception {
        JobDescriptor<ServiceJobExt> jobWithRetries = changeRetryPolicy(oneTaskServiceJobDescriptor(), EXPONENTIAL);
        serviceRunAndFail(jobWithRetries, EXPONENTIAL_DELAYS_SEC, TimeUnit.SECONDS);
    }
    // Fails a batch task through its full retry budget and asserts each resubmit delay, then
    // verifies the final failure is not retried once the limit is reached.
    private void batchRunAndFail(JobDescriptor<BatchJobExt> jobWithRetries, int[] delays, TimeUnit timeUnit) {
        int retryLimit = jobWithRetries.getExtensions().getRetryPolicy().getRetries();
        JobScenarioBuilder jobScenario = runJob(jobWithRetries);
        failRetryableTask(delays, timeUnit, retryLimit);
        jobScenario.advance().template(ScenarioTemplates.failLastBatchRetryableTask(0, retryLimit));
    }
    // Service tasks are retried indefinitely; fail past the nominal limit to demonstrate that.
    private void serviceRunAndFail(JobDescriptor<ServiceJobExt> jobWithRetries, int[] delays, TimeUnit timeUnit) {
        int retryLimit = jobWithRetries.getExtensions().getRetryPolicy().getRetries();
        runJob(jobWithRetries);
        failRetryableTask(delays, timeUnit, retryLimit * 2); // Service jobs ignore retry limit value
    }
    @Test
    public void testBatchRetryPolicyResetIfTaskInStartedStateLongEnough() throws Exception {
        JobDescriptor<BatchJobExt> jobWithRetries = changeRetryPolicy(
                oneTaskBatchJobDescriptor().but(jd ->
                        jd.getExtensions().toBuilder().withRuntimeLimitMs(3600_000).build() // Prevent runtimeLimit timeout
                ),
                EXPONENTIAL
        );
        int retryLimit = jobWithRetries.getExtensions().getRetryPolicy().getRetries();
        testRetryPolicyResetIfTaskInStartedStateLongEnough(jobWithRetries, retryLimit);
    }
    @Test
    public void testServiceRetryPolicyResetIfTaskInStartedStateLongEnough() throws Exception {
        JobDescriptor<ServiceJobExt> jobWithRetries = changeRetryPolicy(oneTaskServiceJobDescriptor(), EXPONENTIAL);
        int retryLimit = jobWithRetries.getExtensions().getRetryPolicy().getRetries();
        testRetryPolicyResetIfTaskInStartedStateLongEnough(jobWithRetries, retryLimit);
    }
    /** A local system error (not a user failure) must be retried even with a zero-retry policy. */
    @Test
    public void testBatchSystemRetryStillRetriesOnNoRetriesJobs() {
        JobDescriptor<BatchJobExt> jobWithoutRetries = changeRetryPolicy(oneTaskBatchJobDescriptor(), NO_RETRIES);
        runJob(jobWithoutRetries)
                .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_LOCAL_SYSTEM_ERROR)
                .template(ScenarioTemplates.cleanAfterFinishedTaskAndRetry(0, 0, TaskStatus.REASON_LOCAL_SYSTEM_ERROR, 0L));
    }
    // Exhausts all but the last retry, keeps the final task Started for 5 minutes (resetting the
    // exponential retryer), then fails it and expects an immediate (0 delay) resubmit.
    private void testRetryPolicyResetIfTaskInStartedStateLongEnough(JobDescriptor<?> jobWithRetries, int retryLimit) {
        JobScenarioBuilder jobScenario = runJob(jobWithRetries);
        failRetryableTask(EXPONENTIAL_DELAYS_SEC, TimeUnit.SECONDS, retryLimit - 1);
        // Start the active task, and keep it running long enough to reset retryer
        jobScenario
                .template(ScenarioTemplates.startTask(0, retryLimit - 1, TaskState.Started))
                .advance(5, TimeUnit.MINUTES);
        // Now fail the task again, and expect it to restart immediately.
        jobScenario.template(ScenarioTemplates.failRetryableTask(0, retryLimit - 1, 0));
    }
    // Schedules the job and accepts its first task; returns the scenario for further driving.
    private JobScenarioBuilder runJob(JobDescriptor<?> job) {
        jobsScenarioBuilder.scheduleJob(job, jobScenario -> jobScenario
                .expectJobEvent()
                .template(ScenarioTemplates.acceptTask(0, 0))
        );
        return jobsScenarioBuilder.getJobScenario(0);
    }
    // Fails the task `retries` times, asserting the expected resubmit delay for each attempt.
    private void failRetryableTask(int[] delays, TimeUnit timeUnit, int retries) {
        JobScenarioBuilder jobScenario = jobsScenarioBuilder.getJobScenario(0);
        for (int i = 0; i < retries; i++) {
            int retryDelay = delays[i];
            jobScenario.template(ScenarioTemplates.failRetryableTask(0, i, timeUnit.toMillis(retryDelay)));
        }
    }
}
| 9,919 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/JobDisruptionBudgetTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.Collections;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeDisruptionBudget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.budget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.officeHourTimeWindow;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.percentageOfHealthyPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.selfManagedPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.unlimitedRate;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Verifies that a job's disruption budget can be replaced at runtime and that the update is
 * visible both in the persisted job record and in the emitted job event.
 */
public class JobDisruptionBudgetTest {
    // Initial budget: self-managed relocation with a 10s grace period.
    private static final DisruptionBudget SELF_MANAGED_BUDGET =
            budget(selfManagedPolicy(10_000), unlimitedRate(), Collections.singletonList(officeHourTimeWindow()));
    // Replacement budget: keep at least 80% of tasks healthy.
    private static final DisruptionBudget PERCENTAGE_BUDGET =
            budget(percentageOfHealthyPolicy(80), unlimitedRate(), Collections.singletonList(officeHourTimeWindow()));
    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    @Test
    public void testDisruptionBudgetUpdate() {
        JobDescriptor<ServiceJobExt> selfManagedJob = changeDisruptionBudget(oneTaskServiceJobDescriptor(), SELF_MANAGED_BUDGET);
        jobsScenarioBuilder.scheduleJob(selfManagedJob, jobScenario -> jobScenario
                .expectJobEvent()
                .changeDisruptionBudget(PERCENTAGE_BUDGET)
                // Both the store record and the event stream must reflect the new budget.
                .expectJobUpdatedInStore(updated -> assertThat(updated.getJobDescriptor().getDisruptionBudget()).isEqualTo(PERCENTAGE_BUDGET))
                .expectJobEvent(updated -> assertThat(updated.getJobDescriptor().getDisruptionBudget()).isEqualTo(PERCENTAGE_BUDGET))
        );
    }
}
| 9,920 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/MasterBootstrapTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeServiceJobCapacity;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Exercises TitusMaster failover/reboot behavior: tasks stuck in transient states across a
 * restart, jobs with invalid persisted data, and tasks accepted without a compute provider task.
 */
public class MasterBootstrapTest {
    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    @Test
    public void testBatchTaskStuckInLaunchedStateBeforeFailover() {
        testTaskStuckInLaunchedStateBeforeFailover(JobDescriptorGenerator.oneTaskBatchJobDescriptor());
    }
    @Test
    public void testServiceTaskStuckInLaunchedStateBeforeFailover() {
        testTaskStuckInLaunchedStateBeforeFailover(JobDescriptorGenerator.oneTaskServiceJobDescriptor());
    }
    // A task stuck in Launched (store broken, timeout passed) must be moved to KillInitiated
    // after the reboot once the launched timeout elapses again.
    private void testTaskStuckInLaunchedStateBeforeFailover(JobDescriptor<?> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                .expectTaskAddedToStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Accepted))
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted)
                .expectComputeProviderCreateRequest(0, 0)
                .triggerSchedulerLaunchEvent(0, 0)
                .expectTaskStateChangeEvent(0, 0, TaskState.Launched)
                .advance(JobsScenarioBuilder.LAUNCHED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
                // Break the store so the pre-failover timeout handling cannot persist its result.
                .breakStore()
                .advance().advance()
                .enableStore()
        ).reboot()
                .inJob(0, jobScenario -> jobScenario
                        .expectTaskInActiveState(0, 0, TaskState.Launched)
                        .advance(JobsScenarioBuilder.LAUNCHED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
                        .advance()
                        .expectTaskInActiveState(0, 0, TaskState.KillInitiated)
                );
    }
    /**
     * This test passes because by default {@link com.netflix.titus.master.jobmanager.service.JobManagerConfiguration#isFailOnDataValidation}
     * is turned off.
     */
    @Test
    public void testRebootWithJobHavingNegativeDesiredSize() {
        JobDescriptor<ServiceJobExt> emptyJob = changeServiceJobCapacity(JobDescriptorGenerator.oneTaskServiceJobDescriptor(), Capacity.newBuilder().build());
        jobsScenarioBuilder.scheduleJob(emptyJob, jobScenario -> jobScenario
                .expectJobEvent()
                // Corrupt the persisted record directly; expression lambda replaces the old { return ...; } form.
                .modifyJobStoreRecord(jobStoreRecord ->
                        changeServiceJobCapacity(jobStoreRecord, Capacity.newBuilder().withMin(-1).withDesired(-1).build())
                )
        ).reboot()
                .inJob(0, jobScenario -> jobScenario
                        // The invalid capacity survives the reboot because data validation is lenient.
                        .assertServiceJob(job ->
                                assertThat(job.getJobDescriptor().getExtensions().getCapacity().getDesired()).isEqualTo(-1)
                        )
                );
    }
    @Test
    public void testRestartWithBasicTaskAcceptedWithoutComputeProviderTask() {
        testRestartWithTaskAcceptedWithoutComputeProviderTask(JobDescriptorGenerator.oneTaskBatchJobDescriptor());
    }
    @Test
    public void testRestartWithServiceTaskAcceptedWithoutComputeProviderTask() {
        testRestartWithTaskAcceptedWithoutComputeProviderTask(JobDescriptorGenerator.oneTaskServiceJobDescriptor());
    }
    // A task persisted as Accepted but never handed to the compute provider must get its create
    // request issued after the reboot.
    private void testRestartWithTaskAcceptedWithoutComputeProviderTask(JobDescriptor<?> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                .expectTaskAddedToStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Accepted))
        ).reboot()
                .inJob(0, jobScenario -> jobScenario
                        .expectTaskInActiveState(0, 0, TaskState.Accepted)
                        .advance()
                        .expectComputeProviderCreateRequest(0, 0)
                );
    }
}
| 9,921 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/ServiceJobSchedulingTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobProcesses;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.common.util.code.RecordingCodeInvariants;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import org.junit.After;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeServiceJobCapacity;
import static com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder.CONCURRENT_STORE_UPDATE_LIMIT;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
public class ServiceJobSchedulingTest {
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    /** After every test, fails if any code invariant violations were recorded during the scenario run. */
    @After
    public void tearDown() {
        // JobsScenarioBuilder installs a RecordingCodeInvariants, so this cast is safe here.
        RecordingCodeInvariants invariants = (RecordingCodeInvariants) jobsScenarioBuilder.getTitusRuntime().getCodeInvariants();
        assertThat(invariants.getViolations()).describedAs("Invariant violations found").isEmpty();
    }
    /**
     * Run single service task that terminates with exit code 0. The task should be resubmitted.
     * When the job is killed, its task is killed as well.
     */
    @Test
    public void testTaskCompletingOkIsResubmitted() {
        testTaskCompletingOkIsResubmitted(0);
    }
    /**
     * Run single service task that terminates with a non-zero (failure) exit code. The task should
     * still be resubmitted. When the job is killed, its task is killed as well.
     */
    @Test
    public void testFailingTaskIsResubmitted() {
        testTaskCompletingOkIsResubmitted(-1);
    }
    // Shared scenario: task finishes with the given exit code, a replacement (resubmit 1) is
    // accepted, then the job is killed and the replacement is terminated with it.
    private void testTaskCompletingOkIsResubmitted(int errorCode) {
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.triggerComputeProviderFinishedEvent(0, 0, errorCode))
                .template(ScenarioTemplates.acceptTask(0, 1))
                .template(ScenarioTemplates.killJob())
                .template(ScenarioTemplates.reconcilerTaskKill(0, 1))
                .template(ScenarioTemplates.handleTaskFinishedTransitionInSingleTaskJob(0, 1, TaskStatus.REASON_TASK_KILLED))
        );
    }
/**
* Run service with multiple tasks that terminate with exit code 0. The tasks should be resubmitted.
* When the job is killed, all tasks are killed as well.
*/
@Test
public void testAllTasksInJobAreResubmittedWhenCompleteOk() {
JobDescriptor<ServiceJobExt> twoTaskJob = changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 2);
jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
.expectJobEvent()
.advance()
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.triggerComputeProviderFinishedEvent(taskIdx, resubmit, 0))
.advance().advance()
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
.template(ScenarioTemplates.killJob())
.inActiveTasks((taskIdx, resubmit) -> js -> js
.template(ScenarioTemplates.reconcilerTaskKill(taskIdx, resubmit))
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished))
)
.template(ScenarioTemplates.verifyJobWithFinishedTasksCompletes())
);
}
    /** A service job scaled to zero has no tasks but must stay active (no auto-completion). */
    @Test
    public void testZeroSizeJobIsNotCompletedAutomatically() {
        JobDescriptor<?> emptyJob = JobFunctions.changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 0);
        jobsScenarioBuilder.scheduleJob(emptyJob, jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .expectNoStoreUpdate()
                .expectNoJobStateChangeEvent()
        );
    }
    /** Scaling desired 1 -> 2 creates a new task (index 1) in the Accepted state. */
    @Test
    public void testJobScaleUp() {
        Capacity newCapacity = Capacity.newBuilder().withMin(0).withDesired(2).withMax(5).build();
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.changeJobCapacity(newCapacity))
                .advance()
                .expectTaskInActiveState(1, 0, TaskState.Accepted)
        );
    }
    /** Capacity updates on a finished job must be rejected with a {@link JobManagerException}. */
    @Test(expected = JobManagerException.class)
    public void testFinishedJobScaleUp() {
        Capacity newCapacity = Capacity.newBuilder().withMin(0).withDesired(2).withMax(5).build();
        // NOTE(review): testNoUpdatesAllowedForFinishedJob is defined later in this class (outside
        // this view); presumably it drives the job to Finished before applying the update — confirm.
        testNoUpdatesAllowedForFinishedJob(jobScenario -> jobScenario
                .template(ScenarioTemplates.changeJobCapacity(newCapacity))
        );
    }
@Test
public void testJobScaleDown() {
Capacity newCapacity = Capacity.newBuilder().withMin(0).withDesired(1).withMax(5).build();
JobDescriptor<ServiceJobExt> twoTaskJob = changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 2);
jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
.expectJobEvent()
.advance()
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
.template(ScenarioTemplates.changeJobCapacity(newCapacity))
.advance()
.firstTaskMatch(task -> task.getStatus().getState() == TaskState.KillInitiated, matchingTask -> {
assertThat(matchingTask.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_SCALED_DOWN);
})
);
}
    /**
     * Scaling desired to 0 while the only task is already in KillInitiated must not leave any
     * active task behind once the kill completes.
     */
    @Test
    public void testJobScaleDownWithTaskInKillInitiatedState() {
        Capacity newCapacity = Capacity.newBuilder().withMin(0).withDesired(0).withMax(5).build();
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                // Kill first so the task is in KillInitiated when the scale-down arrives.
                .killTask(0, 0)
                .template(ScenarioTemplates.changeJobCapacity(newCapacity))
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
                .triggerComputePlatformFinishedEvent(0, 0)
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
                .advance().advance().advance()
                .allActiveTasks(task -> fail("No active task expected, but found: " + task))
        );
    }
@Test
public void testJobScaleDownWithParallelTerminateAndShrink() {
JobDescriptor<ServiceJobExt> twoTaskJob = changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 2);
Capacity newCapacity = Capacity.newBuilder().withMin(0).withDesired(0).withMax(5).build();
jobsScenarioBuilder.withConcurrentStoreUpdateLimit(1)
.scheduleJob(twoTaskJob, jobScenario -> jobScenario
.expectJobEvent()
.advance()
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
.changeCapacity(newCapacity)
.allTasks(tasks -> tasks.forEach(jobScenario::killTaskAndShrinkNoWait))
.advance().advance().advance()
.assertServiceJob(job -> {
Capacity capacity = job.getJobDescriptor().getExtensions().getCapacity();
assertThat(capacity.getMin()).isEqualTo(0);
assertThat(capacity.getDesired()).isEqualTo(0);
})
);
}
    /** A task killed while still in Accepted moves through KillInitiated to Finished. */
    @Test
    public void testTaskTerminateInAcceptedState() {
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .template(ScenarioTemplates.acceptTask(0, 0))
                .killTask(0, 0)
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
                .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
        );
    }
    /**
     * If the kill of an Accepted task does not complete within the kill-initiated timeout, the
     * kill is re-attempted until the compute platform reports the task finished.
     */
    @Test
    public void testTaskTerminateInAcceptedStateKillRetries() {
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .template(ScenarioTemplates.acceptTask(0, 0))
                .killTask(0, 0)
                .expectComputeProviderTaskFinished(0, 0)
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
                // Let the kill-initiated timeout expire so a second kill attempt is issued.
                .advance(2 * JobsScenarioBuilder.KILL_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
                .expectComputeProviderTaskFinished(0, 0)
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
                .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
        );
    }
@Test
public void testTaskTerminateAndShrinkReducesJobSize() {
JobDescriptor<ServiceJobExt> twoTaskJob = changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 3);
jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
.expectJobEvent()
.advance()
.inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
.killTaskAndShrink(0, 0)
.expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
.expectTaskUpdatedInStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated))
.triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_KILLED)
.expectTaskStateChangeEvent(0, 0, TaskState.Finished, TaskStatus.REASON_TASK_KILLED)
.expectTaskUpdatedInStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished))
.expectedTaskArchivedInStore(0, 0)
.expectArchivedTaskEvent(0, 0)
.advance()
.advance()
.expectNoTaskStateChangeEvent()
.expectServiceJobEvent(job -> assertThat(job.getJobDescriptor().getExtensions().getCapacity().getDesired()).isEqualTo(2))
);
}
    /** Re-applying the job's current capacity must be a no-op: no store writes, no events. */
    @Test
    public void testJobCapacityUpdateToIdenticalAsCurrentCapacityIsNoOp() {
        Capacity fixedCapacity = Capacity.newBuilder().withMin(1).withDesired(1).withMax(1).build();
        JobDescriptor<ServiceJobExt> job = JobFunctions.changeServiceJobCapacity(oneTaskServiceJobDescriptor(), fixedCapacity);
        jobsScenarioBuilder.scheduleJob(job, jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .changeCapacity(fixedCapacity)
                .advance()
                .advance()
                .expectNoStoreUpdate()
                .expectNoJobStateChangeEvent()
                .expectNoTaskStateChangeEvent()
        );
    }
@Test
public void testJobKillWithTaskInAcceptedStateWithRetries() {
jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> jobScenario
.template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
.template(ScenarioTemplates.killJob())
.expectComputeProviderTaskFinished(0, 0)
.expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
.advance(2 * JobsScenarioBuilder.KILL_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
.expectComputeProviderTaskFinished(0, 0)
.expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
.triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
.expectTaskStateChangeEvent(0, 0, TaskState.Finished)
.advance().advance()
.expectServiceJobEvent(job -> assertThat(job.getStatus().getState() == JobState.Finished))
);
}
    /**
     * Kills a job with many running tasks and verifies that KillInitiated transitions are
     * applied in batches no larger than {@code CONCURRENT_STORE_UPDATE_LIMIT}.
     */
    @Test
    public void testTaskKillConcurrencyIsLimitedWhenJobIsKilled() {
        jobsScenarioBuilder.scheduleJob(JobFunctions.changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 100), jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
                .advance()
                // Sanity check: the job is large enough to exercise the concurrency limit
                .allTasks(tasks -> assertThat(tasks.size() > CONCURRENT_STORE_UPDATE_LIMIT).isTrue())
                .ignoreAvailableEvents()
                .killJob()
                .expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated))
                .advance()
                .allTasks(tasks -> {
                    // Only the first batch of kills may be in flight after a single iteration
                    long killed = tasks.stream().filter(task -> task.getStatus().getState() == TaskState.KillInitiated).count();
                    assertThat(killed).isEqualTo(CONCURRENT_STORE_UPDATE_LIMIT);
                })
        );
    }
    /**
     * Scales a large job down to zero and verifies that KillInitiated transitions are applied
     * in batches no larger than {@code CONCURRENT_STORE_UPDATE_LIMIT}.
     */
    @Test
    public void testTaskKillConcurrencyIsLimitedWhenJobIsScaledDown() {
        jobsScenarioBuilder.scheduleJob(JobFunctions.changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 100), jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
                .advance()
                // Sanity check: the job is large enough to exercise the concurrency limit
                .allTasks(tasks -> assertThat(tasks.size() > CONCURRENT_STORE_UPDATE_LIMIT).isTrue())
                .ignoreAvailableEvents()
                .changeCapacity(0, 0, 0)
                .advance()
                .allTasks(tasks -> {
                    // Only the first batch of kills may be in flight after a single iteration
                    long killed = tasks.stream().filter(task -> task.getStatus().getState() == TaskState.KillInitiated).count();
                    assertThat(killed).isEqualTo(CONCURRENT_STORE_UPDATE_LIMIT);
                })
        );
    }
    /**
     * Toggles the service job enabled flag and verifies each change is reflected in the emitted
     * job events. A newly created service job is expected to start in the enabled state.
     */
    @Test
    public void testJobEnableStatus() {
        jobsScenarioBuilder.scheduleJob(oneTaskServiceJobDescriptor(), jobScenario -> {
                    // Applies a single enable/disable transition and verifies the resulting event
                    Consumer<Boolean> enable = enabled -> jobScenario
                            .changeJobEnabledStatus(enabled)
                            .expectJobEvent(job -> assertJobEnableState(job, enabled));
                    return jobScenario
                            .expectJobEvent()
                            .assertServiceJob(job -> assertJobEnableState(job, true))
                            .andThen(() -> enable.accept(false))
                            .andThen(() -> enable.accept(true))
                            .andThen(() -> enable.accept(false));
                }
        );
    }
    /**
     * Changing the enabled flag of an already finished job must fail with
     * {@link JobManagerException}.
     */
    @Test(expected = JobManagerException.class)
    public void testFinishedJobEnableStatus() {
        testNoUpdatesAllowedForFinishedJob(jobScenario -> jobScenario.changeJobEnabledStatus(false));
    }
    /**
     * Setting the enabled flag to its current value must be a no-op: no store writes and no
     * job state change events.
     */
    @Test
    public void testJobEnableStatusUpdateToIdenticalValue() {
        JobDescriptor<?> emptyJob = JobFunctions.changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 0);
        jobsScenarioBuilder.scheduleJob(emptyJob, jobScenario -> jobScenario
                .expectJobEvent()
                .assertServiceJob(job -> assertJobEnableState(job, true))
                // The job is already enabled, so this request should change nothing
                .changeJobEnabledStatus(true)
                .advance()
                .expectNoStoreUpdate()
                .expectNoJobStateChangeEvent()
        );
    }
    /**
     * Updating the service job processes of an already finished job must fail with
     * {@link JobManagerException}.
     */
    @Test(expected = JobManagerException.class)
    public void testFinishedJobServiceProcessesUpdate() {
        testNoUpdatesAllowedForFinishedJob(jobScenario -> jobScenario
                // NOTE(review): 'changServiceJobProcesses' (missing 'e') is the scenario builder API name
                .changServiceJobProcesses(ServiceJobProcesses.newBuilder().withDisableIncreaseDesired(true).build())
        );
    }
    /**
     * Drives a zero-sized job to the Finished state and then applies the given update, which is
     * expected to fail (callers annotate their test method with the expected exception type).
     *
     * @param changeFun update operation to attempt on the finished job
     */
    private void testNoUpdatesAllowedForFinishedJob(Consumer<JobScenarioBuilder> changeFun) {
        JobDescriptor<ServiceJobExt> zeroSizeJob = oneTaskServiceJobDescriptor().but(JobFunctions.ofServiceSize(0));
        jobsScenarioBuilder.scheduleJob(zeroSizeJob, jobScenario -> {
                    jobScenario
                            .ignoreAvailableEvents()
                            .killJob()
                            .expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated))
                            .expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
                            .ignoreAvailableEvents();
                    // The actual (expected-to-fail) update under test
                    changeFun.accept(jobScenario);
                    return jobScenario;
                }
        );
    }
private void assertJobEnableState(Job<?> job, boolean enabled) {
Job<ServiceJobExt> serviceJob = (Job<ServiceJobExt>) job;
assertThat(serviceJob.getJobDescriptor().getExtensions().isEnabled()).describedAs("Expecting job in enable state: %s", enabled).isEqualTo(enabled);
}
}
| 9,922 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/MoveTaskTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.junit.Test;
import rx.schedulers.TestScheduler;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
public class MoveTaskTest {

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    private final TestScheduler testScheduler = jobsScenarioBuilder.getTestScheduler();
    private final CallMetadata callMetadata = CallMetadata.newBuilder().withCallReason("Testing move task").withCallerId("testkit").build();

    /**
     * Happy path: moving a task between two compatible service jobs shrinks the source job to 0,
     * grows the target job to 2, and flags the moved task as coming from another job.
     */
    @Test
    public void testMove() {
        JobDescriptor<ServiceJobExt> jobDescriptor = oneTaskServiceJobDescriptor();
        String targetJobId = startNewJob(jobDescriptor).getJobId();
        String sourceJobId = startNewJob(jobDescriptor).getJobId();

        jobsScenarioBuilder.getJobScenario(1).moveTask(0, 0, sourceJobId, targetJobId)
                .expectJobEvent(job -> assertThat(JobFunctions.getJobDesiredSize(job)).isEqualTo(0));

        jobsScenarioBuilder.getJobScenario(0)
                .expectJobEvent(job -> assertThat(JobFunctions.getJobDesiredSize(job)).isEqualTo(2))
                .expectTaskEvent(1, 0, event -> assertThat(event.isMovedFromAnotherJob()).isTrue());
    }

    /**
     * Moving an unknown task must fail with {@code TaskNotFound}.
     */
    @Test
    public void testMoveWithInvalidTaskId() {
        ExtTestSubscriber<Void> testSubscriber = new ExtTestSubscriber<>();
        jobsScenarioBuilder.getJobOperations().moveServiceTask("sourceJobId", "someJobId", "someTaskId", callMetadata).subscribe(testSubscriber);
        assertThat(testSubscriber.isError()).isTrue();
        assertThat(((JobManagerException) testSubscriber.getError()).getErrorCode()).isEqualTo(JobManagerException.ErrorCode.TaskNotFound);
    }

    /**
     * Moving a task out of a batch (non-service) job must fail with {@code NotServiceJob}.
     */
    @Test
    public void testMoveWithBatchTask() {
        try {
            startNewJob(oneTaskBatchJobDescriptor()).moveTask(0, 0, "someSrcJobId", "someTargetJobId");
            // FIX: the original test silently passed when no exception was thrown
            throw new AssertionError("Expected JobManagerException(NotServiceJob)");
        } catch (JobManagerException e) {
            assertThat(e.getErrorCode()).isEqualTo(JobManagerException.ErrorCode.NotServiceJob);
        }
    }

    /**
     * Moving a task into a batch (non-service) target job must fail with {@code NotServiceJob}.
     */
    @Test
    public void testMoveWithInvalidTargetJob() {
        JobDescriptor<ServiceJobExt> jobDescriptor = oneTaskServiceJobDescriptor();
        JobScenarioBuilder sourceJobBuilder = startNewJob(jobDescriptor);
        String sourceJobId = sourceJobBuilder.getJobId();
        String targetJobId = startNewJob(oneTaskBatchJobDescriptor()).getJobId();
        try {
            sourceJobBuilder.moveTask(0, 0, sourceJobId, targetJobId);
            // FIX: the original test silently passed when no exception was thrown
            throw new AssertionError("Expected JobManagerException(NotServiceJob)");
        } catch (JobManagerException e) {
            assertThat(e.getErrorCode()).isEqualTo(JobManagerException.ErrorCode.NotServiceJob);
        }
    }

    /**
     * Moving a task between jobs with different images must fail with {@code JobsNotCompatible},
     * and the error message must point at the differing field.
     */
    @Test
    public void testMoveWithIncompatibleTargetJob() {
        JobDescriptor<ServiceJobExt> jobDescriptor = oneTaskServiceJobDescriptor();
        JobScenarioBuilder sourceJobBuilder = startNewJob(jobDescriptor);
        String sourceJobId = sourceJobBuilder.getJobId();

        JobDescriptor<ServiceJobExt> incompatible = jobDescriptor.but(descriptor ->
                descriptor.getContainer().but(container -> container.getImage().toBuilder()
                        .withName("other/image")
                        .build()
                )
        );
        String targetJobId = startNewJob(incompatible).getJobId();
        try {
            sourceJobBuilder.moveTask(0, 0, sourceJobId, targetJobId);
            // FIX: the original test silently passed when no exception was thrown
            throw new AssertionError("Expected JobManagerException(JobsNotCompatible)");
        } catch (JobManagerException e) {
            assertThat(e.getErrorCode()).isEqualTo(JobManagerException.ErrorCode.JobsNotCompatible);
            assertThat(e.getMessage()).contains("container.image.name");
        }
    }

    /**
     * A broken store must fail the move operation and leave both jobs unchanged (one task each).
     */
    @Test
    public void testMoveWithStoreUpdateFailure() {
        JobDescriptor<ServiceJobExt> jobDescriptor = oneTaskServiceJobDescriptor();
        String targetJobId = startNewJob(jobDescriptor).getJobId();
        JobScenarioBuilder sourceJobBuilder = startNewJob(jobDescriptor);
        String sourceJobId = sourceJobBuilder.getJobId();
        try {
            sourceJobBuilder.advance()
                    .breakStore()
                    .allTasks(tasks -> assertThat(tasks).hasSize(1))
                    .moveTask(0, 0, sourceJobId, targetJobId);
            // FIX: fail explicitly if the broken store did not surface an error.
            // AssertionError extends Error, so it is not swallowed by the catch below.
            throw new AssertionError("Expected move to fail with a store error");
        } catch (Exception e) {
            assertThat(ExceptionExt.toMessageChain(e)).contains("Store is broken");
        }
        // Neither job should have gained or lost tasks
        jobsScenarioBuilder.getJobScenario(0).allTasks(tasks -> assertThat(tasks).hasSize(1));
        jobsScenarioBuilder.getJobScenario(1).allTasks(tasks -> assertThat(tasks).hasSize(1));
    }

    /**
     * With a slow store the move cannot complete, and the caller-side timeout fires.
     */
    @Test
    public void testMoveTimeout() {
        JobDescriptor<ServiceJobExt> jobDescriptor = oneTaskServiceJobDescriptor();
        JobScenarioBuilder sourceJobBuilder = startNewJob(jobDescriptor);
        String sourceJobId = sourceJobBuilder.getJobId();
        String targetJobId = startNewJob(jobDescriptor).getJobId();
        sourceJobBuilder.advance()
                .slowStore()
                .inTask(0, 0, task -> {
                    ExtTestSubscriber<Void> testSubscriber = new ExtTestSubscriber<>();
                    jobsScenarioBuilder.getJobOperations()
                            .moveServiceTask(sourceJobId, targetJobId, task.getId(), callMetadata)
                            .timeout(1, TimeUnit.SECONDS, testScheduler)
                            .subscribe(testSubscriber);
                    testScheduler.advanceTimeBy(2, TimeUnit.SECONDS);
                    assertThat(testSubscriber.isError()).isTrue();
                    assertThat(testSubscriber.getError()).isInstanceOf(TimeoutException.class);
                });
    }

    /**
     * Schedules a new job with a single started task and returns its scenario builder.
     */
    private <E extends JobDescriptor.JobDescriptorExt> JobScenarioBuilder startNewJob(JobDescriptor<E> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
        );
        return CollectionsExt.last(jobsScenarioBuilder.getJobScenarios());
    }
}
| 9,923 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/JobSchedulingCommonTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.UUID;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class JobSchedulingCommonTest {

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();

    /** Batch flavor of the accepted-but-unscheduled kill scenario. */
    @Test
    public void testBatchJobWithTaskInAcceptedStateNotScheduledYet() {
        testJobWithTaskInAcceptedStateNotScheduledYet(JobDescriptorGenerator.oneTaskBatchJobDescriptor());
    }

    /** Service flavor of the accepted-but-unscheduled kill scenario. */
    @Test
    public void testServiceJobWithTaskInAcceptedStateNotScheduledYet() {
        testJobWithTaskInAcceptedStateNotScheduledYet(JobDescriptorGenerator.oneTaskServiceJobDescriptor());
    }

    /** A batch job created with a federated job id attribute keeps that id as its own job id. */
    @Test
    public void testBatchJobWithFederatedJobId() {
        String expectedJobId = UUID.randomUUID().toString();
        JobDescriptor<?> descriptor = JobDescriptorGenerator.oneTaskBatchJobDescriptorWithAttributes(
                CollectionsExt.<String, String>newHashMap().entry(JobAttributes.JOB_ATTRIBUTES_FEDERATED_JOB_ID, expectedJobId).build());
        testJobWithTaskInAcceptedStateNotScheduledYetWithFederatedJobId(descriptor, expectedJobId);
    }

    /** A service job created with a federated job id attribute keeps that id as its own job id. */
    @Test
    public void testServiceJobWithFederatedJobId() {
        String expectedJobId = UUID.randomUUID().toString();
        JobDescriptor<?> descriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptorWithAttributes(
                CollectionsExt.<String, String>newHashMap().entry(JobAttributes.JOB_ATTRIBUTES_FEDERATED_JOB_ID, expectedJobId).build());
        testJobWithTaskInAcceptedStateNotScheduledYetWithFederatedJobId(descriptor, expectedJobId);
    }

    /**
     * Verifies that the federated job id supplied as a job attribute becomes the job id, is
     * recorded under the 'original federated job id' attribute, and that the request attribute
     * itself is removed from the stored descriptor.
     */
    private void testJobWithTaskInAcceptedStateNotScheduledYetWithFederatedJobId(JobDescriptor<?> oneTaskJobDescriptor, String federatedJobId) {
        jobsScenarioBuilder.scheduleJob(oneTaskJobDescriptor, jobScenario ->
                jobScenario.expectJobEvent(job -> {
                    assertThat(job.getId()).isEqualTo(federatedJobId);
                    assertThat(job.getJobDescriptor().getAttributes().get(JobAttributes.JOB_ATTRIBUTES_ORIGINAL_FEDERATED_JOB_ID)).isEqualTo(federatedJobId);
                    assertThat(job.getJobDescriptor().getAttributes().get(JobAttributes.JOB_ATTRIBUTES_FEDERATED_JOB_ID)).isNull();
                }));
    }

    /**
     * Covers the case where a task was written to the store but never handed to the compute
     * provider, and the job is terminated while in this state.
     */
    private void testJobWithTaskInAcceptedStateNotScheduledYet(JobDescriptor<?> oneTaskJobDescriptor) {
        jobsScenarioBuilder.scheduleJob(oneTaskJobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                .expectTaskAddedToStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Accepted))
                .template(ScenarioTemplates.killJob())
                .expectTaskStateChangeEvent(0, 0, TaskState.Accepted)
                .template(ScenarioTemplates.handleTaskFinishedTransitionInSingleTaskJob(0, 0, TaskStatus.REASON_TASK_KILLED))
        );
    }
}
| 9,924 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/JobRateLimitingTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor.JobDescriptorExt;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeBatchJobSize;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeRetryPolicy;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeServiceJobCapacity;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
public class JobRateLimitingTest {

    // FIX: renamed from RETREY_DELAY_MS (typo); private constant, no external callers.
    private static final long RETRY_DELAY_MS = 1_000;

    // Jobs three times larger than the per-iteration launch limit, with a single delayed retry.
    private static final JobDescriptor<BatchJobExt> LARGE_BATCH_JOB = changeRetryPolicy(
            changeBatchJobSize(oneTaskBatchJobDescriptor(), 3 * JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT),
            JobModel.newDelayedRetryPolicy().withDelay(RETRY_DELAY_MS, TimeUnit.MILLISECONDS).withRetries(1).build()
    );

    // FIX: renamed from LARGE_SERVIE_JOB (typo); private constant, no external callers.
    private static final JobDescriptor<ServiceJobExt> LARGE_SERVICE_JOB = changeRetryPolicy(
            changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 3 * JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT),
            JobModel.newDelayedRetryPolicy().withDelay(RETRY_DELAY_MS, TimeUnit.MILLISECONDS).withRetries(1).build()
    );

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();

    /**
     * Run a large batch job, and check that tasks are started in partitions, not all at once.
     */
    @Test
    public void testLargeBatchJobRateLimiting() {
        testLargeServiceJobRateLimiting(LARGE_BATCH_JOB);
    }

    /**
     * Run a large service job, and check that tasks are started in partitions, not all at once.
     */
    @Test
    public void testLargeServiceJobRateLimiting() {
        testLargeServiceJobRateLimiting(LARGE_SERVICE_JOB);
    }

    private void testLargeServiceJobRateLimiting(JobDescriptor<?> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                // Three iterations, each starting at most ACTIVE_NOT_STARTED_TASKS_LIMIT tasks
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(this::expectAllStarted)
        );
    }

    /**
     * Run a large batch job with tasks immediately failing. Check that each failed partition is
     * retried before the job eventually reaches the all-started state.
     */
    @Test
    public void testLargeBatchJobWithFailingTasksRateLimiting() {
        testLargeBatchJobWithFailingTasksRateLimiting(LARGE_BATCH_JOB);
    }

    /**
     * Service job variant of {@link #testLargeBatchJobWithFailingTasksRateLimiting()}.
     */
    @Test
    public void testLargeServiceJobWithFailingTasksRateLimiting() {
        testLargeBatchJobWithFailingTasksRateLimiting(LARGE_SERVICE_JOB);
    }

    private void testLargeBatchJobWithFailingTasksRateLimiting(JobDescriptor<?> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                // First batch fails, and is retried
                .advance().allTasks(tasks -> acceptAndFailNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> startNewBatch(jobScenario, tasks))
                // Second batch succeeds immediately
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                // Third batch fails again
                .advance().allTasks(tasks -> acceptAndFailNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> startNewBatch(jobScenario, tasks))
                .advance().allTasks(this::expectAllStarted)
        );
    }

    /**
     * Run all tasks of a large batch job, fail them all, and check that they are rescheduled
     * in partitions.
     */
    @Test
    public void testLargeBatchJobTaskRetryRateLimiting() {
        testLargeServiceJobTaskRetryRateLimiting(LARGE_BATCH_JOB);
    }

    /**
     * Service job variant of {@link #testLargeBatchJobTaskRetryRateLimiting()}.
     */
    @Test
    public void testLargeServiceJobTaskRetryRateLimiting() {
        testLargeServiceJobTaskRetryRateLimiting(LARGE_SERVICE_JOB);
    }

    private void testLargeServiceJobTaskRetryRateLimiting(JobDescriptor<?> jobDescriptor) {
        jobsScenarioBuilder.scheduleJob(jobDescriptor, jobScenario -> jobScenario
                .expectJobEvent()
                // Start all tasks
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                // Fail all tasks
                .advance().allTasks(tasks -> tasks.forEach(task -> jobScenario.triggerComputePlatformFinishedEvent(task, -1, TaskStatus.REASON_FAILED)))
                .advance(1, TimeUnit.SECONDS)
                // Expect all to be restarted in partitions
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(tasks -> acceptAndStartNewPartition(jobScenario, tasks))
                .advance().allTasks(this::expectAllStarted)
        );
    }

    // Accepts and starts the next partition of tasks in the Accepted state.
    // FIX: dropped the unused <E extends JobDescriptorExt> type parameter (here and below).
    private void acceptAndStartNewPartition(JobScenarioBuilder jobScenario, List<Task> tasks) {
        jobScenario.inAllTasks(filterActiveTasks(tasks, JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT), (taskIdx, resubmit) ->
                jobScenario
                        .template(ScenarioTemplates.acceptTask(taskIdx, resubmit))
                        .template(ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
        );
    }

    // Accepts the next partition and fails each task so the delayed retry policy kicks in.
    private void acceptAndFailNewPartition(JobScenarioBuilder jobScenario, List<Task> tasks) {
        jobScenario.inAllTasks(filterActiveTasks(tasks, JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT), (taskIdx, resubmit) ->
                jobScenario
                        .template(ScenarioTemplates.acceptTask(taskIdx, resubmit))
                        .template(ScenarioTemplates.failRetryableTask(taskIdx, resubmit, RETRY_DELAY_MS))
        );
    }

    // Starts the current partition of accepted tasks (used after a failed partition was resubmitted).
    private void startNewBatch(JobScenarioBuilder jobScenario, List<Task> tasks) {
        jobScenario.inAllTasks(filterActiveTasks(tasks, JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT), (taskIdx, resubmit) ->
                jobScenario
                        .template(ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
        );
    }

    // Asserts that every task of the (3 * limit)-sized job reached the Started state.
    private void expectAllStarted(List<Task> tasks) {
        assertThat(tasks).hasSize(3 * JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT);
        assertThat(tasks.stream().filter(t -> t.getStatus().getState() == TaskState.Started)).hasSize(3 * JobsScenarioBuilder.ACTIVE_NOT_STARTED_TASKS_LIMIT);
    }

    // Returns tasks still in the Accepted state, asserting that exactly 'expected' of them exist.
    private List<Task> filterActiveTasks(List<Task> tasks, int expected) {
        List<Task> active = tasks.stream().filter(t -> t.getStatus().getState() == TaskState.Accepted).collect(Collectors.toList());
        assertThat(active).describedAs("Expected to find %d active tasks, but is %d", expected, active.size()).hasSize(expected);
        return active;
    }
}
| 9,925 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/BatchJobSchedulingTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.jobmanager.service.JobManagerException.ErrorCode;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.jobmanager.service.integration.scenario.ScenarioTemplates;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeBatchJobSize;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeRetryLimit;
import static com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder.CONCURRENT_STORE_UPDATE_LIMIT;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
public class BatchJobSchedulingTest {
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder();
    /**
     * Run a single batch task that terminates with exit code 0; the job completes normally.
     */
    @Test
    public void testRunAndCompleteOkOneJobTask() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.finishSingleTaskJob(0, 0, TaskStatus.REASON_NORMAL, 0))
        );
    }
    /**
     * Run a batch job with two tasks, all terminating with exit code 0; the job completes.
     */
    @Test
    public void testRunAndCompleteOkJobWithManyTasks() {
        JobDescriptor<BatchJobExt> twoTaskJob = changeBatchJobSize(oneTaskBatchJobDescriptor(), 2);
        jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.triggerComputeProviderFinishedEvent(taskIdx, resubmit, 0))
                .template(ScenarioTemplates.verifyJobWithFinishedTasksCompletes())
        );
    }
    /**
     * A task that completes with exit code 0 must not be restarted, even though the retry
     * policy (limit 1) would still allow a resubmit.
     */
    @Test
    public void testTaskCompletedOkIsNotRestarted() {
        JobDescriptor<BatchJobExt> jobWithOneRetry = changeRetryLimit(oneTaskBatchJobDescriptor(), 1);
        jobsScenarioBuilder.scheduleJob(jobWithOneRetry, jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.finishSingleTaskJob(0, 0, TaskStatus.REASON_NORMAL, 0))
        );
    }
    /**
     * A task failing with a non-zero exit code and no retry budget finishes the job immediately.
     */
    @Test
    public void testFailedTaskWithNoRetriesFinishesImmediately() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.finishSingleTaskJob(0, 0, TaskStatus.REASON_FAILED, -1))
        );
    }
    /**
     * A failing retryable task is resubmitted once (retry limit 1); the second failure is the
     * last allowed one and terminates the job.
     */
    @Test
    public void testFailedTaskWithRetriesIsResubmitted() {
        JobDescriptor<BatchJobExt> jobWithOneRetry = changeRetryLimit(oneTaskBatchJobDescriptor(), 1);
        jobsScenarioBuilder.scheduleJob(jobWithOneRetry, jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.failRetryableTask(0, 0))
                .template(ScenarioTemplates.failLastBatchRetryableTask(0, 1))
        );
    }
    /**
     * In a job with multiple tasks, when some tasks complete and some fail, only the failed
     * ones are resubmitted; the job completes once the resubmitted tasks finish.
     */
    @Test
    public void testOnlyFailedTasksAreResubmittedInMultiTaskJob() {
        JobDescriptor<BatchJobExt> twoTaskJob = changeRetryLimit(changeBatchJobSize(oneTaskBatchJobDescriptor(), 2), 1);
        jobsScenarioBuilder.scheduleJob(twoTaskJob, jobScenario -> jobScenario
                .expectJobEvent()
                .advance()
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
                .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
                // Task 0 completes; task 1 fails and comes back as resubmit index 1
                .template(ScenarioTemplates.triggerComputeProviderFinishedEvent(0, 0, 0))
                .template(ScenarioTemplates.triggerComputeProviderFinishedEvent(1, 0, -1))
                .template(ScenarioTemplates.acceptTask(1, 1))
                .template(ScenarioTemplates.startTask(1, 1, TaskState.Started))
                .template(ScenarioTemplates.triggerComputeProviderFinishedEvent(1, 1, 0))
                .template(ScenarioTemplates.verifyJobWithFinishedTasksCompletes())
        );
    }
    /**
     * See {@link #testKillingRetryableTaskInActiveState(TaskState)}; exercises the Accepted state.
     */
    @Test
    public void testKillingRetryableTaskInAcceptedState() {
        testKillingRetryableTaskInActiveState(TaskState.Accepted);
    }
    /**
     * See {@link #testKillingRetryableTaskInActiveState(TaskState)}.
     * NOTE(review): the method name says StartInitiated but the test exercises
     * {@code TaskState.Launched} — confirm whether this mismatch is intentional.
     */
    @Test
    public void testKillingRetryableTaskInStartInitiatedState() {
        testKillingRetryableTaskInActiveState(TaskState.Launched);
    }
    /**
     * See {@link #testKillingRetryableTaskInActiveState(TaskState)}; exercises the Started state.
     */
    @Test
    public void testKillingRetryableTaskInStartedState() {
        testKillingRetryableTaskInActiveState(TaskState.Started);
    }
    /**
     * Run a retryable job with its task driven to the given active state, kill the task, and
     * check that it is resubmitted (resubmit index 1) afterwards.
     *
     * @param taskState active state the task is moved to before the kill
     */
    private void testKillingRetryableTaskInActiveState(TaskState taskState) {
        JobDescriptor<BatchJobExt> jobWithOneRetry = changeRetryLimit(oneTaskBatchJobDescriptor(), 1);
        jobsScenarioBuilder.scheduleJob(jobWithOneRetry, jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, taskState))
                .template(ScenarioTemplates.killBatchTask(0, 0))
                .template(ScenarioTemplates.acceptTask(0, 1))
        );
    }
    /**
     * A scheduler-triggered kill ({@code Trigger.Scheduler}) is recorded with the transient
     * system error reason; the replacement task carries resubmit number 1 and system resubmit
     * number 1.
     */
    @Test
    public void testKillingTaskInFenzoCallback() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .expectTaskInActiveState(0, 0, TaskState.Accepted)
                .killTask(0, 0, V3JobOperations.Trigger.Scheduler)
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated, TaskStatus.REASON_TRANSIENT_SYSTEM_ERROR)
                .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
                .expectTaskStateChangeEvent(0, 0, TaskState.Finished, TaskStatus.REASON_TASK_LOST)
                .advance()
                .template(ScenarioTemplates.acceptTask(0, 1))
                .inTask(0, 1, task -> {
                    assertThat(task.getResubmitNumber()).isEqualTo(1);
                    assertThat(task.getSystemResubmitNumber()).isEqualTo(1);
                })
        );
    }
    /**
     * Killing a task that is already in the KillInitiated state is rejected with
     * {@code TaskTerminating} and causes no additional store updates or events.
     */
    @Test
    public void testKillingTaskInKillInitiatedState() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .killTask(0, 0)
                .expectTaskUpdatedInStore(0, 0, task -> {
                    assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated);
                    assertThat(task.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_TASK_KILLED);
                })
                .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated, TaskStatus.REASON_TASK_KILLED)
                // Second kill while the first one is in flight must fail fast
                .expectFailure(() -> jobScenario.killTask(0, 0), error -> {
                    assertThat(error).isInstanceOf(JobManagerException.class);
                    assertThat(((JobManagerException) error).getErrorCode()).isEqualTo(ErrorCode.TaskTerminating);
                })
                .advance()
                .expectNoStoreUpdate()
                .expectNoTaskStateChangeEvent()
        );
    }
    /**
     * Killing a job with a running task terminates the task first; the job finishes once the
     * task reports Finished.
     */
    @Test
    public void testKillingJobInAcceptedState() {
        jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
                .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
                .template(ScenarioTemplates.killJob())
                .template(ScenarioTemplates.reconcilerTaskKill(0, 0))
                .template(ScenarioTemplates.handleTaskFinishedTransitionInSingleTaskJob(0, 0, TaskStatus.REASON_TASK_KILLED))
        );
    }
/**
 * Check killing a job with retryable tasks. There should be no task resubmit attempts.
 */
@Test
public void testJobKillWithTaskInAcceptedStateWithRetries() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.killJob())
            .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
            // After the KillInitiated timeout a kill reattempt produces a second event
            .advance(2 * JobsScenarioBuilder.KILL_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
            .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
            .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
            .expectTaskStateChangeEvent(0, 0, TaskState.Finished)
            .advance().advance()
            // The job finishes without resubmitting the task
            .expectJobEvent(job -> assertThat(job.getStatus().getState() == JobState.Finished))
    );
}
/**
 * Check that killing a job with a failed tasks, terminates the job.
 */
@Test
public void testKillingJobInAcceptedStateWithFailedTasks() {
    // Retry limit of 2 would normally allow resubmits; the kill must win over the retry
    JobDescriptor<BatchJobExt> jobWithRetries = JobFunctions.changeRetryLimit(oneTaskBatchJobDescriptor(), 2);

    jobsScenarioBuilder.scheduleJob(jobWithRetries, jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
            // Fail the task just before job kill operation is triggered
            .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_LOST)
            .template(ScenarioTemplates.killJob())
            .template(ScenarioTemplates.handleTaskFinishedTransitionInSingleTaskJob(0, 0, TaskStatus.REASON_TASK_LOST))
    );
}
/**
 * Check that killing a job that is already terminating is rejected with
 * {@code ErrorCode.JobTerminating} and causes no additional store updates or task events.
 */
@Test
public void testKillingJobInKillInitiatedState() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
            .template(ScenarioTemplates.killJob())
            .expectTaskUpdatedInStore(0, 0, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated))
            .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated)
            // Second kill while the first is still in progress must fail
            .expectFailure(jobScenario::killJob, error -> {
                assertThat(error).isInstanceOf(JobManagerException.class);
                assertThat(((JobManagerException) error).getErrorCode()).isEqualTo(ErrorCode.JobTerminating);
            })
            .expectNoStoreUpdate()
            .expectNoTaskStateChangeEvent()
    );
}
/**
 * When a 100-task job is killed, only {@code CONCURRENT_STORE_UPDATE_LIMIT} tasks may move to
 * KillInitiated per reconciliation pass, verifying the concurrent store-update rate limit.
 */
@Test
public void testTaskKillConcurrencyIsLimitedWhenJobIsKilled() {
    jobsScenarioBuilder.scheduleJob(JobFunctions.changeBatchJobSize(oneTaskBatchJobDescriptor(), 100), jobScenario -> jobScenario
            .expectJobEvent()
            .advance()
            // Drive every task to the Started state
            .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.acceptTask(taskIdx, resubmit))
            .inActiveTasks((taskIdx, resubmit) -> ScenarioTemplates.startTask(taskIdx, resubmit, TaskState.Started))
            .advance()
            // Sanity check: more tasks than the limit, so throttling is observable
            .allTasks(tasks -> assertThat(tasks.size() > CONCURRENT_STORE_UPDATE_LIMIT).isTrue())
            .ignoreAvailableEvents()
            .killJob()
            .expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated))
            .advance()
            .allTasks(tasks -> {
                // Exactly one batch of kills is processed in a single pass
                long killed = tasks.stream().filter(task -> task.getStatus().getState() == TaskState.KillInitiated).count();
                assertThat(killed).isEqualTo(CONCURRENT_STORE_UPDATE_LIMIT);
            })
    );
}
/**
 * A task failing with REASON_LOCAL_SYSTEM_ERROR is resubmitted even when the job's retry
 * policy allows zero retries: system errors do not consume the user retry budget.
 */
@Test
public void testSystemErrorsAreRetriedAlways() {
    // Zero user retries
    JobDescriptor<BatchJobExt> jobWithRetries = JobFunctions.changeRetryPolicy(
            oneTaskBatchJobDescriptor(),
            JobModel.newImmediateRetryPolicy().withRetries(0).build()
    );

    jobsScenarioBuilder.scheduleJob(jobWithRetries, jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
            // Fail the task just before job kill operation is triggered
            .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_LOCAL_SYSTEM_ERROR)
            .advance(60, TimeUnit.SECONDS)
            // Resubmit (index 1) happens despite the zero-retry policy
            .expectTaskStateChangeEvent(0, 1, TaskState.Accepted)
    );
}
/**
 * A task stuck in StartInitiated is killed by the system after the state timeout and is
 * resubmitted even with a zero-retry policy, mirroring the system-error retry behavior.
 */
@Test
public void testStuckInLaunchedIsRetriedAlways() {
    // Zero user retries
    JobDescriptor<BatchJobExt> jobWithRetries = JobFunctions.changeRetryPolicy(
            oneTaskBatchJobDescriptor(),
            JobModel.newImmediateRetryPolicy().withRetries(0).build()
    );

    jobsScenarioBuilder.scheduleJob(jobWithRetries, jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.StartInitiated))
            // Task will time out and move to KillInitiated by the system
            .advance(120, TimeUnit.SECONDS)
            .advance(JobsScenarioBuilder.MIN_RETRY_INTERVAL_MS, TimeUnit.MILLISECONDS)
            .expectTaskStateChangeEvent(0, 1, TaskState.Accepted)
    );
}
/**
 * Check task timeout in Launched state. if the timeout passes, task should be moved to KillInitiated state.
 */
@Test
public void testTaskLaunchingTimeout() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.Launched))
            .advance()
            // Exceed the Launched-state deadline configured in the scenario builder
            .advance(JobsScenarioBuilder.LAUNCHED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
            .advance()
            .expectTaskInActiveState(0, 0, TaskState.KillInitiated)
    );
}
/**
 * Check task timeout in StartInitiated state. if the timeout passes, task should be moved to KillInitiated state.
 */
@Test
public void testStartInitiatedTimeout() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.StartInitiated))
            .advance()
            // Exceed the StartInitiated-state deadline configured in the scenario builder
            .advance(JobsScenarioBuilder.START_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
            .advance()
            .expectTaskInActiveState(0, 0, TaskState.KillInitiated)
    );
}
/**
 * If timeout passes in KillInitiated state, instead of moving directly to Finished state, check that configured
 * number of kill reattempts is made. The total timeout in this state is (attempts_count * timeout).
 */
@Test
public void testKillReattemptsInKillInitiatedTimeout() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.StartInitiated))
            .template(ScenarioTemplates.killTask(0, 0))
            // First timeout window => reattempt; second => give up with STUCK_IN_KILLING_STATE
            .template(ScenarioTemplates.passKillInitiatedTimeoutWithKillReattempt(0, 0))
            .template(ScenarioTemplates.passFinalKillInitiatedTimeout())
            .template(ScenarioTemplates.handleTaskFinishedTransitionInSingleTaskJob(0, 0, TaskStatus.REASON_STUCK_IN_KILLING_STATE))
    );
}
/**
 * Capacity updates apply only to service jobs; for a batch job the call must fail with
 * {@code ErrorCode.NotServiceJob}.
 */
@Test
public void testJobCapacityUpdateNotPossibleForBatchJob() {
    // NOTE: the redundant "throws Exception" clause was dropped; nothing checked is thrown
    // here, and the sibling enable-status test below declares none either.
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .expectFailure(() -> jobScenario.changeCapacity(0, 5, 10), error -> {
                assertThat(error).isInstanceOf(JobManagerException.class);
                assertThat(((JobManagerException) error).getErrorCode()).isEqualTo(ErrorCode.NotServiceJob);
            })
    );
}
/**
 * Enabling/disabling applies only to service jobs; for a batch job the call must fail with
 * {@code ErrorCode.NotServiceJob}.
 */
@Test
public void testJobEnableStatusNotPossibleForBatchJob() {
    jobsScenarioBuilder.scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
            .expectFailure(() -> jobScenario.changeJobEnabledStatus(false), error -> {
                assertThat(error).isInstanceOf(JobManagerException.class);
                assertThat(((JobManagerException) error).getErrorCode()).isEqualTo(ErrorCode.NotServiceJob);
            })
    );
}
/**
 * Runtime limit exceeded with retryOnRuntimeLimit=true: task is resubmitted.
 */
@Test
public void testBatchJobRuntimeLimitWithRetries() {
    testBatchJobRuntimeLimit(true);
}
/**
 * Runtime limit exceeded with retryOnRuntimeLimit=false: the job finishes.
 */
@Test
public void testBatchJobRuntimeLimitWithNoRetries() {
    testBatchJobRuntimeLimit(false);
}
/**
 * When the store rejects the initial task write, task creation is retried after the store
 * recovers; the created task is a first submit (originalId == id), not a resubmit.
 */
@Test
public void testBatchJobRetriesWhenTaskCreateWriteToStoreFails() {
    jobsScenarioBuilder.breakStoreForTasks()
            .scheduleJob(oneTaskBatchJobDescriptor(), jobScenario -> jobScenario
                    .advance()
                    // Heal the store and give the reconciler time to retry the write
                    .enableStore()
                    .advance(500, TimeUnit.MILLISECONDS)
                    .expectTaskEvent(0, 0, event -> {
                        Task task = event.getCurrentTask();
                        assertThat(task.getOriginalId()).isEqualTo(task.getId());
                    })
            );
}
/**
 * Shared scenario for the runtime-limit tests: a task running past its 120s runtime limit is
 * killed with REASON_RUNTIME_LIMIT_EXCEEDED, then either resubmitted or the job finishes.
 *
 * @param retryOnRuntimeLimit whether exceeding the runtime limit should consume a retry
 *                            (resubmit the task) or finish the job
 */
private void testBatchJobRuntimeLimit(boolean retryOnRuntimeLimit) {
    JobDescriptor<BatchJobExt> jobWithRuntimeLimit = oneTaskBatchJobDescriptor().but(jd ->
            jd.getExtensions().toBuilder()
                    .withRetryPolicy(JobModel.newImmediateRetryPolicy().withRetries(1).build())
                    .withRuntimeLimitMs(120_000)
                    .withRetryOnRuntimeLimit(retryOnRuntimeLimit)
                    .build()
    );
    jobsScenarioBuilder.scheduleJob(jobWithRuntimeLimit, jobScenario -> jobScenario
            .template(ScenarioTemplates.acceptJobWithOneTask(0, 0))
            .template(ScenarioTemplates.startTask(0, 0, TaskState.Started))
            // Let the runtime limit elapse
            .advance(120_000, TimeUnit.MILLISECONDS)
            .expectTaskStateChangeEvent(0, 0, TaskState.KillInitiated, TaskStatus.REASON_RUNTIME_LIMIT_EXCEEDED)
            .triggerComputePlatformFinishedEvent(0, 0, -1, TaskStatus.REASON_TASK_KILLED)
            .andThen(() -> {
                if (retryOnRuntimeLimit) {
                    // Retry consumed: a replacement task with resubmit number 1 appears
                    jobScenario.expectTaskAddedToStore(0, 1, task -> assertThat(task.getResubmitNumber()).isEqualTo(1));
                } else {
                    jobScenario.expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished));
                }
            }
            )
    );
}
}
| 9,926 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/JobsScenarioBuilder.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import com.netflix.titus.api.FeatureActivationConfiguration;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor.JobDescriptorExt;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobAssertions;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobConfiguration;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobSanitizerBuilder;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.model.sanitizer.EntitySanitizerBuilder;
import com.netflix.titus.common.model.sanitizer.VerifierMode;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.limiter.Limiters;
import com.netflix.titus.common.util.limiter.tokenbucket.TokenBucket;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.jobmanager.service.DefaultV3JobOperations;
import com.netflix.titus.master.jobmanager.service.JobManagerConfiguration;
import com.netflix.titus.master.jobmanager.service.JobReconciliationFrameworkFactory;
import com.netflix.titus.master.jobmanager.service.JobServiceRuntime;
import com.netflix.titus.master.jobmanager.service.VersionSupplier;
import com.netflix.titus.master.jobmanager.service.VersionSuppliers;
import com.netflix.titus.master.jobmanager.service.batch.BatchDifferenceResolver;
import com.netflix.titus.master.jobmanager.service.integration.scenario.StubbedJobStore.StoreEvent;
import com.netflix.titus.master.jobmanager.service.limiter.JobSubmitLimiter;
import com.netflix.titus.master.jobmanager.service.service.ServiceDifferenceResolver;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static com.jayway.awaitility.Awaitility.await;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Test harness that wires a real {@link DefaultV3JobOperations} to stubbed store and compute
 * provider implementations, all driven by a single rx {@link TestScheduler} so scenarios can
 * deterministically advance virtual time. One {@link JobScenarioBuilder} is tracked per job
 * created through {@link #scheduleJob}.
 */
public class JobsScenarioBuilder {

    // Reconciler/retry/timeout settings stubbed into the mocked JobManagerConfiguration in the
    // constructor; tests reference the same constants when advancing the test scheduler.
    public static final long RECONCILER_ACTIVE_TIMEOUT_MS = 50L;
    public static final long RECONCILER_IDLE_TIMEOUT_MS = 50;
    public static final int ACTIVE_NOT_STARTED_TASKS_LIMIT = 5;
    public static final int CONCURRENT_STORE_UPDATE_LIMIT = 5;
    public static final long MIN_RETRY_INTERVAL_MS = 10;
    public static final long LAUNCHED_TIMEOUT_MS = 5_000;
    public static final long START_INITIATED_TIMEOUT_MS = 10_000;
    public static final long KILL_INITIATED_TIMEOUT_MS = 30_000;

    private final TestScheduler testScheduler = Schedulers.test();

    private final TitusRuntime titusRuntime = TitusRuntimes.test(testScheduler);

    private final JobManagerConfiguration configuration = mock(JobManagerConfiguration.class);
    private final FeatureActivationConfiguration featureActivationConfiguration = mock(FeatureActivationConfiguration.class);
    private final JobConfiguration jobSanitizerConfiguration = Archaius2Ext.newConfiguration(JobConfiguration.class);
    private final ApplicationSlaManagementService capacityGroupService = new StubbedApplicationSlaManagementService();
    private final StubbedJobStore jobStore = new StubbedJobStore();
    private final VersionSupplier versionSupplier;
    private final JobServiceRuntime runtime;

    // Read through the mock on every call (thenAnswer below), so tests may change it at
    // runtime via withConcurrentStoreUpdateLimit(...).
    private volatile int concurrentStoreUpdateLimit = CONCURRENT_STORE_UPDATE_LIMIT;

    // Re-created by reboot(), hence not final.
    private DefaultV3JobOperations jobOperations;

    private final ExtTestSubscriber<Pair<StoreEvent, ?>> storeEvents = new ExtTestSubscriber<>();

    private final List<JobScenarioBuilder> jobScenarioBuilders = new ArrayList<>();

    public JobsScenarioBuilder() {
        this.versionSupplier = VersionSuppliers.newInstance(titusRuntime.getClock());
        // Stub out all configuration values the reconciler reads.
        when(configuration.getReconcilerActiveTimeoutMs()).thenReturn(RECONCILER_ACTIVE_TIMEOUT_MS);
        when(configuration.getReconcilerIdleTimeoutMs()).thenReturn(RECONCILER_IDLE_TIMEOUT_MS);
        when(configuration.getMaxActiveJobs()).thenReturn(20_000L);
        when(configuration.getActiveNotStartedTasksLimit()).thenReturn(ACTIVE_NOT_STARTED_TASKS_LIMIT);
        when(configuration.getConcurrentReconcilerStoreUpdateLimit()).thenAnswer(invocation -> concurrentStoreUpdateLimit);
        when(configuration.getTaskInLaunchedStateTimeoutMs()).thenReturn(LAUNCHED_TIMEOUT_MS);
        when(configuration.getBatchTaskInStartInitiatedStateTimeoutMs()).thenReturn(START_INITIATED_TIMEOUT_MS);
        when(configuration.getTaskInKillInitiatedStateTimeoutMs()).thenReturn(KILL_INITIATED_TIMEOUT_MS);
        when(configuration.getMinRetryIntervalMs()).thenReturn(MIN_RETRY_INTERVAL_MS);
        when(configuration.getTaskRetryerResetTimeMs()).thenReturn(TimeUnit.MINUTES.toMillis(5));
        when(configuration.getTaskKillAttempts()).thenReturn(2L);
        when(featureActivationConfiguration.isMoveTaskValidationEnabled()).thenReturn(true);

        jobStore.events().subscribe(storeEvents);

        this.runtime = new JobServiceRuntime(
                configuration,
                new StubbedComputeProvider(),
                titusRuntime
        );
        this.jobOperations = createAndActivateV3JobOperations();
    }

    /**
     * Builds the full job-operations stack (batch/service difference resolvers, reconciliation
     * framework, no-op submit limiter) and switches it into active mode.
     */
    private DefaultV3JobOperations createAndActivateV3JobOperations() {
        TokenBucket stuckInStateRateLimiter = Limiters.unlimited("stuckInState");
        BatchDifferenceResolver batchDifferenceResolver = new BatchDifferenceResolver(
                configuration,
                runtime,
                featureActivationConfiguration,
                capacityGroupService,
                jobStore,
                versionSupplier,
                stuckInStateRateLimiter,
                titusRuntime,
                testScheduler
        );
        ServiceDifferenceResolver serviceDifferenceResolver = new ServiceDifferenceResolver(
                configuration,
                runtime,
                featureActivationConfiguration,
                capacityGroupService,
                jobStore,
                versionSupplier,
                stuckInStateRateLimiter,
                titusRuntime,
                testScheduler
        );
        // Submit limiter that never rejects and never reserves ids: tests exercise the
        // reconciler, not admission control.
        JobSubmitLimiter jobSubmitLimiter = new JobSubmitLimiter() {
            @Override
            public <JOB_DESCR> Optional<JobManagerException> checkIfAllowed(JOB_DESCR jobDescriptor) {
                return Optional.empty();
            }

            @Override
            public <JOB_DESCR> Optional<String> reserveId(JOB_DESCR jobDescriptor) {
                return Optional.empty();
            }

            @Override
            public <JOB_DESCR> void releaseId(JOB_DESCR jobDescriptor) {
            }
        };
        DefaultV3JobOperations v3JobOperations = new DefaultV3JobOperations(
                configuration,
                featureActivationConfiguration,
                jobStore,
                runtime,
                new JobReconciliationFrameworkFactory(
                        configuration,
                        featureActivationConfiguration,
                        batchDifferenceResolver,
                        serviceDifferenceResolver,
                        jobStore,
                        capacityGroupService,
                        newJobSanitizer(VerifierMode.Permissive),
                        newJobSanitizer(VerifierMode.Strict),
                        versionSupplier,
                        titusRuntime,
                        Optional.of(testScheduler)
                ),
                jobSubmitLimiter,
                titusRuntime,
                EntitySanitizerBuilder.stdBuilder().build(),
                versionSupplier
        );
        v3JobOperations.enterActiveMode();
        return v3JobOperations;
    }

    public DefaultV3JobOperations getJobOperations() {
        return jobOperations;
    }

    public TestScheduler getTestScheduler() {
        return testScheduler;
    }

    public TitusRuntime getTitusRuntime() {
        return titusRuntime;
    }

    /**
     * Simulates a process restart: shuts down the current job-operations stack, rebuilds it on
     * top of the same store, and re-creates a JobScenarioBuilder for every surviving job.
     */
    public JobsScenarioBuilder reboot() {
        this.jobOperations.shutdown();
        this.jobScenarioBuilders.clear();
        this.jobOperations = createAndActivateV3JobOperations();

        jobOperations.getJobs().forEach(job -> {
            JobScenarioBuilder.EventHolder<JobManagerEvent<?>> jobEventsSubscriber = new JobScenarioBuilder.EventHolder<>(jobStore);
            JobScenarioBuilder.EventHolder<Pair<StoreEvent, ?>> storeEventsSubscriber = new JobScenarioBuilder.EventHolder<>(jobStore);
            jobOperations.observeJob(job.getId()).subscribe(jobEventsSubscriber);
            jobStore.events(job.getId()).subscribe(storeEventsSubscriber);

            JobScenarioBuilder jobScenarioBuilder = new JobScenarioBuilder(
                    job.getId(),
                    jobEventsSubscriber,
                    storeEventsSubscriber,
                    jobOperations,
                    jobStore,
                    (StubbedComputeProvider) runtime.getComputeProvider(),
                    versionSupplier,
                    titusRuntime,
                    testScheduler
            );
            jobScenarioBuilders.add(jobScenarioBuilder);
        });

        return this;
    }

    public JobsScenarioBuilder withConcurrentStoreUpdateLimit(int concurrentStoreUpdateLimit) {
        this.concurrentStoreUpdateLimit = concurrentStoreUpdateLimit;
        return this;
    }

    // Make subsequent task writes to the store fail, until enableStore() is called on the
    // job scenario.
    public JobsScenarioBuilder breakStoreForTasks() {
        jobStore.setStoreState(StubbedJobStore.StoreState.BrokenForTasks);
        return this;
    }

    // Run all actions currently scheduled on the test scheduler without advancing time.
    public JobsScenarioBuilder trigger() {
        testScheduler.triggerActions();
        return this;
    }

    // Advance virtual time by one reconciler activation interval.
    public JobsScenarioBuilder advance() {
        testScheduler.advanceTimeBy(JobsScenarioBuilder.RECONCILER_ACTIVE_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        return this;
    }

    public JobScenarioBuilder getJobScenario(int idx) {
        return jobScenarioBuilders.get(idx);
    }

    public List<JobScenarioBuilder> getJobScenarios() {
        return jobScenarioBuilders;
    }

    /**
     * Runs additional scenario steps against an already-registered job.
     * NOTE(review): the type parameter E is unused in this signature.
     */
    public <E extends JobDescriptorExt> JobsScenarioBuilder inJob(int idx, Function<JobScenarioBuilder, JobScenarioBuilder> jobScenario) {
        JobScenarioBuilder jobScenarioBuilder = getJobScenario(idx);
        if (jobScenarioBuilder == null) {
            throw new IllegalArgumentException(String.format("No job with index %s registered", idx));
        }
        jobScenario.apply(jobScenarioBuilder);
        return this;
    }

    /**
     * Creates a job from the descriptor, waits (advancing virtual time) until its id is
     * assigned, registers a new JobScenarioBuilder for it, runs the given scenario steps, and
     * finally checks that entity versions were emitted in order.
     */
    public <E extends JobDescriptorExt> JobsScenarioBuilder scheduleJob(JobDescriptor<E> jobDescriptor,
                                                                        Function<JobScenarioBuilder, JobScenarioBuilder> jobScenario) {
        JobScenarioBuilder.EventHolder<JobManagerEvent<?>> jobEventsSubscriber = new JobScenarioBuilder.EventHolder<>(jobStore);
        JobScenarioBuilder.EventHolder<Pair<StoreEvent, ?>> storeEventsSubscriber = new JobScenarioBuilder.EventHolder<>(jobStore);
        AtomicReference<String> jobIdRef = new AtomicReference<>();

        // Subscribe to job/store events as soon as the job id is known.
        jobOperations.createJob(jobDescriptor, CallMetadata.newBuilder().withCallerId("Testing").withCallReason("Testing job creation").build()).doOnNext(jobId -> {
            jobOperations.observeJob(jobId).subscribe(jobEventsSubscriber);
            jobStore.events(jobId).subscribe(storeEventsSubscriber);
        }).subscribe(jobIdRef::set);

        trigger();

        // Keep advancing virtual time until job creation completes (bounded by 5s wall clock).
        await().timeout(5, TimeUnit.SECONDS).until(() -> {
            if (jobIdRef.get() != null) {
                return true;
            }
            advance();
            return false;
        });

        String jobId = jobIdRef.get();
        assertThat(jobId).describedAs("Job not created").isNotNull();

        JobScenarioBuilder jobScenarioBuilder = new JobScenarioBuilder(
                jobId,
                jobEventsSubscriber,
                storeEventsSubscriber,
                jobOperations,
                jobStore,
                (StubbedComputeProvider) runtime.getComputeProvider(),
                versionSupplier,
                titusRuntime,
                testScheduler
        );
        jobScenarioBuilders.add(jobScenarioBuilder);
        jobScenario.apply(jobScenarioBuilder);

        jobScenarioBuilder.expectVersionsOrdered();

        return this;
    }

    /**
     * Builds an entity sanitizer for job descriptors with a fixed, generous instance resource
     * profile, in the given verification mode (Permissive or Strict).
     */
    private EntitySanitizer newJobSanitizer(VerifierMode verifierMode) {
        return new JobSanitizerBuilder()
                .withVerifierMode(verifierMode)
                .withJobConstraintConfiguration(jobSanitizerConfiguration)
                .withJobAsserts(new JobAssertions(
                        jobSanitizerConfiguration,
                        instanceType -> ResourceDimension.newBuilder()
                                .withCpus(64)
                                .withGpu(8)
                                .withMemoryMB(256 * 1024)
                                .withDiskMB(1024 * 1024)
                                .withNetworkMbs(10 * 1024)
                                .build()
                ))
                .build();
    }
}
| 9,927 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/StubbedComputeProvider.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.master.jobmanager.service.ComputeProvider;
import reactor.core.publisher.Mono;
class StubbedComputeProvider implements ComputeProvider {
private final Map<String, ComputeProviderTask> providerTasks = new HashMap<>();
private boolean enabled;
private RuntimeException simulatedError;
public StubbedComputeProvider() {
enabled = true;
}
@Override
public Mono<Void> launchTask(Job<?> job, Task task) {
return Mono.defer(() -> {
if (simulatedError != null) {
RuntimeException error = simulatedError;
simulatedError = null;
return Mono.error(error);
}
providerTasks.put(task.getId(), new ComputeProviderTask(job, task));
return Mono.empty();
});
}
@Override
public Mono<Void> terminateTask(Task task) {
return Mono.fromRunnable(() -> {
ComputeProviderTask providerTask = providerTasks.get(task.getId());
if (providerTask != null) {
providerTask.terminate();
}
});
}
@Override
public boolean isReadyForScheduling() {
return enabled;
}
@Override
public String resolveReasonCode(Throwable error) {
return TaskStatus.REASON_UNKNOWN_SYSTEM_ERROR;
}
public void enableScheduling(boolean enabled) {
this.enabled = enabled;
}
public void scheduleTask(String taskId) {
ComputeProviderTask providerTask = mustHaveTask(taskId);
providerTask.schedule();
}
public void failNextTaskLaunch(RuntimeException simulatedError) {
this.simulatedError = simulatedError;
}
public boolean hasComputeProviderTask(String taskId) {
return providerTasks.containsKey(taskId);
}
public boolean isTaskFinished(String taskId) {
ComputeProviderTask providerTask = mustHaveTask(taskId);
return providerTask.isFinished();
}
public void finishTask(String taskId) {
mustHaveTask(taskId);
providerTasks.remove(taskId);
}
public Map<String, String> getScheduledTaskContext(String taskId) {
ComputeProviderTask providerTask = mustHaveTask(taskId);
return providerTask.getScheduledTaskContext();
}
private ComputeProviderTask mustHaveTask(String taskId) {
return Preconditions.checkNotNull(providerTasks.get(taskId), "Task not found: %s", taskId);
}
}
| 9,928 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/StubbedApplicationSlaManagementService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.Collection;
import java.util.Collections;
import java.util.Optional;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.api.model.SchedulerConstants;
import com.netflix.titus.api.model.Tier;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import rx.Observable;
/**
 * Capacity-group service stub exposing a single "DEFAULT" Flex-tier capacity group; all
 * mutation operations are no-ops.
 */
class StubbedApplicationSlaManagementService implements ApplicationSlaManagementService {

    private static final ApplicationSLA DEFAULT = new ApplicationSLA(
            "DEFAULT",
            Tier.Flex,
            ResourceDimension.newBuilder().withCpus(16).withMemoryMB(32 * 1024).withNetworkMbs(4096).withDiskMB(100 * 1024).build(),
            10,
            SchedulerConstants.SCHEDULER_NAME_KUBE_SCHEDULER,
            ""
    );

    @Override
    public Collection<ApplicationSLA> getApplicationSLAs() {
        return Collections.singletonList(DEFAULT);
    }

    @Override
    public ApplicationSLA getApplicationSLA(String applicationName) {
        // Deliberately dereferences applicationName first (NPE on null, like the original).
        if (applicationName.equals("DEFAULT")) {
            return DEFAULT;
        }
        return null;
    }

    @Override
    public Optional<ApplicationSLA> findApplicationSLA(String applicationName) {
        ApplicationSLA match = getApplicationSLA(applicationName);
        return Optional.ofNullable(match);
    }

    @Override
    public Collection<ApplicationSLA> getApplicationSLAsForScheduler(String schedulerName) {
        // Scheduler name is ignored; the single DEFAULT group is always returned.
        return Collections.singletonList(DEFAULT);
    }

    @Override
    public Observable<Void> addApplicationSLA(ApplicationSLA applicationSLA) {
        return Observable.empty();
    }

    @Override
    public Observable<Void> removeApplicationSLA(String applicationName) {
        return Observable.empty();
    }
}
| 9,929 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/ComputeProviderTask.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.Map;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.NetworkConfiguration;
/**
 * State machine for a single task as seen by the stubbed compute provider.
 * Lifecycle: Accepted -> Scheduled -> StartInitiated -> Started -> Terminating -> Finished*.
 */
class ComputeProviderTask {

    private enum State {
        Accepted,
        Scheduled,
        StartInitiated,
        Started,
        Terminating,
        FinishedSucceeded,
        FinishedFailed,
    }

    private State state;
    // Context attributes attached when the task is placed (fixed agent host in tests).
    private final Map<String, String> scheduledTaskContext;

    ComputeProviderTask(Job<?> job, Task task) {
        this.state = State.Accepted;
        this.scheduledTaskContext = CollectionsExt.asMap(
                TaskAttributes.TASK_ATTRIBUTES_AGENT_HOST, "agent1"
        );
    }

    public State getState() {
        return state;
    }

    public Map<String, String> getScheduledTaskContext() {
        return scheduledTaskContext;
    }

    public boolean isFinished() {
        switch (state) {
            case FinishedSucceeded:
            case FinishedFailed:
                return true;
            default:
                return false;
        }
    }

    void schedule() {
        // Only an Accepted task may be scheduled.
        Preconditions.checkState(state == State.Accepted);
        this.state = State.Scheduled;
    }

    void terminate() {
        // Not yet running: fails immediately. Running: begins graceful termination.
        // Already terminating or finished: no-op.
        if (state == State.Accepted || state == State.Scheduled) {
            this.state = State.FinishedFailed;
        } else if (state == State.StartInitiated || state == State.Started) {
            this.state = State.Terminating;
        }
    }
}
| 9,930 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/ScenarioTemplates.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor.JobDescriptorExt;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import static com.netflix.titus.master.jobmanager.service.integration.scenario.JobScenarioBuilder.CHANGE_CAPACITY_CALL_METADATA;
import static org.assertj.core.api.Assertions.assertThat;
public class ScenarioTemplates {
/**
 * Changes a service job's capacity and verifies that both the emitted job update event
 * (including its call metadata) and the job persisted in the store reflect the new capacity.
 */
public static Function<JobScenarioBuilder, JobScenarioBuilder> changeJobCapacity(Capacity newCapacity) {
    return jobScenario -> jobScenario
            .changeCapacity(newCapacity)
            .expectJobUpdateEventObject(event -> {
                Job<ServiceJobExt> job = event.getCurrent();
                assertThat(job.getJobDescriptor().getExtensions().getCapacity()).isEqualTo(newCapacity);
                assertThat(event.getCallMetadata()).isEqualTo(CHANGE_CAPACITY_CALL_METADATA);
            })
            .expectServiceJobUpdatedInStore(job -> assertThat(job.getJobDescriptor().getExtensions().getCapacity()).isEqualTo(newCapacity));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> acceptJobWithOneTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.expectJobEvent()
.template(acceptTask(taskIdx, resubmit));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> acceptTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.expectTaskAddedToStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.Accepted);
assertThat(task.getTaskContext())
.containsEntry(TaskAttributes.TASK_ATTRIBUTES_CELL, JobDescriptorGenerator.TEST_CELL_NAME);
assertThat(task.getTaskContext())
.containsEntry(TaskAttributes.TASK_ATTRIBUTES_STACK, JobDescriptorGenerator.TEST_STACK_NAME);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Accepted, "normal")
.expectComputeProviderCreateRequest(taskIdx, resubmit);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> triggerComputePlatformStartInitiatedEvent(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.triggerComputePlatformStartInitiatedEvent(taskIdx, resubmit)
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.StartInitiated);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.StartInitiated);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> triggerComputeProviderStartedEvent(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.triggerComputePlatformStartedEvent(taskIdx, resubmit)
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Started))
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Started);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> triggerComputeProviderFinishedEvent(int taskIdx, int resubmit, int errorCode) {
String reasonCode = errorCode == 0 ? TaskStatus.REASON_NORMAL : TaskStatus.REASON_FAILED;
return jobScenario -> jobScenario
.triggerComputePlatformFinishedEvent(taskIdx, resubmit, errorCode, reasonCode)
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished);
assertThat(task.getStatus().getReasonCode()).isEqualTo(reasonCode);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Finished, reasonCode);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> startTask(int taskIdx, int resubmit, TaskState targetTaskState) {
return jobScenario -> {
JobScenarioBuilder accepted = jobScenario.expectTaskInActiveState(taskIdx, resubmit, TaskState.Accepted);
if (targetTaskState == TaskState.Accepted) {
return accepted;
}
jobScenario.inTask(taskIdx, resubmit, task -> jobScenario.getComputeProvider().scheduleTask(task.getId()));
JobScenarioBuilder launched = jobScenario.triggerComputePlatformLaunchEvent(taskIdx, resubmit);
jobScenario.advance();
jobScenario.expectTaskUpdatedInStore(taskIdx, resubmit, updatedTask -> assertThat(updatedTask.getStatus().getState() == TaskState.Launched));
jobScenario.expectTaskEvent(taskIdx, resubmit, event -> assertThat(event.getCurrentTask().getStatus().getState() == TaskState.Launched));
if (targetTaskState == TaskState.Launched) {
return launched;
}
JobScenarioBuilder startInitiated = launched.template(triggerComputePlatformStartInitiatedEvent(taskIdx, resubmit));
if (targetTaskState == TaskState.StartInitiated) {
return startInitiated;
}
return startInitiated.template(triggerComputeProviderStartedEvent(taskIdx, resubmit));
};
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> verifyJobWithFinishedTasksCompletes() {
return jobScenario -> {
List<Task> activeTasks = jobScenario.getActiveTasks();
return jobScenario.allActiveTasks(task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished))
.advance()
.advance()
.expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.expectJobUpdatedInStore(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.inAllTasks(activeTasks, jobScenario::expectedTaskArchivedInStore)
.advance()
.expectJobArchivedInStore();
};
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> finishSingleTaskJob(
int taskIdx, int resubmit, String reasonCode, int errorCode) {
return jobScenario -> jobScenario
.triggerComputePlatformFinishedEvent(taskIdx, resubmit, errorCode, reasonCode)
.template(handleTaskFinishedTransitionInSingleTaskJob(taskIdx, resubmit, reasonCode));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> expectTaskStateUpdate(int taskIdx, int resubmit, TaskState taskState, String reasonCode) {
return jobScenario -> jobScenario
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(taskState);
assertThat(task.getStatus().getReasonCode()).isEqualTo(reasonCode);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, taskState, reasonCode);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> handleTaskFinishedTransitionInSingleTaskJob(
int taskIdx, int resubmit, String reasonCode) {
return jobScenario -> jobScenario
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished);
assertThat(task.getStatus().getReasonCode()).isEqualTo(reasonCode);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Finished, reasonCode)
.andThen(() -> {
if (JobFunctions.isServiceJob(jobScenario.getJob())) {
jobScenario.expectArchivedTaskEvent(taskIdx, resubmit);
}
})
.advance()
.advance()
.expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.expectJobUpdatedInStore(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.advance()
.expectedTaskArchivedInStore(taskIdx, resubmit)
.expectJobArchivedInStore()
.expectJobUpdateEventObject(event -> {
assertThat(event.getCurrent().getId()).isEqualTo(jobScenario.getJobId());
assertThat(event.isArchived()).isTrue();
});
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> failRetryableTask(int taskIdx, int resubmit) {
return failRetryableTask(taskIdx, resubmit, 0);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> failRetryableTask(int taskIdx, int resubmit, long expectedRetryDelayMs) {
return jobScenario -> jobScenario
.triggerComputePlatformFinishedEvent(taskIdx, resubmit, -1, TaskStatus.REASON_FAILED)
.template(cleanAfterFinishedTaskAndRetry(taskIdx, resubmit, TaskStatus.REASON_FAILED, expectedRetryDelayMs));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> killJob() {
return jobScenario -> jobScenario
.killJob()
.expectJobUpdatedInStore(job -> {
assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated);
assertThat(job.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_TASK_KILLED);
})
.expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> reconcilerTaskKill(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated))
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.KillInitiated)
.inTask(taskIdx, resubmit, task -> jobScenario.getComputeProvider().finishTask(task.getId()))
.expectPodTerminated(taskIdx, resubmit)
.triggerComputePlatformFinishedEvent(taskIdx, resubmit, -1, TaskStatus.REASON_TASK_KILLED);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> killTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.killTask(taskIdx, resubmit)
.expectComputeProviderTaskFinished(taskIdx, resubmit)
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated);
assertThat(task.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_TASK_KILLED);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.KillInitiated, TaskStatus.REASON_TASK_KILLED);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> killKubeTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.killTask(taskIdx, resubmit)
.inTask(taskIdx, resubmit, task -> jobScenario.getComputeProvider().finishTask(task.getId()))
.expectPodTerminated(taskIdx, resubmit)
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated);
assertThat(task.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_TASK_KILLED);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.KillInitiated, TaskStatus.REASON_TASK_KILLED);
}
/**
* Batch tasks that are killed are not restarted.
*/
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> killBatchTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.template(killTask(taskIdx, resubmit))
.triggerComputePlatformFinishedEvent(taskIdx, resubmit, -1, TaskStatus.REASON_TASK_KILLED)
.template(expectTaskStateUpdate(taskIdx, resubmit, TaskState.Finished, TaskStatus.REASON_TASK_KILLED));
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> passFinalKillInitiatedTimeout() {
return jobScenario -> jobScenario
.advance()
.advance(JobsScenarioBuilder.KILL_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> passKillInitiatedTimeoutWithKillReattempt(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.advance()
.advance(JobsScenarioBuilder.KILL_INITIATED_TIMEOUT_MS, TimeUnit.MILLISECONDS)
.advance()
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated);
assertThat(task.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_STUCK_IN_KILLING_STATE);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.KillInitiated, TaskStatus.REASON_STUCK_IN_KILLING_STATE)
.expectTaskInActiveState(taskIdx, resubmit, TaskState.KillInitiated)
.expectComputeProviderTaskFinished(taskIdx, resubmit);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> failLastBatchRetryableTask(int taskIdx, int resubmit) {
return jobScenario -> jobScenario
.template(triggerComputeProviderFinishedEvent(taskIdx, resubmit, -1))
.advance()
.expectJobEvent(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.expectJobUpdatedInStore(job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.Finished))
.advance()
.expectedTaskArchivedInStore(taskIdx, resubmit)
.expectJobArchivedInStore();
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> cleanAfterFinishedTaskAndRetry(int taskIdx, int resubmit, String reasonCode) {
return cleanAfterFinishedTaskAndRetry(taskIdx, resubmit, reasonCode, 0);
}
public static <E extends JobDescriptorExt> Function<JobScenarioBuilder, JobScenarioBuilder> cleanAfterFinishedTaskAndRetry(int taskIdx, int resubmit, String reasonCode, long expectedRetryDelayMs) {
int nextResubmit = resubmit + 1;
return jobScenario -> {
jobScenario
.expectTaskUpdatedInStore(taskIdx, resubmit, task -> {
assertThat(task.getStatus().getState()).isEqualTo(TaskState.Finished);
assertThat(task.getStatus().getReasonCode()).isEqualTo(reasonCode);
})
.expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Finished, reasonCode);
if (expectedRetryDelayMs > 0) {
jobScenario
.advance(expectedRetryDelayMs / 2, TimeUnit.MILLISECONDS)
.expectNoStoreUpdate(taskIdx, nextResubmit)
.advance(expectedRetryDelayMs / 2, TimeUnit.MILLISECONDS);
}
return jobScenario
.expectTaskAddedToStore(taskIdx, nextResubmit, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Accepted))
.expectedTaskArchivedInStore(taskIdx, resubmit)
.expectTaskStateChangeEvent(taskIdx, nextResubmit, TaskState.Accepted)
.expectComputeProviderCreateRequest(taskIdx, nextResubmit);
};
}
}
| 9,931 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/JobScenarioBuilder.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.CapacityAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobProcesses;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.event.JobUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.event.TaskUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.jobmanager.service.V3JobOperations.Trigger;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.Caller;
import com.netflix.titus.api.model.callmetadata.CallerType;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.jobmanager.service.JobManagerUtil;
import com.netflix.titus.master.jobmanager.service.VersionSupplier;
import com.netflix.titus.master.jobmanager.service.VersionSuppliers;
import com.netflix.titus.master.jobmanager.service.integration.scenario.StubbedJobStore.StoreEvent;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import com.netflix.titus.testkit.rx.TitusRxSubscriber;
import rx.Subscriber;
import rx.schedulers.TestScheduler;
import static com.netflix.titus.master.jobmanager.service.integration.scenario.JobsScenarioBuilder.RECONCILER_ACTIVE_TIMEOUT_MS;
import static org.assertj.core.api.Assertions.assertThat;
public class JobScenarioBuilder {
    // Call metadata attached to capacity-change operations; scenario templates assert that
    // capacity update events carry exactly this metadata.
    static final CallMetadata CHANGE_CAPACITY_CALL_METADATA = CallMetadata.newBuilder()
            .withCallers(Collections.singletonList(Caller.newBuilder()
                    .withId("capacity")
                    .withCallerType(CallerType.Application)
                    .build()))
            .withCallReason("capacity update")
            .build();

    // Id of the job this builder operates on.
    private final String jobId;
    private final VersionSupplier versionSupplier;
    private final TitusRuntime titusRuntime;
    // Subscriber capturing job/task events emitted by the job engine.
    private final EventHolder<JobManagerEvent<?>> jobEventsSubscriber;
    // Subscriber capturing persistence (store) events.
    private final EventHolder<Pair<StoreEvent, ?>> storeEventsSubscriber;
    private final V3JobOperations jobOperations;
    private final StubbedJobStore jobStore;
    private final StubbedComputeProvider computeProvider;
    private final TestScheduler testScheduler;
    // True when the job under test is a batch job; service-only expectations check this flag.
    private final boolean batchJob;
    // Default call metadata used by the mutating operations issued from this builder.
    private final CallMetadata callMetadata = CallMetadata.newBuilder().withCallReason("Testing call metadata").withCallerId("test").build();

    /**
     * Creates a builder bound to an already-stored job. The job type (batch vs service) is
     * resolved eagerly from the stubbed store.
     */
    public JobScenarioBuilder(String jobId,
                              EventHolder<JobManagerEvent<?>> jobEventsSubscriber,
                              EventHolder<Pair<StoreEvent, ?>> storeEventsSubscriber,
                              V3JobOperations jobOperations,
                              StubbedJobStore jobStore,
                              StubbedComputeProvider computeProvider,
                              VersionSupplier versionSupplier,
                              TitusRuntime titusRuntime,
                              TestScheduler testScheduler) {
        this.jobId = jobId;
        this.versionSupplier = versionSupplier;
        this.titusRuntime = titusRuntime;
        // Blocking read is acceptable here: the stubbed store resolves immediately.
        this.batchJob = JobFunctions.isBatchJob(jobStore.retrieveJob(jobId).toBlocking().first());
        this.jobEventsSubscriber = jobEventsSubscriber;
        this.storeEventsSubscriber = storeEventsSubscriber;
        this.jobOperations = jobOperations;
        this.jobStore = jobStore;
        this.computeProvider = computeProvider;
        this.testScheduler = testScheduler;
    }
    /** Returns the id of the job under test. */
    public String getJobId() {
        return jobId;
    }

    /** Enables or disables scheduling in the stubbed compute provider. */
    public JobScenarioBuilder enableKubeIntegration(boolean enabled) {
        computeProvider.enableScheduling(enabled);
        return this;
    }

    /** Exposes the stubbed compute provider for direct manipulation in tests. */
    public StubbedComputeProvider getComputeProvider() {
        return computeProvider;
    }

    /** Advances virtual time by one reconciliation interval. */
    public JobScenarioBuilder advance() {
        testScheduler.advanceTimeBy(RECONCILER_ACTIVE_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        return this;
    }

    /** Runs an arbitrary action inline within a fluent scenario chain. */
    public JobScenarioBuilder andThen(Runnable action) {
        action.run();
        return this;
    }

    /** Verifies version timestamps are ordered for both job and task revisions. */
    public JobScenarioBuilder expectVersionsOrdered() {
        expectJobVersionsOrdered();
        expectTaskVersionsOrdered();
        return this;
    }
public JobScenarioBuilder expectJobVersionsOrdered() {
List<Job> revisions = jobStore.getJobRevisions(jobId);
Job last = revisions.get(0);
for (int i = 1; i < revisions.size(); i++) {
Job next = revisions.get(i);
assertThat(next.getVersion().getTimestamp()).isGreaterThanOrEqualTo(last.getVersion().getTimestamp());
}
return this;
}
public JobScenarioBuilder expectTaskVersionsOrdered() {
Map<String, List<Task>> taskGroups = jobStore.getTaskRevisions(jobId);
taskGroups.forEach((originalId, list) -> {
Task last = list.get(0);
for (int i = 1; i < list.size(); i++) {
Task next = list.get(i);
assertThat(next.getVersion().getTimestamp()).isGreaterThanOrEqualTo(last.getVersion().getTimestamp());
}
});
return this;
}
public JobScenarioBuilder expectFailure(Callable<?> action, Consumer<Throwable> errorEvaluator) {
try {
action.call();
throw new IllegalStateException("Expected action to fail");
} catch (Exception e) {
errorEvaluator.accept(e);
}
return this;
}
public JobScenarioBuilder advance(long time, TimeUnit timeUnit) {
long timeMs = timeUnit.toMillis(time);
long steps = timeMs / RECONCILER_ACTIVE_TIMEOUT_MS;
if (steps > 0) {
for (int i = 0; i < steps; i++) {
advance();
}
}
testScheduler.advanceTimeBy(timeMs - steps * RECONCILER_ACTIVE_TIMEOUT_MS, TimeUnit.MILLISECONDS);
return this;
}
    /** Applies a (taskIdx, resubmit)-parameterized scenario template to every active task of the job. */
    public JobScenarioBuilder inActiveTasks(BiFunction<Integer, Integer, Function<JobScenarioBuilder, JobScenarioBuilder>> templateFun) {
        List<Task> activeTasks = jobOperations.getTasks(jobId);
        activeTasks.forEach(task -> {
            if (task instanceof BatchJobTask) {
                // Batch tasks carry their index directly.
                BatchJobTask batchTask = (BatchJobTask) task;
                templateFun.apply(batchTask.getIndex(), batchTask.getResubmitNumber()).apply(this);
            } else {
                // Service tasks: the index/resubmit pair is tracked by the stubbed store.
                jobStore.getIndexAndResubmit(task.getId()).ifPresent(pair -> {
                    templateFun.apply(pair.getLeft(), task.getResubmitNumber()).apply(this);
                });
            }
        });
        return this;
    }

    /** Runs the consumer against the active task identified by index/resubmit. */
    public JobScenarioBuilder inTask(int taskIdx, int resubmit, Consumer<Task> consumer) {
        consumer.accept(findTaskInActiveState(taskIdx, resubmit));
        return this;
    }

    /** Runs the consumer against each active task individually. */
    public JobScenarioBuilder allActiveTasks(Consumer<Task> consumer) {
        List<Task> activeTasks = jobOperations.getTasks(jobId);
        activeTasks.forEach(consumer);
        return this;
    }

    /** Runs the consumer against the full list of active tasks at once. */
    public JobScenarioBuilder allTasks(Consumer<List<Task>> templateFun) {
        templateFun.accept(jobOperations.getTasks(jobId));
        return this;
    }

    /** Applies an (index, resubmit) function to each of the given tasks. */
    public JobScenarioBuilder inAllTasks(Collection<Task> tasks, BiFunction<Integer, Integer, JobScenarioBuilder> templateFun) {
        tasks.forEach(task -> templateFun.apply(jobStore.getIndex(task.getId()), task.getResubmitNumber()));
        return this;
    }

    /** Runs the consumer against the first active task matching the predicate; fails if none match. */
    public JobScenarioBuilder firstTaskMatch(Predicate<Task> predicate, Consumer<Task> consumer) {
        Task task = jobOperations.getTasks(jobId).stream().filter(predicate).findFirst().orElseThrow(() -> new IllegalStateException("No task matches the given predicate"));
        consumer.accept(task);
        return this;
    }

    /** Applies a reusable scenario template (see {@code ScenarioTemplates}). */
    public JobScenarioBuilder template(Function<JobScenarioBuilder, JobScenarioBuilder> templateFun) {
        return templateFun.apply(this);
    }

    /** Discards all currently-buffered job and store events. */
    public JobScenarioBuilder ignoreAvailableEvents() {
        jobEventsSubscriber.ignoreAvailableEvents();
        storeEventsSubscriber.ignoreAvailableEvents();
        return this;
    }

    /** Convenience overload building a {@link Capacity} from min/desired/max values. */
    public JobScenarioBuilder changeCapacity(int min, int desired, int max) {
        return changeCapacity(Capacity.newBuilder().withMin(min).withDesired(desired).withMax(max).build());
    }

    /** Updates the job capacity and auto-advances virtual time until the operation completes. */
    public JobScenarioBuilder changeCapacity(Capacity newCapacity) {
        ExtTestSubscriber<Void> subscriber = new ExtTestSubscriber<>();
        CapacityAttributes capacityAttributes = JobModel.newCapacityAttributes(newCapacity).build();
        jobOperations.updateJobCapacityAttributes(jobId, capacityAttributes, CHANGE_CAPACITY_CALL_METADATA).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Enables/disables the (service) job and waits for the operation to complete. */
    public JobScenarioBuilder changeJobEnabledStatus(boolean enabled) {
        ExtTestSubscriber<Void> subscriber = new ExtTestSubscriber<>();
        jobOperations.updateJobStatus(jobId, enabled, callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Updates service job processes and waits for the operation to complete. */
    public JobScenarioBuilder changServiceJobProcesses(ServiceJobProcesses processes) {
        ExtTestSubscriber<Void> subscriber = new ExtTestSubscriber<>();
        jobOperations.updateServiceJobProcesses(jobId, processes, callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Updates the job disruption budget and waits for the operation to complete. */
    public JobScenarioBuilder changeDisruptionBudget(DisruptionBudget disruptionBudget) {
        TitusRxSubscriber<Void> subscriber = new TitusRxSubscriber<>();
        jobOperations.updateJobDisruptionBudget(jobId, disruptionBudget, callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }
    /** Kills the job and auto-advances time until the kill operation completes. */
    public JobScenarioBuilder killJob() {
        ExtTestSubscriber<Void> subscriber = new ExtTestSubscriber<>();
        jobOperations.killJob(jobId, "Testing", callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Kills the given task (no shrink) with the given trigger and waits for completion. */
    public JobScenarioBuilder killTask(Task task, Trigger trigger) {
        TitusRxSubscriber<Void> subscriber = new TitusRxSubscriber<>();
        jobOperations.killTask(task.getId(), false, false, trigger, callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Kills the task identified by index/resubmit with the given trigger. */
    public JobScenarioBuilder killTask(int taskIdx, int resubmit, Trigger trigger) {
        return killTask(findTaskInActiveState(taskIdx, resubmit), trigger);
    }

    /** Kills the task identified by index/resubmit via the API trigger. */
    public JobScenarioBuilder killTask(int taskIdx, int resubmit) {
        return killTask(taskIdx, resubmit, Trigger.API);
    }

    /** Kills the given task, shrinking the job's desired size, and waits for completion. */
    public JobScenarioBuilder killTaskAndShrink(Task task) {
        TitusRxSubscriber<Void> subscriber = new TitusRxSubscriber<>();
        jobOperations.killTask(task.getId(), true, false, Trigger.API, callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Like {@link #killTaskAndShrink(Task)} but returns immediately without waiting for completion. */
    public JobScenarioBuilder killTaskAndShrinkNoWait(Task task) {
        TitusRxSubscriber<Void> subscriber = new TitusRxSubscriber<>();
        jobOperations.killTask(task.getId(), true, false, Trigger.API, callMetadata).subscribe(subscriber);
        return this;
    }

    /** Kill-and-shrink by task index/resubmit. */
    public JobScenarioBuilder killTaskAndShrink(int taskIdx, int resubmit) {
        return killTaskAndShrink(findTaskInActiveState(taskIdx, resubmit));
    }

    /** Moves a service task from one job to another and waits for the operation to complete. */
    public JobScenarioBuilder moveTask(int taskIdx, int resubmit, String sourceJobId, String targetJobId) {
        Task task = findTaskInActiveState(taskIdx, resubmit);
        ExtTestSubscriber<Void> subscriber = new ExtTestSubscriber<>();
        jobOperations.moveServiceTask(sourceJobId, targetJobId, task.getId(), callMetadata).subscribe(subscriber);
        autoAdvanceUntilSuccessful(() -> checkOperationSubscriberAndThrowExceptionIfError(subscriber));
        return this;
    }

    /** Arranges for the next pod create request in the stubbed compute provider to fail. */
    public JobScenarioBuilder failNextPodCreate(RuntimeException simulatedError) {
        computeProvider.failNextTaskLaunch(simulatedError);
        return this;
    }

    /** Asserts the job under test is a service job and hands it to the consumer. */
    public JobScenarioBuilder assertServiceJob(Consumer<Job<ServiceJobExt>> serviceJob) {
        Job<?> job = jobOperations.getJob(jobId).orElseThrow(() -> new IllegalStateException("Unknown job: " + jobId));
        assertThat(JobFunctions.isServiceJob(job)).describedAs("Not a service job: %s", jobId).isTrue();
        // Cast is safe after the isServiceJob assertion above.
        serviceJob.accept((Job<ServiceJobExt>) job);
        return this;
    }

    /** Asserts the active task identified by index/resubmit is in the given state. */
    public JobScenarioBuilder expectTaskInActiveState(int taskIdx, int resubmit, TaskState taskState) {
        Task task = findTaskInActiveState(taskIdx, resubmit);
        assertThat(task.getStatus().getState()).isEqualTo(taskState);
        return this;
    }
    /**
     * Finds the active task with the given index/resubmit, failing the test if there is no
     * such task among the job's currently-active tasks.
     */
    private Task findTaskInActiveState(int taskIdx, int resubmit) {
        Task task = jobOperations.getTasks(jobId).stream()
                .filter(t -> jobStore.hasIndexAndResubmit(t, taskIdx, resubmit))
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("Job has no active task with index " + taskIdx));
        assertThat(task.getResubmitNumber()).isEqualTo(resubmit);
        return task;
    }

    /** Expects any job update event for this job, with no additional checks. */
    public JobScenarioBuilder expectJobEvent() {
        return expectJobEvent(job -> {
        });
    }

    /** Verifies that no job event is pending (after auto-advancing time). */
    public JobScenarioBuilder expectNoJobStateChangeEvent() {
        JobManagerEvent<?> event = autoAdvance(jobEventsSubscriber::takeNextJobEvent);
        assertThat(event).isNull();
        return this;
    }

    /** Service-job variant of {@link #expectJobEvent(Consumer)}; fails when the job is a batch job. */
    public JobScenarioBuilder expectServiceJobEvent(Consumer<Job<ServiceJobExt>> check) {
        Preconditions.checkState(!batchJob, "Service job expected");
        // Raw Consumer sidesteps the generic mismatch between Job<ServiceJobExt> and Job<?>.
        Consumer checkNoTypeParam = check;
        return expectJobEvent(checkNoTypeParam);
    }

    /** Takes the next job event, asserts it is a {@link JobUpdateEvent}, and applies the check. */
    public JobScenarioBuilder expectJobUpdateEventObject(Consumer<JobUpdateEvent> check) {
        JobManagerEvent<?> event = autoAdvance(jobEventsSubscriber::takeNextJobEvent);
        assertThat(event).describedAs("No job update event for job: %s", jobId).isNotNull();
        assertThat(event).isInstanceOf(JobUpdateEvent.class);
        check.accept((JobUpdateEvent) event);
        return this;
    }

    /** Expects a job update event for this job id and applies the check to the updated job. */
    public JobScenarioBuilder expectJobEvent(Consumer<Job<?>> check) {
        expectJobUpdateEventObject(jobUpdateEvent -> {
            assertThat(jobUpdateEvent.getCurrent().getId()).isEqualTo(jobId);
            check.accept(jobUpdateEvent.getCurrent());
        });
        return this;
    }
private TaskUpdateEvent expectTaskEvent(int taskIdx, int resubmit) {
jobStore.expectTaskInStore(jobId, taskIdx, resubmit);
JobManagerEvent<?> event = autoAdvance(() -> jobEventsSubscriber.takeNextTaskEvent(taskIdx, resubmit));
assertThat(event).isNotNull();
assertThat(event).isInstanceOf(TaskUpdateEvent.class);
TaskUpdateEvent taskUpdateEvent = (TaskUpdateEvent) event;
Task taskFromEvent = taskUpdateEvent.getCurrent();
assertThat(jobStore.hasIndexAndResubmit(taskFromEvent, taskIdx, resubmit))
.describedAs("Expected event for task index %i and resubmit %i, but got %s", taskIdx, resubmit, taskFromEvent.getId())
.isTrue();
return taskUpdateEvent;
}
    /** Expects the next event for the task to be an archived task update. */
    public JobScenarioBuilder expectArchivedTaskEvent(int taskIdx, int resubmit) {
        JobManagerEvent<?> event = autoAdvance(() -> jobEventsSubscriber.takeNextTaskEvent(taskIdx, resubmit));
        assertThat(event).isNotNull();
        assertThat(event).isInstanceOf(TaskUpdateEvent.class);
        TaskUpdateEvent taskUpdateEvent = (TaskUpdateEvent) event;
        assertThat(taskUpdateEvent.isArchived()).isTrue();
        return this;
    }

    /** Expects a task update event and applies the validator to it. */
    public JobScenarioBuilder expectTaskEvent(int taskIdx, int resubmit, Consumer<TaskUpdateEvent> validator) {
        TaskUpdateEvent event = expectTaskEvent(taskIdx, resubmit);
        validator.accept(event);
        return this;
    }

    /** Expects a task state-change event to the given state (reason code not checked). */
    public JobScenarioBuilder expectTaskStateChangeEvent(int taskIdx, int resubmit, TaskState taskState) {
        TaskUpdateEvent event = expectTaskEvent(taskIdx, resubmit);
        TaskStatus status = event.getCurrent().getStatus();
        assertThat(status.getState()).isEqualTo(taskState);
        return this;
    }

    /** Expects a task state-change event to the given state with the given reason code. */
    public JobScenarioBuilder expectTaskStateChangeEvent(int taskIdx, int resubmit, TaskState taskState, String reasonCode) {
        TaskUpdateEvent event = expectTaskEvent(taskIdx, resubmit);
        TaskStatus status = event.getCurrent().getStatus();
        assertThat(status.getState()).isEqualTo(taskState);
        assertThat(status.getReasonCode()).isEqualTo(reasonCode);
        return this;
    }

    /** Verifies that no task event is pending (after auto-advancing time). */
    public JobScenarioBuilder expectNoTaskStateChangeEvent() {
        JobManagerEvent<?> event = autoAdvance(jobEventsSubscriber::takeNextTaskEvent);
        assertThat(event).isNull();
        return this;
    }

    /** Verifies the compute provider received a pod create request for the task. */
    public JobScenarioBuilder expectComputeProviderCreateRequest(int taskIdx, int resubmit) {
        Task task = jobStore.expectTaskInStore(jobId, taskIdx, resubmit);
        advance();
        assertThat(computeProvider.hasComputeProviderTask(task.getId()))
                .describedAs("Task %s (index %d) is not scheduled yet", task.getId(), taskIdx)
                .isTrue();
        // Pod creation is recorded as an Accepted/REASON_POD_CREATED transition.
        expectTaskStateChangeEvent(taskIdx, resubmit, TaskState.Accepted, TaskStatus.REASON_POD_CREATED);
        expectTaskUpdatedInStore(taskIdx, resubmit, t -> assertThat(t.getStatus().getReasonCode()).isEqualTo(TaskStatus.REASON_POD_CREATED));
        return this;
    }

    /** Service-job variant of {@link #expectJobUpdatedInStore(Consumer)}; fails for batch jobs. */
    public JobScenarioBuilder expectServiceJobUpdatedInStore(Consumer<Job<ServiceJobExt>> check) {
        Preconditions.checkState(!batchJob, "Service job expected");
        // Raw Consumer sidesteps the generic mismatch between Job<ServiceJobExt> and Job<?>.
        Consumer checkNoTypeParam = check;
        return expectJobUpdatedInStore(checkNoTypeParam);
    }

    /** Expects the next store event to be a job update and applies the check to the stored job. */
    public JobScenarioBuilder expectJobUpdatedInStore(Consumer<Job<?>> check) {
        Pair<StoreEvent, ?> storeEventPair = autoAdvance((storeEventsSubscriber::takeNextJobStoreEvent));
        assertThat(storeEventPair.getLeft()).isEqualTo(StoreEvent.JobUpdated);
        Job<?> job = (Job<?>) storeEventPair.getRight();
        check.accept(job);
        return this;
    }
/** Asserts that the next buffered job store event is a job removal (archiving). */
public JobScenarioBuilder expectJobArchivedInStore() {
    Pair<StoreEvent, ?> storeEventPair = storeEventsSubscriber.takeNextJobStoreEvent();
    assertThat(storeEventPair.getLeft()).isEqualTo(StoreEvent.JobRemoved);
    return this;
}

/** Asserts that task (taskIdx, resubmit) was added to the store and applies the extra check to it. */
public JobScenarioBuilder expectTaskAddedToStore(int taskIdx, int resubmit, Consumer<Task> check) {
    Task task = expectTaskEvent(taskIdx, resubmit, StoreEvent.TaskAdded);
    check.accept(task);
    return this;
}

/** Asserts that task (taskIdx, resubmit) was updated in the store and applies the extra check to it. */
public JobScenarioBuilder expectTaskUpdatedInStore(int taskIdx, int resubmit, Consumer<Task> check) {
    Task task = expectTaskEvent(taskIdx, resubmit, StoreEvent.TaskUpdated);
    check.accept(task);
    return this;
}

/** Asserts that task (taskIdx, resubmit) was removed from the active store (archived). */
public JobScenarioBuilder expectedTaskArchivedInStore(int taskIdx, int resubmit) {
    expectTaskEvent(taskIdx, resubmit, StoreEvent.TaskRemoved);
    return this;
}

/**
 * Asserts that no store event was recorded for task (taskIdx, resubmit).
 * autoAdvance exhausts its attempts and yields null when no event arrives.
 */
public JobScenarioBuilder expectNoStoreUpdate(int taskIdx, int resubmit) {
    Pair<StoreEvent, ?> event = autoAdvance(() -> storeEventsSubscriber.takeNextTaskStoreEvent(taskIdx, resubmit));
    assertThat(event).isNull();
    return this;
}

/** Asserts that no store event of any kind was recorded. */
public JobScenarioBuilder expectNoStoreUpdate() {
    Pair<StoreEvent, ?> event = autoAdvance(storeEventsSubscriber::takeNext);
    assertThat(event).isNull();
    return this;
}

/** Asserts that the compute provider no longer tracks the pod backing task (taskIdx, resubmit). */
public JobScenarioBuilder expectPodTerminated(int taskIdx, int resubmit) {
    Task task = findTaskInActiveState(taskIdx, resubmit);
    assertThat(computeProvider.hasComputeProviderTask(task.getId())).describedAs("Expected not to find task in Kube").isFalse();
    return this;
}

/** Asserts that the compute provider reports the task (taskIdx, resubmit) as finished. */
public JobScenarioBuilder expectComputeProviderTaskFinished(int taskIdx, int resubmit) {
    Task task = findTaskInActiveState(taskIdx, resubmit);
    assertThat(computeProvider.isTaskFinished(task.getId())).isTrue();
    return this;
}
/**
 * Simulates the scheduler placing the task, then emits a Launched compute-platform event.
 * The -1 error code marks this as a scheduler-originated (not container) transition.
 */
public JobScenarioBuilder triggerSchedulerLaunchEvent(int taskIdx, int resubmit) {
    Task task = findTaskInActiveState(taskIdx, resubmit);
    computeProvider.scheduleTask(task.getId());
    triggerComputePlatformEvent(taskIdx, resubmit, TaskState.Launched, "scheduled", -1);
    return this;
}

/** Emits a compute-platform event moving the task to the Launched state. */
public JobScenarioBuilder triggerComputePlatformLaunchEvent(int taskIdx, int resubmit) {
    return triggerComputePlatformEvent(taskIdx, resubmit, TaskState.Launched, "Task launched", 0);
}

/** Emits a compute-platform event moving the task to the StartInitiated state. */
public JobScenarioBuilder triggerComputePlatformStartInitiatedEvent(int taskIdx, int resubmit) {
    return triggerComputePlatformEvent(taskIdx, resubmit, TaskState.StartInitiated, "Starting container", 0);
}

/** Emits a compute-platform event moving the task to the Started state. */
public JobScenarioBuilder triggerComputePlatformStartedEvent(int taskIdx, int resubmit) {
    return triggerComputePlatformEvent(taskIdx, resubmit, TaskState.Started, "Task started", 0);
}

/** Emits a successful (exit code 0) Finished compute-platform event for the task. */
public JobScenarioBuilder triggerComputePlatformFinishedEvent(int taskIdx, int resubmit) {
    return triggerComputePlatformEvent(taskIdx, resubmit, TaskState.Finished, TaskStatus.REASON_NORMAL, 0);
}

/** Emits a Finished compute-platform event for a concrete task with the given exit code and reason. */
public JobScenarioBuilder triggerComputePlatformFinishedEvent(Task task, int errorCode, String reasonCode) {
    return triggerComputePlatformEvent(task, TaskState.Finished, reasonCode, errorCode);
}

/** Emits a Finished compute-platform event for task (taskIdx, resubmit) with the given exit code and reason. */
public JobScenarioBuilder triggerComputePlatformFinishedEvent(int taskIdx, int resubmit, int errorCode, String reasonCode) {
    return triggerComputePlatformEvent(taskIdx, resubmit, TaskState.Finished, reasonCode, errorCode);
}

/** Puts the stubbed store into the Broken state: every operation fails. */
public JobScenarioBuilder breakStore() {
    jobStore.setStoreState(StubbedJobStore.StoreState.Broken);
    return this;
}

/** Puts the stubbed store into the Slow state: operations hang. */
public JobScenarioBuilder slowStore() {
    jobStore.setStoreState(StubbedJobStore.StoreState.Slow);
    return this;
}

/** Restores the stubbed store to normal operation. */
public JobScenarioBuilder enableStore() {
    jobStore.setStoreState(StubbedJobStore.StoreState.Normal);
    return this;
}
/**
 * Waits for a store event of the given type for task (taskIdx, resubmit) and cross-checks
 * that the task carried by the event is the same task currently in the store (active store
 * for add/update events, archive for removal events).
 *
 * @return the task extracted from the store event
 */
private Task expectTaskEvent(int taskIdx, int resubmit, StoreEvent eventType) {
    // For TaskRemoved the task has already moved to the archive, so look it up there.
    Task lastInStore = autoAdvance(() -> eventType == StoreEvent.TaskRemoved
            ? jobStore.expectTaskInStoreArchive(jobId, taskIdx, resubmit)
            : jobStore.expectTaskInStore(jobId, taskIdx, resubmit)
    );
    Pair<StoreEvent, Task> storeEventPair = storeEventsSubscriber.takeNextTaskStoreEvent(taskIdx, resubmit);
    assertThat(storeEventPair.getLeft()).isEqualTo(eventType);
    Task task = storeEventPair.getRight();
    assertThat(task.getId())
            .describedAs("Task version in store different from the task in the store event: %s != %s", lastInStore.getId(), task.getId())
            .isEqualTo(lastInStore.getId());
    return task;
}
/** Resolves the task by (taskIdx, resubmit) and delegates to the task-based overload. */
private JobScenarioBuilder triggerComputePlatformEvent(int taskIdx, int resubmit, TaskState taskState, String reason, int errorCode) {
    Task task = jobStore.expectTaskInStore(jobId, taskIdx, resubmit);
    return triggerComputePlatformEvent(task, taskState, reason, errorCode);
}

/**
 * Simulates a compute-platform callback that moves the task into {@code taskState} by
 * applying a state-update function through jobOperations, then auto-advances the test
 * clock until the update completes.
 */
private JobScenarioBuilder triggerComputePlatformEvent(Task task, TaskState taskState, String reason, int errorCode) {
    String reasonMessage;
    if (taskState == TaskState.Finished) {
        reasonMessage = errorCode == 0 ? "Completed successfully" : "Container terminated with an error " + errorCode;
    } else {
        reasonMessage = "Task changed state to " + taskState;
    }
    AtomicBoolean done = new AtomicBoolean();
    // Launched tasks additionally inherit the context attached by the scheduler.
    final Map<String, String> newTaskContext = new HashMap<>();
    if (taskState == TaskState.Launched) {
        newTaskContext.putAll(computeProvider.getScheduledTaskContext(task.getId()));
    }
    TaskStatus taskStatus = JobModel.newTaskStatus()
            .withState(taskState)
            .withReasonCode(reason)
            .withReasonMessage(reasonMessage)
            .withTimestamp(testScheduler.now())
            .build();
    // Compose the standard state updater with a merge of the extra task context.
    Function<Task, Optional<Task>> changeFunction = currentTask ->
            JobManagerUtil.newTaskStateUpdater(taskStatus, titusRuntime)
                    .apply(currentTask)
                    .map(updated -> updated.toBuilder()
                            .withTaskContext(CollectionsExt.merge(updated.getTaskContext(), newTaskContext))
                            .build()
                    );
    jobOperations.updateTask(task.getId(),
            changeFunction,
            Trigger.ComputeProvider,
            String.format("ComputeProvider callback taskStatus=%s, reason=%s (%s)", taskState, reason, reasonMessage),
            callMetadata
    ).subscribe(() -> done.set(true));
    // The update is asynchronous on the test scheduler; advance time until it finishes.
    autoAdvanceUntil(done::get);
    assertThat(done.get()).isTrue();
    return this;
}
/**
 * Rewrites the persisted job record directly in the store (bypassing the reconciler),
 * bumping the record version so subsequent updates do not conflict.
 */
public JobScenarioBuilder modifyJobStoreRecord(Function<Job, Job> transformer) {
    Job<?> storedJob = jobStore.retrieveJob(jobId).toBlocking().first();
    Job updatedJob = VersionSuppliers.nextVersion(transformer.apply(storedJob), versionSupplier);
    // updateJob completes with no value; get() returning null means success.
    assertThat(jobStore.updateJob(updatedJob).get()).isNull();
    return this;
}
// Maximum number of clock-advance attempts before the auto-advance helpers give up.
// Previously duplicated as a magic number "5" in each helper below.
private static final int AUTO_ADVANCE_ATTEMPTS = 5;

/**
 * Repeatedly evaluates {@code action}, advancing the test clock between attempts,
 * until it returns true or the attempt limit is reached.
 *
 * @return true if the condition was satisfied within the attempt limit
 */
private boolean autoAdvanceUntil(Supplier<Boolean> action) {
    for (int i = 0; i < AUTO_ADVANCE_ATTEMPTS; i++) {
        if (action.get()) {
            return true;
        }
        advance();
    }
    return false;
}

/**
 * Repeatedly evaluates {@code action}, advancing the test clock between attempts,
 * until it produces a value without throwing. Returns null when all attempts fail;
 * callers assert on the result, so null signals "no value produced".
 */
private <T> T autoAdvance(Supplier<T> action) {
    for (int i = 0; i < AUTO_ADVANCE_ATTEMPTS; i++) {
        // Declared inside the loop: the value is only meaningful per attempt.
        Optional<T> result = ExceptionExt.doTry(action);
        if (result.isPresent()) {
            return result.get();
        }
        advance();
    }
    return null;
}

/**
 * Repeatedly runs {@code action}, advancing the test clock after each failure. The final
 * attempt runs outside the try/catch so its exception (if any) propagates to the caller.
 */
private void autoAdvanceUntilSuccessful(Runnable action) {
    for (int i = 0; i < AUTO_ADVANCE_ATTEMPTS; i++) {
        try {
            action.run();
            return;
        } catch (Throwable ignored) {
            // Intentionally swallowed; we retry after advancing the clock.
        }
        advance();
    }
    action.run();
}
/** Returns the current job from the reconciler, failing the test if it is gone. */
Job getJob() {
    return jobOperations.getJob(jobId).orElseThrow(() -> new IllegalArgumentException("Job not found: " + jobId));
}
/**
 * Returns tasks in strict order.
 * Batch tasks sort by their own index; service tasks sort by the synthetic index the
 * stubbed store assigned on insertion.
 */
List<Task> getActiveTasks() {
    return jobOperations.getTasks(jobId).stream().sorted((task1, task2) -> {
        if (task1 instanceof BatchJobTask) {
            // NOTE(review): assumes a job's tasks are homogeneous, so task2 is also batch.
            BatchJobTask batchTask1 = (BatchJobTask) task1;
            BatchJobTask batchTask2 = (BatchJobTask) task2;
            return Integer.compare(batchTask1.getIndex(), batchTask2.getIndex());
        }
        int task1Index = jobStore.getIndexAndResubmit(task1.getId()).get().getLeft();
        int task2Index = jobStore.getIndexAndResubmit(task2.getId()).get().getLeft();
        return Integer.compare(task1Index, task2Index);
    }).collect(Collectors.toList());
}
/**
 * Rx subscriber that buffers received events so tests can selectively consume them.
 * The take* methods scan the buffer in arrival order and remove/return the first
 * matching event, leaving non-matching events in place for later assertions.
 */
static class EventHolder<EVENT> extends Subscriber<EVENT> {

    private final StubbedJobStore jobStore;
    // Events received so far and not yet consumed by a take* method.
    private final List<EVENT> events = new ArrayList<>();

    EventHolder(StubbedJobStore jobStore) {
        this.jobStore = jobStore;
    }

    @Override
    public void onCompleted() {
        // Stream termination is irrelevant for these tests.
    }

    @Override
    public void onError(Throwable e) {
        // Errors are ignored; tests assert on buffered events only.
    }

    @Override
    public void onNext(EVENT e) {
        events.add(e);
    }

    /** Removes and returns the oldest buffered event, or null if none is available. */
    EVENT takeNext() {
        if (events.isEmpty()) {
            return null;
        }
        return events.remove(0);
    }

    /** Removes and returns the oldest store event carrying a Job payload, or null. */
    Pair<StoreEvent, Job> takeNextJobStoreEvent() {
        Iterator<EVENT> it = events.iterator();
        while (it.hasNext()) {
            Pair<StoreEvent, ?> event = (Pair<StoreEvent, ?>) it.next();
            if (event.getRight() instanceof Job) {
                it.remove();
                return Pair.of(event.getLeft(), (Job) event.getRight());
            }
        }
        return null;
    }

    /**
     * Removes and returns the oldest store event for the task identified by
     * (index, resubmit), or null if no such event is buffered.
     */
    Pair<StoreEvent, Task> takeNextTaskStoreEvent(int index, int resubmit) {
        Iterator<EVENT> it = events.iterator();
        while (it.hasNext()) {
            Pair<StoreEvent, ?> event = (Pair<StoreEvent, ?>) it.next();
            if (event.getRight() instanceof Task) {
                Task task = (Task) event.getRight();
                if (jobStore.hasIndexAndResubmit(task, index, resubmit)) {
                    it.remove();
                    return Pair.of(event.getLeft(), task);
                }
            }
        }
        return null;
    }

    /** Removes and returns the oldest JobUpdateEvent, or null. */
    JobUpdateEvent takeNextJobEvent() {
        Iterator<EVENT> it = events.iterator();
        while (it.hasNext()) {
            JobManagerEvent<?> event = (JobManagerEvent<?>) it.next();
            if (event instanceof JobUpdateEvent) {
                it.remove();
                return (JobUpdateEvent) event;
            }
        }
        return null;
    }

    /** Removes and returns the oldest TaskUpdateEvent for any task, or null. */
    TaskUpdateEvent takeNextTaskEvent() {
        Iterator<EVENT> it = events.iterator();
        while (it.hasNext()) {
            JobManagerEvent<?> event = (JobManagerEvent<?>) it.next();
            if (event instanceof TaskUpdateEvent) {
                it.remove();
                return (TaskUpdateEvent) event;
            }
        }
        return null;
    }

    /** Removes and returns the oldest TaskUpdateEvent for task (taskIdx, resubmit), or null. */
    public TaskUpdateEvent takeNextTaskEvent(int taskIdx, int resubmit) {
        Iterator<EVENT> it = events.iterator();
        while (it.hasNext()) {
            JobManagerEvent<?> event = (JobManagerEvent<?>) it.next();
            if (event instanceof TaskUpdateEvent) {
                Task task = ((TaskUpdateEvent) event).getCurrentTask();
                if (jobStore.hasIndexAndResubmit(task, taskIdx, resubmit)) {
                    it.remove();
                    return (TaskUpdateEvent) event;
                }
            }
        }
        return null;
    }

    /** Drops all buffered events. */
    void ignoreAvailableEvents() {
        events.clear();
    }
}
private void checkOperationSubscriberAndThrowExceptionIfError(TitusRxSubscriber<Void> subscriber) {
if (subscriber.hasError()) {
Throwable error = subscriber.getError();
if (error instanceof RuntimeException) {
throw (RuntimeException) error;
}
throw new RuntimeException(error);
}
assertThat(subscriber.isOpen()).isFalse();
}
private void checkOperationSubscriberAndThrowExceptionIfError(ExtTestSubscriber<Void> subscriber) {
Throwable error = subscriber.getError();
if (error != null) {
if (error instanceof RuntimeException) {
throw (RuntimeException) error;
}
throw new RuntimeException(error);
}
subscriber.assertOnCompleted();
}
}
| 9,932 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/integration/scenario/StubbedJobStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.integration.scenario;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Objects;
import java.util.Optional;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.common.util.rx.ObservableExt;
import com.netflix.titus.common.util.tuple.Pair;
import org.assertj.core.api.Assertions;
import rx.Completable;
import rx.Observable;
import rx.subjects.PublishSubject;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.isServiceJob;
public class StubbedJobStore implements JobStore {
/** Types of store mutations published on the event subject for test assertions. */
enum StoreEvent {
    JobAdded,
    JobRemoved,
    JobUpdated,
    TaskAdded,
    TaskRemoved,
    TaskUpdated,
}

/** Simulated health states of the store, used to exercise error-handling paths. */
enum StoreState {
    Normal,          // all operations succeed
    Broken,          // every operation fails with IOException
    BrokenForTasks,  // job operations succeed, task operations fail
    Slow,            // completables never complete (simulate a hang)
}
// Subject on which every store mutation is published as a (StoreEvent, payload) pair.
private final PublishSubject<Pair<StoreEvent, ?>> eventSubject = PublishSubject.create();
// Active (non-archived) jobs and tasks keyed by id.
private final ConcurrentMap<String, Job<?>> jobs = new ConcurrentHashMap<>();
private final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
// Full revision history: every stored version of a task (keyed by original id) / job.
private final ConcurrentMap<String, List<Task>> taskRevisionsByOriginalId = new ConcurrentHashMap<>();
private final ConcurrentMap<String, List<Job>> jobRevisions = new ConcurrentHashMap<>();
// Jobs/tasks that have been removed from the active collections.
private final ConcurrentMap<String, Job<?>> archivedJobs = new ConcurrentHashMap<>();
private final ConcurrentMap<String, Task> archivedTasks = new ConcurrentHashMap<>();
// Per-service-job synthetic task index (service tasks carry no index of their own).
private final ConcurrentMap<String, ServiceTaskIndex> jobToServiceTaskIndex = new ConcurrentHashMap<>();
// Current simulated health of the store; defaults to fully operational.
private StoreState storeState = StoreState.Normal;

/** Switches the simulated store health (Normal/Broken/BrokenForTasks/Slow). */
void setStoreState(StoreState storeState) {
    this.storeState = storeState;
}

/** Returns a snapshot copy of all active jobs. */
public Map<String, Job<?>> getJobsInternal() {
    return new HashMap<>(jobs);
}
/** Returns all stored versions of the given job (active or archived). */
public List<Job> getJobRevisions(String jobId) {
    Preconditions.checkState(jobs.containsKey(jobId) || archivedJobs.containsKey(jobId));
    return jobRevisions.get(jobId);
}

/** Returns, per original task id, all stored versions of that task for the given job. */
public Map<String, List<Task>> getTaskRevisions(String jobId) {
    Preconditions.checkState(jobs.containsKey(jobId) || archivedJobs.containsKey(jobId));
    return taskRevisionsByOriginalId.values().stream()
            .filter(tasks -> tasks.get(0).getJobId().equals(jobId))
            .collect(Collectors.toMap(ts -> ts.get(0).getOriginalId(), Function.identity()));
}

/** Returns the archived tasks of the given job, keyed by task id. */
public Map<String, Task> getArchivedTasksInternal(String jobId) {
    Preconditions.checkState(jobs.containsKey(jobId) || archivedJobs.containsKey(jobId));
    return archivedTasks.values().stream()
            .filter(task -> task.getJobId().equals(jobId))
            .collect(Collectors.toMap(Task::getId, Function.identity()));
}

/** Injects a finished task directly into the archive (test setup helper). */
public void addArchivedTaskInternal(Task task) {
    Preconditions.checkState(TaskState.isTerminalState(task.getStatus().getState()));
    archivedTasks.put(task.getId(), task);
}
/** Hot stream of all store mutation events. */
public Observable<Pair<StoreEvent, ?>> events() {
    return eventSubject;
}

/** Hot stream of store mutation events restricted to the given job and its tasks. */
public Observable<Pair<StoreEvent, ?>> events(String jobId) {
    return eventSubject
            .filter(event -> {
                if (event.getRight() instanceof Job) {
                    Job eventJob = (Job) event.getRight();
                    return jobId.equals(eventJob.getId());
                }
                if (event.getRight() instanceof Task) {
                    Task eventTask = (Task) event.getRight();
                    return jobId.equals(eventTask.getJobId());
                }
                return false;
            });
}
/** Returns the task's index, failing if the task is unknown to this store. */
public int getIndex(String taskId) {
    return getIndexAndResubmit(taskId).map(p -> p.getLeft()).orElseThrow(() -> new IllegalStateException("Task " + taskId + " is not registered in store"));
}

/**
 * Returns (index, resubmit number) for the task, looking in both the active store
 * and the archive. Batch tasks carry these values themselves; service tasks use the
 * synthetic per-job index maintained by this store.
 */
public Optional<Pair<Integer, Integer>> getIndexAndResubmit(String taskId) {
    Task task = tasks.getOrDefault(taskId, archivedTasks.get(taskId));
    if (task == null) {
        return Optional.empty();
    }
    if (task instanceof BatchJobTask) {
        BatchJobTask batchJobTask = (BatchJobTask) task;
        return Optional.of(Pair.of(batchJobTask.getIndex(), task.getResubmitNumber()));
    }
    ServiceTaskIndex serviceTaskIndex = jobToServiceTaskIndex.get(task.getJobId());
    if (serviceTaskIndex == null) {
        return Optional.empty();
    }
    return serviceTaskIndex.getTaskIndexAndResubmitById(taskId);
}

/** Returns true if the given task occupies position (taskIdx, resubmit) in its job. */
public boolean hasIndexAndResubmit(Task task, int taskIdx, int resubmit) {
    if (task instanceof BatchJobTask) {
        BatchJobTask batchJobTask = (BatchJobTask) task;
        return batchJobTask.getIndex() == taskIdx && batchJobTask.getResubmitNumber() == resubmit;
    }
    ServiceTaskIndex serviceTaskIndex = jobToServiceTaskIndex.get(task.getJobId());
    if (serviceTaskIndex == null) {
        return false;
    }
    return serviceTaskIndex.getTaskIdByIndexAndResubmit(taskIdx, resubmit)
            .map(foundTaskId -> foundTaskId.equals(task.getId()))
            .orElse(false);
}
/** Asserts task (taskIdx, resubmit) exists in the active store and returns it. */
public Task expectTaskInStore(String jobId, int taskIdx, int resubmit) {
    Optional<Task> match = findTask(jobId, taskIdx, resubmit, tasks);
    Assertions.assertThat(match)
            .describedAs("No task {job=%s, index=%s, resubmit=%s} found in task active store", jobId, taskIdx, resubmit)
            .isPresent();
    return match.get();
}

/** Asserts task (taskIdx, resubmit) exists in either the active store or the archive, and returns it. */
public Task expectTaskInStoreOrStoreArchive(String jobId, int taskIdx, int resubmit) {
    Optional<Task> match = findTask(jobId, taskIdx, resubmit, tasks);
    if (!match.isPresent()) {
        match = findTask(jobId, taskIdx, resubmit, archivedTasks);
    }
    Assertions.assertThat(match)
            .describedAs("No task {job=%s, index=%s, resubmit=%s} found in task active store or archive", jobId, taskIdx, resubmit)
            .isPresent();
    return match.get();
}

/** Asserts a task with the given id exists in either the active store or the archive, and returns it. */
public Task expectTaskInStoreOrStoreArchive(String taskId) {
    Task task = tasks.getOrDefault(taskId, archivedTasks.get(taskId));
    Assertions.assertThat(task)
            .describedAs("No task %s found in task active store or archive", taskId)
            .isNotNull();
    return task;
}

/** Asserts task (taskIdx, resubmit) exists in the archive and returns it. */
public Task expectTaskInStoreArchive(String jobId, int taskIdx, int resubmit) {
    Optional<Task> match = findTask(jobId, taskIdx, resubmit, archivedTasks);
    Assertions.assertThat(match)
            .describedAs("No task {job=%s, index=%s, resubmit=%s} found in task archive", jobId, taskIdx, resubmit)
            .isPresent();
    return match.get();
}
/**
 * Looks up the task at position (taskIdx, resubmit) of the given job in the supplied
 * map (either the active task store or the archive). Batch tasks are matched by their
 * own index/resubmit fields; service tasks go through the synthetic index.
 */
private Optional<Task> findTask(String jobId, int taskIdx, int resubmit, Map<String, Task> taskMap) {
    Job<?> job = jobs.getOrDefault(jobId, archivedJobs.get(jobId));
    if (JobFunctions.isBatchJob(job)) {
        return taskMap.values().stream().filter(task -> {
            if (!task.getJobId().equals(jobId)) {
                return false;
            }
            BatchJobTask batchJobTask = (BatchJobTask) task;
            return batchJobTask.getIndex() == taskIdx && batchJobTask.getResubmitNumber() == resubmit;
        }).findFirst();
    }
    // For service job we need to use our internal index
    ServiceTaskIndex serviceTaskIndex = jobToServiceTaskIndex.get(jobId);
    if (serviceTaskIndex == null) {
        return Optional.empty();
    }
    return serviceTaskIndex.getTaskIdByIndexAndResubmit(taskIdx, resubmit)
            .flatMap(taskId ->
                    taskMap.values().stream()
                            .filter(task -> {
                                if (!task.getJobId().equals(jobId)) {
                                    return false;
                                }
                                return task.getId().equals(taskId);
                            })
                            .findFirst()
            );
}
@Override
public Completable init() {
    // Stub store needs no initialization.
    return Completable.complete();
}

@Override
public Observable<Pair<List<Job<?>>, Integer>> retrieveJobs() {
    // The second pair element (failed-record count) is always 0 in this stub.
    return beforeObservable(() -> Observable.just(Pair.of(new ArrayList<>(jobs.values()), 0)));
}

@Override
public Observable<Job<?>> retrieveJob(String jobId) {
    return beforeObservable(() -> {
        Callable<Job<?>> jobCallable = () -> jobs.get(jobId);
        // Emits nothing (empty stream) when the job is unknown.
        return Observable.fromCallable(jobCallable).filter(Objects::nonNull);
    });
}

@Override
public Completable storeJob(Job job) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                addJobInternal(job);
                // Service jobs get a synthetic task index so tasks can be addressed by position.
                if (isServiceJob(job)) {
                    jobToServiceTaskIndex.put(job.getId(), new ServiceTaskIndex());
                }
                eventSubject.onNext(Pair.of(StoreEvent.JobAdded, job));
            }), false);
}

@Override
public Completable updateJob(Job job) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                addJobInternal(job);
                eventSubject.onNext(Pair.of(StoreEvent.JobUpdated, job));
            }), false);
}

/** Stores the job version and appends it to the job's revision history. */
private void addJobInternal(Job job) {
    jobs.put(job.getId(), job);
    // We make a copy of an array to allow for shallow copy when accessing this data.
    List<Job> currentRevisions = jobRevisions.get(job.getId());
    List<Job> newRevisions = new ArrayList<>();
    if (currentRevisions != null) {
        newRevisions.addAll(currentRevisions);
    }
    newRevisions.add(job);
    jobRevisions.put(job.getId(), newRevisions);
}
@Override
public Completable deleteJob(Job job) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                Job<?> removedJob = jobs.remove(job.getId());
                if (removedJob != null) {
                    // We sort tasks by index, to make events more predictable for easier evaluation in test code.
                    tasks.values().stream()
                            .sorted(Comparator.comparingInt(task2 -> getIndex(task2.getId())))
                            .filter(task -> task.getJobId().equals(job.getId()))
                            .forEach(task -> {
                                // Archive each of the job's tasks before archiving the job itself.
                                tasks.remove(task.getId());
                                archivedTasks.put(task.getId(), task);
                                eventSubject.onNext(Pair.of(StoreEvent.TaskRemoved, task));
                            });
                    archivedJobs.put(removedJob.getId(), removedJob);
                    eventSubject.onNext(Pair.of(StoreEvent.JobRemoved, job));
                }
            }), false);
}

@Override
public Observable<Pair<List<Task>, Integer>> retrieveTasksForJob(String jobId) {
    // The second pair element (failed-record count) is always 0 in this stub.
    return beforeObservable(() ->
            ObservableExt.fromCallable(() -> {
                        List<Task> jobTasks = tasks.values().stream().filter(t -> t.getJobId().equals(jobId)).collect(Collectors.toList());
                        return Collections.singletonList(Pair.of(jobTasks, 0));
                    }
            ));
}
@Override
public Observable<Task> retrieveTask(String taskId) {
    // Emits nothing (empty stream) when the task is not in the active store.
    return beforeObservable(() ->
            Observable.fromCallable(() -> tasks.get(taskId)).filter(Objects::nonNull)
    );
}
@Override
public Completable storeTask(Task task) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                Job<?> job = jobs.get(task.getJobId());
                if (job != null) {
                    addTaskInternal(task);
                    // Service tasks are registered in the synthetic per-job index.
                    if (isServiceJob(job)) {
                        jobToServiceTaskIndex.get(job.getId()).addTask(task);
                    }
                    eventSubject.onNext(Pair.of(StoreEvent.TaskAdded, task));
                } else {
                    throw new IllegalStateException("Adding task for unknown job " + task.getJobId());
                }
            }), true);
}
/**
 * Stores a new version of an existing task and publishes a TaskUpdated event.
 * Fails if the owning job is not in the active store.
 */
@Override
public Completable updateTask(Task task) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                if (jobs.get(task.getJobId()) != null) {
                    addTaskInternal(task);
                    eventSubject.onNext(Pair.of(StoreEvent.TaskUpdated, task));
                } else {
                    // Fixed: the message previously said "Adding task ...", copied from storeTask.
                    throw new IllegalStateException("Updating task for unknown job " + task.getJobId());
                }
            }), true);
}
/** Stores the task version and appends it to the revision history keyed by original id. */
private void addTaskInternal(Task task) {
    tasks.put(task.getId(), task);
    // We make a copy of an array to allow for shallow copy when accessing this data.
    List<Task> currentRevisions = taskRevisionsByOriginalId.get(task.getOriginalId());
    List<Task> newRevisions = new ArrayList<>();
    if (currentRevisions != null) {
        newRevisions.addAll(currentRevisions);
    }
    newRevisions.add(task);
    taskRevisionsByOriginalId.put(task.getOriginalId(), newRevisions);
}
@Override
public Completable replaceTask(Task oldTask, Task newTask) {
    // Store the replacement first, then archive the old task.
    return beforeCompletable(() -> storeTask(newTask).concatWith(deleteTask(oldTask)), true);
}

@Override
public Completable moveTask(Job jobFrom, Job jobTo, Task taskAfter) {
    return beforeCompletable(() ->
                    Completable.fromAction(() -> {
                        Preconditions.checkArgument(jobs.containsKey(jobFrom.getId()), "jobFrom=%s not found", jobFrom.getId());
                        Preconditions.checkArgument(jobs.containsKey(jobTo.getId()), "jobTo=%s not found", jobTo.getId());
                        Preconditions.checkArgument(tasks.containsKey(taskAfter.getId()), "task=%s not found", taskAfter.getId());
                        // Persist both updated jobs and the moved task, then reindex the task
                        // under the target job before publishing the corresponding events.
                        jobs.put(jobFrom.getId(), jobFrom);
                        jobs.put(jobTo.getId(), jobTo);
                        tasks.put(taskAfter.getId(), taskAfter);
                        jobToServiceTaskIndex.get(jobFrom.getId()).removeTask(taskAfter);
                        jobToServiceTaskIndex.get(jobTo.getId()).addTask(taskAfter);
                        eventSubject.onNext(Pair.of(StoreEvent.JobUpdated, jobFrom));
                        eventSubject.onNext(Pair.of(StoreEvent.JobUpdated, jobTo));
                        eventSubject.onNext(Pair.of(StoreEvent.TaskUpdated, taskAfter));
                    }),
            true
    );
}

@Override
public Completable deleteTask(Task task) {
    return beforeCompletable(() ->
            Completable.fromAction(() -> {
                Task removedTask = tasks.remove(task.getId());
                if (removedTask != null) {
                    // Deleted tasks are retained in the archive; no-op if already removed.
                    archivedTasks.put(removedTask.getId(), removedTask);
                    eventSubject.onNext(Pair.of(StoreEvent.TaskRemoved, task));
                }
            }), true);
}
@Override
public Observable<Task> retrieveArchivedTask(String taskId) {
    // Emits nothing (empty stream) when the task was never archived.
    return beforeObservable(() -> Observable.fromCallable(() -> archivedTasks.get(taskId)).filter(Objects::nonNull));
}

@Override
public Observable<Long> retrieveArchivedTaskCountForJob(String jobId) {
    // Note: not routed through beforeObservable, so it ignores the simulated store state.
    return Observable.fromCallable(() ->
            archivedTasks.values().stream().filter(task -> task.getJobId().equals(jobId)).count()
    );
}

@Override
public Completable deleteArchivedTask(String jobId, String taskId) {
    return Completable.defer(() -> {
        if (archivedTasks.remove(taskId) != null) {
            return Completable.complete();
        }
        return Completable.error(new IllegalStateException("not found"));
    });
}

@Override
public Observable<Job<?>> retrieveArchivedJob(String jobId) {
    return beforeObservable(() -> {
        Callable<Job<?>> jobCallable = () -> archivedJobs.get(jobId);
        return Observable.fromCallable(jobCallable).filter(Objects::nonNull);
    });
}

@Override
public Observable<Task> retrieveArchivedTasksForJob(String jobId) {
    return Observable.defer(() -> {
        List<Task> jobTasks = archivedTasks.values().stream().filter(task -> task.getJobId().equals(jobId)).collect(Collectors.toList());
        return Observable.from(jobTasks);
    });
}
/**
 * Wraps a write operation with the simulated store health: pass through when Normal,
 * fail when Broken (or BrokenForTasks and this is a task write), hang when Slow.
 *
 * @param taskAction true if the operation mutates tasks (affected by BrokenForTasks)
 */
private Completable beforeCompletable(Supplier<Completable> action, boolean taskAction) {
    return Completable.defer(() -> {
        switch (storeState) {
            case Normal:
                return action.get();
            case Broken:
                return Completable.error(new IOException("Store is broken"));
            case BrokenForTasks:
                if (taskAction) {
                    return Completable.error(new IOException("Store is broken for tasks"));
                }
                return action.get();
            case Slow:
                // Never completes, simulating a hung store.
                return Completable.never();
        }
        return Completable.error(new IllegalStateException("Unrecognized store state: " + storeState));
    });
}

/**
 * Wraps a read operation with the simulated store health. Reads only fail in the
 * fully Broken state; BrokenForTasks and Slow do not affect them.
 */
private <R> Observable<R> beforeObservable(Supplier<Observable<R>> action) {
    if (storeState == StoreState.Broken) {
        return Observable.error(new IOException("Store is broken"));
    }
    return action.get();
}
/**
 * Service tasks no longer carry an index. To simplify task access we assign an index
 * equivalent to each newly added task (a resubmitted task reuses the index assigned to
 * the original task).
 */
private static class ServiceTaskIndex {
    // Next never-used index value; only consulted when no freed index is available.
    private int nextIdx;
    // Indexes released by removed tasks, reused smallest-first for new tasks.
    // (Made final: these collections are mutated in place, never reassigned.)
    private final NavigableSet<Integer> freeIndexes = new TreeSet<>();
    // Index -> task ids in submission order; a task id's list position is its resubmit number.
    private final Map<Integer, List<String>> taskIds = new HashMap<>();

    private void addTask(Task task) {
        String originalId = task.getOriginalId();
        Optional<Pair<Integer, Integer>> taskIndexAndResubmit = getTaskIndexAndResubmitById(originalId);
        if (taskIndexAndResubmit.isPresent()) {
            // Resubmit of a known task: append under the original task's index slot.
            List<String> ids = taskIds.get(taskIndexAndResubmit.get().getLeft());
            Preconditions.checkArgument(ids.indexOf(task.getId()) == -1, "Task with id %s has been already created", task.getId());
            ids.add(task.getId());
        } else {
            // Brand new task: allocate an index slot for it.
            List<String> ids = new ArrayList<>();
            ids.add(task.getId());
            taskIds.put(nextFreeIdx(), ids);
        }
    }

    private void removeTask(Task task) {
        String originalId = task.getOriginalId();
        Optional<Pair<Integer, Integer>> taskIndexAndResubmit = getTaskIndexAndResubmitById(originalId);
        if (taskIndexAndResubmit.isPresent()) {
            int idx = taskIndexAndResubmit.get().getLeft();
            taskIds.remove(idx);
            // The freed index becomes available for the next brand-new task.
            freeIndexes.add(idx);
        }
    }

    /** Returns (index, resubmit) of the given task id, scanning all index slots. */
    private Optional<Pair<Integer, Integer>> getTaskIndexAndResubmitById(String taskId) {
        for (Map.Entry<Integer, List<String>> entry : taskIds.entrySet()) {
            int taskIndex = entry.getKey();
            int resubmit = entry.getValue().indexOf(taskId);
            if (resubmit >= 0) {
                return Optional.of(Pair.of(taskIndex, resubmit));
            }
        }
        return Optional.empty();
    }

    /** Returns the task id at (taskIdx, resubmit), if such a slot entry exists. */
    private Optional<String> getTaskIdByIndexAndResubmit(int taskIdx, int resubmit) {
        List<String> slotIds = taskIds.get(taskIdx);
        if (slotIds != null && slotIds.size() > resubmit) {
            return Optional.of(slotIds.get(resubmit));
        }
        return Optional.empty();
    }

    @SuppressWarnings("ConstantConditions")
    private int nextFreeIdx() {
        // Reuse the smallest freed index before minting a new one.
        if (freeIndexes.isEmpty()) {
            return nextIdx++;
        }
        return freeIndexes.pollFirst();
    }
}
}
| 9,933 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/common | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/common/action/TaskTimeoutChangeActionsTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.common.action;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.common.framework.reconciler.EntityHolder;
import com.netflix.titus.common.framework.reconciler.ModelActionHolder;
import com.netflix.titus.common.util.time.Clocks;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.master.jobmanager.service.common.action.task.TaskTimeoutChangeActions;
import com.netflix.titus.master.jobmanager.service.common.action.task.TaskTimeoutChangeActions.TimeoutStatus;
import org.junit.Test;
import static com.netflix.titus.common.util.CollectionsExt.first;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobGenerator.batchJobs;
import static com.netflix.titus.testkit.model.job.JobGenerator.batchTasks;
import static org.assertj.core.api.Assertions.assertThat;
public class TaskTimeoutChangeActionsTest {
private static final long DEADLINE_INTERVAL_MS = 100;
private final TestClock testClock = Clocks.test();
private final Job<BatchJobExt> job = batchJobs(batchJobDescriptors().getValue()).getValue();
private final BatchJobTask task = batchTasks(job).getValue();
@Test
public void testTimeout() throws Exception {
BatchJobTask launchedTask = createTaskInState(TaskState.Launched);
EntityHolder initialRoot = rootFrom(job, launchedTask);
EntityHolder initialChild = first(initialRoot.getChildren());
// Initially there is no timeout associated
TimeoutStatus timeoutStatus = TaskTimeoutChangeActions.getTimeoutStatus(initialChild, testClock);
assertThat(timeoutStatus).isEqualTo(TimeoutStatus.NotSet);
// Apply timeout
List<ModelActionHolder> modelActionHolders = TaskTimeoutChangeActions.setTimeout(
launchedTask.getId(),
launchedTask.getStatus().getState(),
DEADLINE_INTERVAL_MS,
testClock
).apply().toBlocking().first();
EntityHolder rootWithTimeout = modelActionHolders.get(0).getAction().apply(initialRoot).get().getLeft();
assertThat(TaskTimeoutChangeActions.getTimeoutStatus(first(rootWithTimeout.getChildren()), testClock)).isEqualTo(TimeoutStatus.Pending);
// Advance time to trigger timeout
testClock.advanceTime(DEADLINE_INTERVAL_MS, TimeUnit.MILLISECONDS);
assertThat(TaskTimeoutChangeActions.getTimeoutStatus(first(rootWithTimeout.getChildren()), testClock)).isEqualTo(TimeoutStatus.TimedOut);
}
private EntityHolder rootFrom(Job<BatchJobExt> job, BatchJobTask task) {
return EntityHolder.newRoot(job.getId(), job).addChild(EntityHolder.newRoot(task.getId(), task));
}
private BatchJobTask createTaskInState(TaskState taskState) {
return BatchJobTask.newBuilder(task).withStatus(TaskStatus.newBuilder().withState(taskState).build()).build();
}
} | 9,934 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/common | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/jobmanager/service/common/interceptor/RetryActionInterceptorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.common.interceptor;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.common.framework.reconciler.EntityHolder;
import com.netflix.titus.common.framework.reconciler.ModelAction;
import com.netflix.titus.common.framework.reconciler.ModelActionHolder;
import com.netflix.titus.common.util.retry.Retryer;
import com.netflix.titus.common.util.retry.Retryers;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.jobmanager.SampleTitusChangeActions;
import com.netflix.titus.master.jobmanager.service.common.action.TitusChangeAction;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.junit.Test;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static org.assertj.core.api.Assertions.assertThat;
/**
*/
public class RetryActionInterceptorTest {
// Attribute name under which the interceptor records retry state on the entity holder.
private static final String ATTR_NAME = "test.retry";
// Exponential backoff: 100ms initial delay, capped at 1s, at most 3 retries.
private static final long INITIAL_DELAY_MS = 100;
private static final long MAX_DELAY_MS = 1000;
private static final int RETRY_LIMIT = 3;
private static final Retryer RETRY_POLICY = Retryers.exponentialBackoff(INITIAL_DELAY_MS, MAX_DELAY_MS, TimeUnit.MILLISECONDS, RETRY_LIMIT);

// Test scheduler lets the test advance virtual time through the backoff delays.
private TestScheduler testScheduler = Schedulers.test();

private final RetryActionInterceptor retryInterceptor = new RetryActionInterceptor(ATTR_NAME, RETRY_POLICY, testScheduler);

private final ExtTestSubscriber<List<ModelActionHolder>> testSubscriber = new ExtTestSubscriber<>();
@Test
public void testSuccessfulActionsPassThrough() throws Exception {
retryInterceptor.apply(SampleTitusChangeActions.successfulJob()).apply().subscribe(testSubscriber);
List<ModelActionHolder> updateAction = testSubscriber.takeNext();
assertThat(updateAction.get(0).getAction()).isInstanceOf(RetryActionInterceptor.RemoveRetryRecord.class);
testSubscriber.assertOnCompleted();
}
@Test
public void testRetry() throws Exception {
TitusChangeAction changeAction = SampleTitusChangeActions.failingJob(2);
// First two calls should fail
ModelAction updateAction1 = expectUpdateActionOfType(changeAction, RetryActionInterceptor.RetryModelUpdateAction.class);
EntityHolder modelWithTag1 = expectAboveExecutionLimits(updateAction1, EntityHolder.newRoot("rootId", "data"));
expectBelowExecutionLimitsWhenTimeAdvanced(modelWithTag1, INITIAL_DELAY_MS);
ModelAction updateAction2 = expectUpdateActionOfType(changeAction, RetryActionInterceptor.RetryModelUpdateAction.class);
EntityHolder modelWithTag2 = expectAboveExecutionLimits(updateAction2, modelWithTag1);
expectBelowExecutionLimitsWhenTimeAdvanced(modelWithTag2, INITIAL_DELAY_MS * 2);
// Third call should succeed
ModelAction updateAction3 = expectUpdateActionOfType(changeAction, RetryActionInterceptor.RemoveRetryRecord.class);
expectNoRetryTag(updateAction3, modelWithTag2);
}
private ModelAction expectUpdateActionOfType(TitusChangeAction changeAction, Class<? extends ModelAction> updateActionType) {
ExtTestSubscriber<List<ModelActionHolder>> testSubscriber = new ExtTestSubscriber<>();
retryInterceptor.apply(changeAction).apply().subscribe(testSubscriber);
List<ModelActionHolder> updateAction = testSubscriber.takeNext();
assertThat(updateAction.get(0).getAction()).isInstanceOf(updateActionType);
return updateAction.get(0).getAction();
}
private EntityHolder expectAboveExecutionLimits(ModelAction updateAction, EntityHolder model) {
Optional<Pair<EntityHolder, EntityHolder>> pair = updateAction.apply(model);
assertThat(pair).isPresent();
EntityHolder modelWithTag = pair.get().getRight();
assertThat(retryInterceptor.executionLimits(modelWithTag)).isFalse();
return modelWithTag;
}
private void expectBelowExecutionLimitsWhenTimeAdvanced(EntityHolder modelWithTag, long delayMs) {
testScheduler.advanceTimeBy(delayMs / 2, TimeUnit.MILLISECONDS);
assertThat(retryInterceptor.executionLimits(modelWithTag)).isFalse();
testScheduler.advanceTimeBy(delayMs, TimeUnit.MILLISECONDS);
assertThat(retryInterceptor.executionLimits(modelWithTag)).isTrue();
}
private void expectNoRetryTag(ModelAction updateAction, EntityHolder model) {
Optional<Pair<EntityHolder, EntityHolder>> pair = updateAction.apply(model);
assertThat(pair).isPresent();
assertThat(pair.get().getRight().getAttributes()).isEmpty();
}
} | 9,935 |
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.common.interceptor;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.common.framework.reconciler.EntityHolder;
import com.netflix.titus.common.framework.reconciler.ModelAction;
import com.netflix.titus.common.framework.reconciler.ModelActionHolder;
import com.netflix.titus.common.util.limiter.ImmutableLimiters;
import com.netflix.titus.common.util.limiter.tokenbucket.ImmutableTokenBucket;
import com.netflix.titus.common.util.time.Clocks;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.master.jobmanager.SampleTitusChangeActions;
import com.netflix.titus.master.jobmanager.service.common.action.TitusChangeAction;
import com.netflix.titus.master.jobmanager.service.common.action.TitusModelAction;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.junit.Test;
import static com.netflix.titus.testkit.model.job.JobGenerator.batchJobs;
import static org.assertj.core.api.Assertions.assertThat;
public class RateLimiterInterceptorTest {
private static final String ATTR_NAME = "test.rateLimiter";
private static final long BUCKET_SIZE = 5;
private static final long REFILL_INTERVAL_MS = 100;
private final TestClock testClock = Clocks.test();
private final ImmutableTokenBucket.ImmutableRefillStrategy refillStrategy = ImmutableLimiters.refillAtFixedInterval(
1, REFILL_INTERVAL_MS, TimeUnit.MILLISECONDS, testClock
);
private final ImmutableTokenBucket tokenBucket = ImmutableLimiters.tokenBucket(BUCKET_SIZE, refillStrategy);
private final RateLimiterInterceptor rateLimiterInterceptor = new RateLimiterInterceptor(ATTR_NAME, tokenBucket);
@Test
public void testRateLimiting() throws Exception {
Job<BatchJobExt> job = batchJobs(JobDescriptorGenerator.oneTaskBatchJobDescriptor()).getValue();
// Use all tokens
EntityHolder nextRoot = EntityHolder.newRoot("root", job);
for (int i = 0; i < BUCKET_SIZE; i++) {
assertThat(rateLimiterInterceptor.executionLimits(nextRoot)).isEqualTo(BUCKET_SIZE - i);
ModelAction updateAction = executeRateLimitedAction(SampleTitusChangeActions.successfulJob());
nextRoot = updateAction.apply(nextRoot).get().getRight();
}
assertThat(rateLimiterInterceptor.executionLimits(nextRoot)).isEqualTo(0);
// Refill
testClock.advanceTime(REFILL_INTERVAL_MS, TimeUnit.MILLISECONDS);
assertThat(rateLimiterInterceptor.executionLimits(nextRoot)).isEqualTo(1);
}
private ModelAction executeRateLimitedAction(TitusChangeAction changeAction) {
ExtTestSubscriber<List<ModelActionHolder>> testSubscriber = new ExtTestSubscriber<>();
rateLimiterInterceptor.apply(changeAction).apply().subscribe(testSubscriber);
ModelAction updateAction = testSubscriber.takeNext().get(0).getAction();
assertThat(updateAction).isInstanceOf(TitusModelAction.class);
return updateAction;
}
} | 9,936 |
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.service.service;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.common.data.generator.DataGenerator;
import com.netflix.titus.common.runtime.TitusRuntimes;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeServiceJobCapacity;
import static com.netflix.titus.common.util.CollectionsExt.first;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobGenerator.serviceJobs;
import static com.netflix.titus.testkit.model.job.JobGenerator.serviceTasks;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
public class ScaleDownEvaluatorTest {
private DataGenerator<ServiceJobTask> taskDataGenerator = serviceTasks(
serviceJobs(changeServiceJobCapacity(oneTaskServiceJobDescriptor(), 1_000)).getValue()
);
@Test
public void testTasksInKillInitiatedStateAreSelectedFirst() {
List<ServiceJobTask> tasks = asList(
nextTask("zoneA", "agentA1", TaskState.Accepted),
nextTask("zoneA", "agentA1", TaskState.KillInitiated),
nextTask("zoneB", "agentB1", TaskState.Started),
nextTask("zoneB", "agentB1", TaskState.KillInitiated)
);
evaluateAndCheck(tasks, 3, TaskState.KillInitiated);
evaluateAndCheck(tasks, 2, TaskState.KillInitiated);
}
@Test
public void testTasksInAcceptedStateAreSelectedAfterTasksInKillInitiatedState() {
List<ServiceJobTask> tasks = asList(
nextTask("zoneA", "agentA1", TaskState.Accepted),
nextTask("zoneA", "agentA1", TaskState.Started),
nextTask("zoneA", "agentA1", TaskState.KillInitiated),
nextTask("zoneB", "agentB1", TaskState.Accepted),
nextTask("zoneB", "agentB1", TaskState.Started),
nextTask("zoneB", "agentB1", TaskState.KillInitiated)
);
evaluateAndCheck(tasks, 3, TaskState.KillInitiated, 2, TaskState.Accepted, 1);
evaluateAndCheck(tasks, 2, TaskState.KillInitiated, 2, TaskState.Accepted, 2);
}
@Test
public void testTasksInLaunchedOrStartInitiatedStateAreSelectedBeforeTasksInStartedState() {
List<ServiceJobTask> tasks = asList(
nextTask("zoneA", "agentA1", TaskState.Launched),
nextTask("zoneA", "agentA1", TaskState.Started),
nextTask("zoneB", "agentB1", TaskState.StartInitiated),
nextTask("zoneB", "agentB1", TaskState.Started)
);
evaluateAndCheck(tasks, 2, TaskState.Launched, 1, TaskState.StartInitiated, 1);
}
@Test
public void testScaleDownToZero() {
List<ServiceJobTask> tasks = asList(
nextTask("zoneA", "agentA1", TaskState.Launched),
nextTask("zoneA", "agentA1", TaskState.StartInitiated),
nextTask("zoneB", "agentB1", TaskState.Started),
nextTask("zoneB", "agentB1", TaskState.KillInitiated)
);
List<ServiceJobTask> toRemove = doEvaluate(tasks, 0);
assertThat(toRemove).hasSize(4);
}
@Test
public void testLargeTaskGroupsAreScaledDownFirst() {
List<ServiceJobTask> tasks = asList(
nextTask("zoneA", "agentA1", TaskState.Launched),
nextTask("zoneB", "agentB1", TaskState.Launched),
nextTask("zoneB", "agentB1", TaskState.Launched),
nextTask("zoneB", "agentB1", TaskState.StartInitiated),
nextTask("zoneB", "agentB1", TaskState.StartInitiated),
nextTask("zoneB", "agentB1", TaskState.StartInitiated),
nextTask("zoneC", "agentC1", TaskState.Launched)
);
List<ServiceJobTask> toRemove = doEvaluate(tasks, 3);
Map<String, List<ServiceJobTask>> toRemoveGrouped = groupByZone(toRemove);
assertThat(toRemoveGrouped).hasSize(1);
assertThat(first(toRemoveGrouped.keySet())).isEqualTo("zoneB");
assertThat(toRemoveGrouped.get("zoneB")).hasSize(4);
}
private List<ServiceJobTask> doEvaluate(List<ServiceJobTask> tasks, int expectedSize) {
List<ServiceJobTask> toRemove = ScaleDownEvaluator.selectTasksToTerminate(tasks, expectedSize, TitusRuntimes.test());
checkAreForDuplicates(toRemove);
return toRemove;
}
private void evaluateAndCheck(List<ServiceJobTask> tasks, int expectedSize, TaskState expectedSelectedTasksState) {
List<ServiceJobTask> toRemove = doEvaluate(tasks, expectedSize);
assertThat(toRemove).hasSize(tasks.size() - expectedSize);
toRemove.forEach(t -> assertThat(t.getStatus().getState()).isEqualTo(expectedSelectedTasksState));
}
private void evaluateAndCheck(List<ServiceJobTask> tasks, int expectedSize, TaskState expectedState1, int expectedSize1, TaskState expectedState2, int expectedSize2) {
List<ServiceJobTask> toRemove = doEvaluate(tasks, expectedSize);
assertThat(toRemove).hasSize(tasks.size() - expectedSize);
Map<TaskState, List<ServiceJobTask>> byState = toRemove.stream().collect(Collectors.groupingBy(t -> t.getStatus().getState()));
assertThat(byState.get(expectedState1)).hasSize(expectedSize1);
assertThat(byState.get(expectedState2)).hasSize(expectedSize2);
}
private void checkAreForDuplicates(List<ServiceJobTask> tasks) {
List<String> duplicatedIds = tasks.stream().collect(Collectors.groupingBy(Task::getId)).values().stream()
.filter(v -> v.size() > 1).map(l -> l.get(0).getId()).collect(Collectors.toList());
assertThat(duplicatedIds).isEmpty();
}
private Map<String, List<ServiceJobTask>> groupByZone(List<ServiceJobTask> toRemove) {
return toRemove.stream().collect(Collectors.groupingBy(t -> t.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_AGENT_ZONE)));
}
private ServiceJobTask nextTask(String zoneId, String agentId, TaskState taskState) {
ServiceJobTask task = taskDataGenerator.getValue().toBuilder()
.withStatus(TaskStatus.newBuilder().withState(taskState).build())
.addAllToTaskContext(ImmutableMap.of(
TaskAttributes.TASK_ATTRIBUTES_AGENT_ZONE, zoneId,
TaskAttributes.TASK_ATTRIBUTES_AGENT_INSTANCE_ID, agentId
))
.build();
this.taskDataGenerator = taskDataGenerator.apply();
return task;
}
} | 9,937 |
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.jobmanager.store;
import java.util.ArrayList;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.master.jobmanager.service.integration.scenario.StubbedJobStore;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Before;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ArchivedTasksGcTest {
private final ArchivedTasksGcConfiguration configuration = mock(ArchivedTasksGcConfiguration.class);
private final V3JobOperations jobOperations = mock(V3JobOperations.class);
private final StubbedJobStore jobStore = new StubbedJobStore();
private final TitusRuntime titusRuntime = TitusRuntimes.test();
private ArchivedTasksGc archivedTasksGc;
@Before
public void setUp() {
when(configuration.isGcEnabled()).thenReturn(true);
when(configuration.getGcInitialDelayMs()).thenReturn(1L);
when(configuration.getGcIntervalMs()).thenReturn(1L);
when(configuration.getGcTimeoutMs()).thenReturn(1000000L);
when(configuration.getMaxNumberOfArchivedTasksPerJob()).thenReturn(5L);
when(configuration.getMaxNumberOfArchivedTasksToGcPerIteration()).thenReturn(2);
when(configuration.getMaxRxConcurrency()).thenReturn(1);
archivedTasksGc = new ArchivedTasksGc(configuration, jobOperations, jobStore, titusRuntime);
}
@Test
public void gcArchivedTasks() {
when(configuration.isGcEnabled()).thenReturn(true);
}
@Test
public void gc() {
Job<ServiceJobExt> job = createServiceJob(10);
// Limit is max 2 tasks per iteration per job
archivedTasksGc.gc();
assertThat(jobStore.getArchivedTasksInternal(job.getId())).hasSize(8);
archivedTasksGc.gc();
assertThat(jobStore.getArchivedTasksInternal(job.getId())).hasSize(6);
archivedTasksGc.gc();
assertThat(jobStore.getArchivedTasksInternal(job.getId())).hasSize(5);
archivedTasksGc.gc();
assertThat(jobStore.getArchivedTasksInternal(job.getId())).hasSize(5);
}
private Job<ServiceJobExt> createServiceJob(int archivedTasksCount) {
Job<ServiceJobExt> job = JobGenerator.serviceJobs(JobDescriptorGenerator.oneTaskServiceJobDescriptor()).getValue();
jobStore.storeJob(job).get();
for (int i = 0; i < archivedTasksCount; i++) {
Task task = JobGenerator.serviceTasks(job).getValue().toBuilder().withId("task" + i).build();
task = JobFunctions.changeTaskStatus(task, TaskStatus.newBuilder().withState(TaskState.Finished).build());
jobStore.addArchivedTaskInternal(task);
}
when(jobOperations.getJobs()).thenReturn(new ArrayList<>(jobStore.getJobsInternal().values()));
return job;
}
} | 9,938 |
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration;
import java.security.Permission;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
@Category(IntegrationTest.class)
public class BaseIntegrationTest {
    protected static final long SHORT_TIMEOUT_MS = 5_000;
    protected static final long TEST_TIMEOUT_MS = 30_000;
    protected static final long LONG_TEST_TIMEOUT_MS = 60_000;
    /**
     * Security manager that permits everything except non-zero {@code System.exit} calls,
     * which are turned into an exception so a failing component cannot kill the test JVM.
     */
    static class PreventSystemExitSecurityManager extends SecurityManager {
        @Override
        public void checkPermission(Permission perm) {
            // Allow everything.
        }
        @Override
        public void checkPermission(Permission perm, Object context) {
            // Allow everything.
        }
        @Override
        public void checkExit(int status) {
            if (status == 0) {
                return;
            }
            throw new IllegalStateException("System exit requested with error " + status);
        }
    }
    private static final SecurityManager securityManager = new PreventSystemExitSecurityManager();
    /** Installs the exit-preventing security manager once, before any test in the class runs. */
    @BeforeClass
    public static void setSecurityManager() {
        if (System.getSecurityManager() != securityManager) {
            System.setSecurityManager(securityManager);
        }
    }
}
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v2;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.client.TitusMasterClient;
import com.netflix.titus.testkit.data.core.ApplicationSlaSample;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusMasterResource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.master.endpoint.v2.rest.Representation2ModelConvertions.asRepresentation;
import static org.assertj.core.api.Assertions.assertThat;
/**
* A collection of integration tests for application SLA management. These tests are driven by the application SLA REST
* API, and validate proper interaction with the storage layer and cloud provider (auto-scale actions).
*/
@Category(IntegrationTest.class)
public class CapacityManagementTest extends BaseIntegrationTest {
    @Rule
    public final TitusMasterResource titusMasterResource = new TitusMasterResource(
            EmbeddedTitusMasters.basicMasterWithKubeIntegration(EmbeddedKubeClusters.basicCluster(2)));
    private TitusMasterClient client;
    @Before
    public void setUp() throws Exception {
        client = titusMasterResource.getMaster().getClient();
    }
    /**
     * Adds a new critical tier SLA via the REST client and verifies the returned location
     * header points at the application management endpoint for that application.
     */
    @Test(timeout = 30_000)
    public void addCriticalTierJobSla() {
        ApplicationSLA sla = ApplicationSlaSample.CriticalLarge.build();
        String locationHeader = client.addApplicationSLA(asRepresentation(sla)).toBlocking().first();
        assertThat(locationHeader).contains("/api/v2/management/applications/" + sla.getAppName());
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.grpc.protogen.HealthCheckRequest;
import com.netflix.titus.grpc.protogen.HealthCheckResponse;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCell;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.grpc.protogen.HealthCheckResponse.ServingStatus.SERVING;
import static com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters.basicMasterWithKubeIntegration;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class HealthTest extends BaseIntegrationTest {
    @Rule
    public final TitusStackResource titusStackResource = new TitusStackResource(
            EmbeddedTitusCell.aTitusCell()
                    .withMaster(basicMasterWithKubeIntegration(EmbeddedKubeClusters.basicCluster(2)))
                    .withDefaultGateway()
                    .build()
    );
    /** Issues a gRPC health check and verifies both the top-level status and the embedded details. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void healthStatus() throws Exception {
        TestStreamObserver<HealthCheckResponse> responseObserver = new TestStreamObserver<>();
        titusStackResource.getOperations().getHealthClient().check(HealthCheckRequest.newBuilder().build(), responseObserver);
        HealthCheckResponse response = responseObserver.takeNext(10, TimeUnit.SECONDS);
        // Top-level response: no stream error, SERVING, and at least one component entry with details.
        assertThat(responseObserver.hasError()).isFalse();
        assertThat(response).isNotNull();
        assertThat(response.getStatus()).isEqualTo(SERVING);
        assertThat(response.getDetailsCount()).isGreaterThan(0);
        assertThat(response.getDetails(0).hasDetails()).isTrue();
        // First component's details must also be SERVING and carry uptime/election/activation data.
        HealthCheckResponse.Details details = response.getDetails(0).getDetails();
        assertThat(details.getStatus()).isEqualTo(SERVING);
        assertThat(details.hasUptime()).isTrue();
        assertThat(details.hasElectionTimestamp()).isTrue();
        assertThat(details.hasActivationTime()).isTrue();
        assertThat(details.hasActivationTimestamp()).isTrue();
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.loadbalancer;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import com.google.protobuf.Empty;
import com.netflix.titus.grpc.protogen.AddLoadBalancerRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersResult;
import com.netflix.titus.grpc.protogen.GetJobLoadBalancersResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.LoadBalancerId;
import com.netflix.titus.grpc.protogen.LoadBalancerServiceGrpc;
import com.netflix.titus.grpc.protogen.RemoveLoadBalancerRequest;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.loadbalancer.service.LoadBalancerTests;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCell;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.master.loadbalancer.service.LoadBalancerTests.buildPageSupplier;
import static com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters.basicMasterWithKubeIntegration;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
/**
* These integration tests validate proper plumbing of gRPC client requests
* through the Gateway and Master servers.
*/
@Category(IntegrationTest.class)
public class LoadBalancerGrpcTest extends BaseIntegrationTest {
private final Logger logger = LoggerFactory.getLogger(LoadBalancerGrpcTest.class);
private LoadBalancerServiceGrpc.LoadBalancerServiceStub client;
public final TitusStackResource titusStackResource = new TitusStackResource(
EmbeddedTitusCell.aTitusCell()
.withMaster(basicMasterWithKubeIntegration(EmbeddedKubeClusters.basicCluster(2)))
.withDefaultGateway()
.build(),
false // FIXME These tests fail when run via TitusFederation
);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource);
@Before
public void setUp() throws Exception {
client = titusStackResource.getOperations().getLoadBalancerGrpcClient();
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testPutLoadBalancer() throws Exception {
LoadBalancerTests.putLoadBalancersPerJob(1, 1, putLoadBalancerWithJobId);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testGetLoadBalancer() throws Exception {
String jobId = "Titus-123";
Set<LoadBalancerId> loadBalancerIds = LoadBalancerTests.getLoadBalancersForJob(jobId, getJobLoadBalancers);
assertThat(loadBalancerIds.size()).isEqualTo(0);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testRmLoadBalancer() throws Exception {
Map<String, Set<LoadBalancerId>> jobIdToLoadBalancersMap = LoadBalancerTests.putLoadBalancersPerJob(10, 50, putLoadBalancerWithJobId);
// Remove the load balancers for each job
jobIdToLoadBalancersMap.forEach((jobId, loadBalancerIdSet) -> {
loadBalancerIdSet.forEach(loadBalancerId -> {
LoadBalancerTests.removeLoadBalancerFromJob(jobId, loadBalancerId, removeLoadBalancers);
});
});
// Check that there are no load balancers left
jobIdToLoadBalancersMap.forEach((jobId, loadBalancerIdSet) -> {
assertThat(LoadBalancerTests.getLoadBalancersForJob(jobId, getJobLoadBalancers).size()).isEqualTo(0);
});
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testGetAllLoadBalancerPages() throws Exception {
int numJobs = 75;
int numLbs = 7;
Map<String, Set<LoadBalancerId>> verificationMap = LoadBalancerTests.putLoadBalancersPerJob(numJobs, numLbs, putLoadBalancerWithJobId);
int pageSize = 3;
int currentPageNum = 0;
GetAllLoadBalancersResult result;
do {
result = LoadBalancerTests.getAllLoadBalancers(buildPageSupplier(currentPageNum, pageSize), getAllLoadBalancers);
result.getJobLoadBalancersList().forEach(
getJobLoadBalancersResult -> {
String jobId = getJobLoadBalancersResult.getJobId();
assertThat(verificationMap.containsKey(jobId)).isTrue();
getJobLoadBalancersResult.getLoadBalancersList().forEach(
loadBalancerId -> {
// Mark the load balancer as checked
assertThat(verificationMap.get(jobId).remove(loadBalancerId)).isTrue();
}
);
}
);
currentPageNum++;
} while (result.getPagination().getHasMore());
// Make sure that all of the data was checked
verificationMap.forEach(
(jobId, loadBalancerSet) -> {
assertThat(loadBalancerSet.isEmpty()).isTrue();
}
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testGetAllLoadBalancerPagesWithCursor() throws Exception {
    // Same pagination walk as testGetAllLoadBalancerPages, but driven by the
    // opaque cursor returned with each page instead of an explicit page number.
    int jobCount = 75;
    int loadBalancersPerJob = 7;
    Map<String, Set<LoadBalancerId>> expected = LoadBalancerTests.putLoadBalancersPerJob(jobCount, loadBalancersPerJob, putLoadBalancerWithJobId);

    int pageSize = 3;
    String cursor = "";
    GetAllLoadBalancersResult page;
    do {
        page = LoadBalancerTests.getAllLoadBalancers(buildPageSupplier(cursor, pageSize), getAllLoadBalancers);
        for (GetJobLoadBalancersResult jobResult : page.getJobLoadBalancersList()) {
            String jobId = jobResult.getJobId();
            assertThat(expected.containsKey(jobId)).isTrue();
            for (LoadBalancerId loadBalancerId : jobResult.getLoadBalancersList()) {
                // Each load balancer must be reported exactly once across all pages.
                logger.info("checking lb {} exists for job {} - {}", loadBalancerId.getId(), jobId, expected.get(jobId).contains(loadBalancerId));
                assertThat(expected.get(jobId).remove(loadBalancerId)).isTrue();
            }
        }
        cursor = page.getPagination().getCursor();
    } while (page.getPagination().getHasMore());

    // Every association must have been observed by the time pagination ends.
    for (Set<LoadBalancerId> remaining : expected.values()) {
        assertThat(remaining.isEmpty()).isTrue();
    }
}
// Adapters bridging the shared LoadBalancerTests helpers to this test's gRPC client.

/** Adds a job/load balancer association through the gRPC client. */
private BiConsumer<AddLoadBalancerRequest, TestStreamObserver<Empty>> putLoadBalancerWithJobId = (request, addResponse) -> {
    client.addLoadBalancer(request, addResponse);
};

/** Fetches the load balancers associated with a single job through the gRPC client. */
private BiConsumer<JobId, TestStreamObserver<GetJobLoadBalancersResult>> getJobLoadBalancers = (request, getResponse) -> {
    client.getJobLoadBalancers(request, getResponse);
};

/** Fetches one page of all job/load balancer associations through the gRPC client. */
private BiConsumer<GetAllLoadBalancersRequest, TestStreamObserver<GetAllLoadBalancersResult>> getAllLoadBalancers = (request, getResponse) -> {
    client.getAllLoadBalancers(request, getResponse);
};

/** Removes a job/load balancer association through the gRPC client. */
private BiConsumer<RemoveLoadBalancerRequest, TestStreamObserver<Empty>> removeLoadBalancers = (request, removeResponse) -> {
    client.removeLoadBalancer(request, removeResponse);
};
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.embedded.EmbeddedTitusOperations;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.master.TitusMasterResource;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.rules.ExternalResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.common.util.ExceptionExt.rethrow;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioBuilderUtil.TIMEOUT_MS;
/**
 * JUnit {@link ExternalResource} that creates and tracks {@link JobScenarioBuilder} instances
 * against an embedded Titus stack. On setup it resolves the embedded operations handle, connects
 * to the V3 gRPC job management endpoint, and discovers any pre-existing jobs; on teardown it
 * stops all tracked job scenarios.
 */
public class JobsScenarioBuilder extends ExternalResource {

    private static final Logger logger = LoggerFactory.getLogger(JobsScenarioBuilder.class);

    // Exactly one of these sources is non-null (or neither, when operations are passed directly).
    private final TitusStackResource titusStackResource;
    private final TitusMasterResource titusMasterResource;

    private EmbeddedTitusOperations titusOperations;
    private JobManagementServiceGrpc.JobManagementServiceStub client;

    // All job scenarios created by schedule(...) or discovered by loadJobs().
    private final List<JobScenarioBuilder> jobScenarioBuilders = new ArrayList<>();

    public JobsScenarioBuilder(TitusStackResource titusStackResource) {
        this.titusStackResource = titusStackResource;
        this.titusMasterResource = null;
    }

    public JobsScenarioBuilder(TitusMasterResource titusMasterResource) {
        this.titusStackResource = null;
        this.titusMasterResource = titusMasterResource;
    }

    public JobsScenarioBuilder(EmbeddedTitusOperations titusOperations) {
        this.titusStackResource = null;
        this.titusMasterResource = null;
        this.titusOperations = titusOperations;
        try {
            // Not installed as a JUnit rule in this mode, so run the setup eagerly.
            before();
        } catch (Throwable error) {
            throw new IllegalStateException(error);
        }
    }

    @Override
    protected void before() throws Throwable {
        if (titusStackResource != null) {
            this.titusOperations = titusStackResource.getOperations();
        }
        if (titusMasterResource != null) {
            this.titusOperations = titusMasterResource.getOperations();
        }
        this.client = titusOperations.getV3GrpcClient();
        this.jobScenarioBuilders.addAll(loadJobs());
    }

    @Override
    protected void after() {
        stop();
    }

    /** Stops all tracked job scenarios (releasing their event stream subscriptions). */
    public void stop() {
        jobScenarioBuilders.forEach(JobScenarioBuilder::stop);
    }

    /**
     * Creates a job from the given descriptor, registers a {@link JobScenarioBuilder} for it and
     * applies the provided scenario to the new builder.
     *
     * @throws IllegalStateException if the create call fails or does not complete in time
     */
    public JobsScenarioBuilder schedule(JobDescriptor jobDescriptor,
                                        Function<JobScenarioBuilder, JobScenarioBuilder> jobScenario) {
        TestStreamObserver<JobId> responseObserver = new TestStreamObserver<>();
        client.withDeadlineAfter(TIMEOUT_MS, TimeUnit.MILLISECONDS)
                .createJob(GrpcJobManagementModelConverters.toGrpcJobDescriptor(jobDescriptor), responseObserver);

        JobId jobId;
        try {
            jobId = responseObserver.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
        Preconditions.checkNotNull(jobId, "Job create operation not completed in time");

        TestStreamObserver<JobChangeNotification> eventStream = new TestStreamObserver<>();
        client.observeJob(jobId, eventStream);

        JobScenarioBuilder jobScenarioBuilder = new JobScenarioBuilder(titusOperations, this, jobId.getId());
        jobScenarioBuilders.add(jobScenarioBuilder);
        jobScenario.apply(jobScenarioBuilder);
        return this;
    }

    /**
     * Schedules {@code times} copies of the job descriptor. When the descriptor carries a job
     * group sequence, each copy gets a unique "{sequence}_{index}" suffix to avoid collisions.
     */
    public JobsScenarioBuilder schedule(JobDescriptor<?> jobDescriptor,
                                        int times,
                                        Function<JobScenarioBuilder, JobScenarioBuilder> jobScenario) throws Exception {
        String sequence = jobDescriptor.getJobGroupInfo().getSequence();
        for (int i = 0; i < times; i++) {
            int finalI = i;
            JobDescriptor effective = sequence.isEmpty()
                    ? jobDescriptor
                    : jobDescriptor.but(jd -> jd.getJobGroupInfo().toBuilder().withSequence(sequence + '_' + finalI));
            schedule(effective, jobScenario);
        }
        return this;
    }

    /** Like {@link #schedule(JobDescriptor, Function)}, but returns the created core {@link Job}. */
    public Job scheduleAndReturnJob(JobDescriptor jobDescriptor,
                                    Function<JobScenarioBuilder, JobScenarioBuilder> jobScenario) {
        AtomicReference<Job> jobRef = new AtomicReference<>();
        schedule(jobDescriptor, js -> jobScenario.apply(js).inJob(jobRef::set));
        Preconditions.checkNotNull(jobRef.get(), "Job not set after scheduling");
        return jobRef.get();
    }

    /**
     * Returns the tracked scenario builder for the given job id.
     *
     * @throws IllegalArgumentException if no scenario with that job id is tracked
     */
    public JobScenarioBuilder takeJob(String jobId) {
        return jobScenarioBuilders.stream().filter(j -> j.getJobId().equals(jobId)).findFirst().orElseThrow(() -> new IllegalArgumentException("Job not found: " + jobId));
    }

    /** Returns the tracked scenario builder at the given creation index. */
    public JobScenarioBuilder takeJob(int idx) {
        Preconditions.checkArgument(idx < jobScenarioBuilders.size(), "Invalid job index: %s (max=%s)", idx, jobScenarioBuilders.size());
        return jobScenarioBuilders.get(idx);
    }

    /** Returns the job id of the scenario at the given creation index. */
    public String takeJobId(int idx) {
        return takeJob(idx).getJobId();
    }

    /** Returns the task id at position {@code taskIdx} of the job at creation index {@code jobIdx}. */
    public String takeTaskId(int jobIdx, int taskIdx) {
        return takeJob(jobIdx).getTaskByIndex(taskIdx).getTask().getId();
    }

    /** Returns the task id at position {@code taskIdx} of the job with the given id. */
    public String takeTaskId(String jobId, int taskIdx) {
        return takeJob(jobId).getTaskByIndex(taskIdx).getTask().getId();
    }

    /**
     * Evaluates the predicate against the current snapshots of all tracked jobs.
     *
     * @throws IllegalStateException if the predicate fails
     */
    public JobsScenarioBuilder assertJobs(Predicate<List<Job>> predicate) {
        List<Job> jobs = jobScenarioBuilders.stream().map(JobScenarioBuilder::getJob).collect(Collectors.toList());
        Preconditions.checkState(predicate.test(jobs), "Jobs collection predicate evaluation fails (job size=%s)", jobs.size());
        return this;
    }

    /** Verifies entity version ordering for every tracked job scenario. */
    public void expectVersionsOrdered() {
        jobScenarioBuilders.forEach(JobScenarioBuilder::expectVersionsOrdered);
    }

    /**
     * Loads all jobs currently known to TitusMaster (page size 1000).
     * <p>
     * During TitusMaster reboot we reuse the same ephemeral GRPC port with the same client side
     * ManagedChannel in TitusGateway. The channel may have a broken connection which after reboot
     * may result in a failure here, and since we do not have a retry interceptor installed, we
     * make a few attempts directly.
     *
     * @throws IllegalStateException if all attempts fail
     */
    private List<JobScenarioBuilder> loadJobs() {
        JobQuery query = JobQuery.newBuilder().setPage(Page.newBuilder().setPageSize(1000)).build();
        Throwable lastFailure = null;
        for (int i = 0; i < 3; i++) {
            try {
                TestStreamObserver<JobQueryResult> responseObserver = new TestStreamObserver<>();
                client.findJobs(query, responseObserver);
                JobQueryResult queryResult = rethrow(() -> responseObserver.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS));
                List<JobScenarioBuilder> result = new ArrayList<>();
                queryResult.getItemsList().forEach(job ->
                        result.add(new JobScenarioBuilder(titusOperations, this, job.getId()))
                );
                return result;
            } catch (Exception e) {
                lastFailure = e;
                logger.info("Cannot load jobs from TitusMaster (might be not ready yet). Waiting 1sec before next try...");
                try {
                    Thread.sleep(1_000);
                } catch (InterruptedException interrupted) {
                    // Fix: restore the interrupt flag instead of silently discarding it,
                    // so callers higher up the stack can still observe the interruption.
                    Thread.currentThread().interrupt();
                }
            }
        }
        throw new IllegalStateException("Cannot load jobs: " + lastFailure, lastFailure);
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.model.EfsMount;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeUtil;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1VolumeMount;
/**
 * Predicates used by integration test scenarios to assert properties of jobs and of the pods
 * created for their tasks.
 */
public class JobAsserts {

    /** Matches a job whose current status is in the given state. */
    public static Predicate<Job> jobInState(JobState expectedState) {
        return job -> expectedState == job.getStatus().getState();
    }

    /**
     * Matches a pod whose resource dimensions equal the requested container resources
     * (disk is compared against the larger of the requested value and {@code diskMbMin}).
     */
    public static Predicate<V1Pod> podWithResources(ContainerResources containerResources, int diskMbMin) {
        return pod -> {
            ResourceDimension podResources = EmbeddedKubeUtil.fromPodToResourceDimension(pod);
            int expectedDiskMB = Math.max(containerResources.getDiskMB(), diskMbMin);
            return podResources.getCpu() == containerResources.getCpu()
                    && podResources.getMemoryMB() == containerResources.getMemoryMB()
                    && podResources.getDiskMB() == expectedDiskMB
                    && podResources.getNetworkMbs() == containerResources.getNetworkMbps();
        };
    }

    /**
     * FIXME Incomplete checks as some parsing is required.
     */
    public static Predicate<V1Pod> podWithEfsMounts(List<EfsMount> expectedEfsMounts) {
        return pod -> {
            // Only volume mounts whose name starts with "efs" are EFS mounts.
            List<V1VolumeMount> efsVolumes = pod.getSpec().getContainers().get(0).getVolumeMounts().stream()
                    .filter(volume -> volume.getName().startsWith("efs"))
                    .collect(Collectors.toList());
            if (efsVolumes.size() != expectedEfsMounts.size()) {
                return false;
            }
            // Mounts are compared pairwise in order.
            int index = 0;
            for (V1VolumeMount efsVolume : efsVolumes) {
                if (!efsVolume.getMountPath().equals(expectedEfsMounts.get(index).getMountPoint())) {
                    return false;
                }
                index++;
            }
            return true;
        };
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.protobuf.Empty;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.common.aws.AwsInstanceType;
import com.netflix.titus.grpc.protogen.EvictionServiceGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.TaskAttributesDeleteRequest;
import com.netflix.titus.grpc.protogen.TaskAttributesUpdate;
import com.netflix.titus.grpc.protogen.TaskKillRequest;
import com.netflix.titus.grpc.protogen.TaskMoveRequest;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.grpc.protogen.TaskTerminateRequest;
import com.netflix.titus.testkit.embedded.EmbeddedTitusOperations;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeCluster;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeNode;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeUtil;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import io.kubernetes.client.openapi.models.V1Pod;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import static com.jayway.awaitility.Awaitility.await;
import static com.netflix.titus.common.util.ExceptionExt.rethrow;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioBuilderUtil.TIMEOUT_MS;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioBuilderUtil.discoverActiveTest;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toCoreTaskState;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcTaskState;
import static java.util.Arrays.asList;
/**
 * Builder for driving and asserting the lifecycle of a single task within a job scenario.
 * Task state change notifications are captured from the supplied event stream and consumed
 * in arrival order by the {@code expect*} methods; state transitions on the "agent" side are
 * simulated through the embedded Kube cluster.
 */
public class TaskScenarioBuilder {

    private static final Logger logger = LoggerFactory.getLogger(TaskScenarioBuilder.class);

    private final JobManagementServiceGrpc.JobManagementServiceStub jobClient;
    private final EvictionServiceGrpc.EvictionServiceBlockingStub evictionClient;
    private final JobScenarioBuilder jobScenarioBuilder;

    // Buffers task update events; getTask() and expectTaskUpdate(...) read from this subscriber.
    private final ExtTestSubscriber<Task> eventStreamSubscriber = new ExtTestSubscriber<>();
    private final Subscription eventStreamSubscription;

    private final EmbeddedKubeCluster kubeCluster;

    public TaskScenarioBuilder(EmbeddedTitusOperations titusOperations,
                               JobScenarioBuilder jobScenarioBuilder,
                               Observable<Task> eventStream) {
        this.jobClient = titusOperations.getV3GrpcClient();
        this.kubeCluster = titusOperations.getKubeCluster();
        this.evictionClient = titusOperations.getBlockingGrpcEvictionClient();
        this.jobScenarioBuilder = jobScenarioBuilder;
        this.eventStreamSubscription = eventStream.subscribe(eventStreamSubscriber);
    }

    /** Releases the task event stream subscription. */
    void stop() {
        eventStreamSubscription.unsubscribe();
    }

    /** Returns the parent job scenario builder. */
    public JobScenarioBuilder toJob() {
        return jobScenarioBuilder;
    }

    /** Returns the latest task snapshot received from the event stream. */
    public Task getTask() {
        return eventStreamSubscriber.getLatestItem();
    }

    /** Requests a task kill but does not complete the pod termination. */
    public TaskScenarioBuilder moveToKillInitiated() {
        return internalKill(false, false, false);
    }

    /** Kills the task and completes the pod termination. */
    public TaskScenarioBuilder killTask() {
        return internalKill(false, false, true);
    }

    /**
     * Completes the termination of a task that is already in the KillInitiated state.
     *
     * @throws IllegalStateException if the task is not in KillInitiated
     */
    public TaskScenarioBuilder completeKillInitiated() {
        Task task = getTask();
        Preconditions.checkState(task.getStatus().getState() == TaskState.KillInitiated, "Expected task in KillInitiated state");
        kubeCluster.moveToFinishedSuccess(task.getId());
        return this;
    }

    /** Kills the task and shrinks the job's desired size. */
    public TaskScenarioBuilder killTaskAndShrink() {
        return internalKill(true, false, true);
    }

    /** Kills the task, shrinks the job and requires the min size constraint to be honored. */
    public TaskScenarioBuilder killTaskAndShrinkWithMinCheck() {
        return internalKill(true, true, true);
    }

    /**
     * Issues a TaskKill request and waits for the KillInitiated event; optionally finishes
     * the pod termination afterwards.
     */
    private TaskScenarioBuilder internalKill(boolean shrink, boolean preventMinSizeUpdate, boolean moveToFinished) {
        String taskId = getTask().getId();
        logger.info("[{}] Killing task: jobId={}, taskId={}, shrink={}, preventMinSizeUpdate={}...", discoverActiveTest(),
                jobScenarioBuilder.getJobId(), taskId, shrink, preventMinSizeUpdate);
        Stopwatch stopWatch = Stopwatch.createStarted();

        TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
        jobClient.killTask(
                TaskKillRequest.newBuilder()
                        .setTaskId(taskId)
                        .setShrink(shrink)
                        .setPreventMinSizeUpdate(preventMinSizeUpdate)
                        .build(),
                responseObserver
        );
        rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
        expectStateUpdates(TaskStatus.TaskState.KillInitiated);
        if (moveToFinished) {
            kubeCluster.moveToFinishedSuccess(taskId);
        }

        logger.info("[{}] Task {} killed in {}[ms]", discoverActiveTest(), taskId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Adds or replaces the given attributes on the task. */
    public TaskScenarioBuilder updateTaskAttributes(Map<String, String> attributes) {
        String taskId = getTask().getId();
        logger.info("[{}] Updating attributes of task {} of job {}...", discoverActiveTest(), taskId, jobScenarioBuilder.getJobId());
        Stopwatch stopWatch = Stopwatch.createStarted();

        TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
        jobClient.updateTaskAttributes(TaskAttributesUpdate.newBuilder().setTaskId(taskId).putAllAttributes(attributes).build(), responseObserver);
        rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));

        logger.info("[{}] Task {} updated in {}[ms]", discoverActiveTest(), taskId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Removes the given attribute keys from the task. */
    public TaskScenarioBuilder deleteTaskAttributes(List<String> keys) {
        String taskId = getTask().getId();
        logger.info("[{}] Deleting attributes of task {} of job {}...", discoverActiveTest(), taskId, jobScenarioBuilder.getJobId());
        Stopwatch stopWatch = Stopwatch.createStarted();

        TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
        jobClient.deleteTaskAttributes(TaskAttributesDeleteRequest.newBuilder().setTaskId(taskId).addAllKeys(keys).build(), responseObserver);
        rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));

        logger.info("[{}] Task {} updated in {}[ms]", discoverActiveTest(), taskId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Terminates the task through the eviction service (blocking call). */
    public TaskScenarioBuilder evictTask() {
        String taskId = getTask().getId();
        logger.info("[{}] Evicting task {} of job {}...", discoverActiveTest(), taskId, jobScenarioBuilder.getJobId());
        Stopwatch stopWatch = Stopwatch.createStarted();

        evictionClient.terminateTask(TaskTerminateRequest.newBuilder().setTaskId(taskId).setReason("Test").build());

        logger.info("[{}] Task {} evicted in {}[ms]", discoverActiveTest(), taskId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Moves the task from its current job to {@code targetJobId}. */
    public TaskScenarioBuilder moveTask(String targetJobId) {
        String taskId = getTask().getId();
        logger.info("[{}] Moving Task {} to another job {}", discoverActiveTest(), taskId, targetJobId);
        Stopwatch stopWatch = Stopwatch.createStarted();

        TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
        jobClient.moveTask(TaskMoveRequest.newBuilder()
                        .setSourceJobId(jobScenarioBuilder.getJobId())
                        .setTargetJobId(targetJobId)
                        .setTaskId(taskId)
                        .build(),
                responseObserver);
        rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));

        logger.info("[{}] Task {} moved to job {} in {}[ms]", discoverActiveTest(), taskId, targetJobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Applies a reusable scenario template to this builder. */
    public TaskScenarioBuilder template(Function<TaskScenarioBuilder, TaskScenarioBuilder> templateFun) {
        return templateFun.apply(this);
    }

    /** Simulates a task execution failure on the agent. */
    public TaskScenarioBuilder failTaskExecution() {
        logger.info("[{}] Transition task to failed state", discoverActiveTest());
        kubeCluster.moveToFinishedFailed(getTask().getId(), "Simulated task execution failure");
        return this;
    }

    /** Marks the task as failed on the agent. */
    public TaskScenarioBuilder transitionToFailed() {
        logger.info("[{}] Moving task to failed state", discoverActiveTest());
        kubeCluster.moveToFinishedFailed(getTask().getId(), "marked as failed");
        return this;
    }

    /** Drives the agent-side pod through the given sequence of task states. */
    public TaskScenarioBuilder transitionTo(TaskStatus.TaskState... taskStates) {
        logger.info("[{}] Transition task on agent through states {}", discoverActiveTest(), asList(taskStates));

        Task task = getTask();
        for (TaskStatus.TaskState taskState : taskStates) {
            if (taskState == TaskStatus.TaskState.StartInitiated) {
                kubeCluster.moveToStartInitiatedState(task.getId());
            } else if (taskState == TaskStatus.TaskState.Started) {
                kubeCluster.moveToStartedState(task.getId());
            } else if (taskState == TaskStatus.TaskState.KillInitiated) {
                kubeCluster.moveToKillInitiatedState(task.getId(), 0);
            } else if (taskState == TaskStatus.TaskState.Finished) {
                kubeCluster.moveToFinishedSuccess(task.getId());
            }
        }
        return this;
    }

    /**
     * Drives the agent-side pod from its current state up to the given state, skipping
     * KillInitiated (relies on the proto enum ordinal order of the lifecycle states).
     */
    public TaskScenarioBuilder transitionUntil(TaskStatus.TaskState taskState) {
        logger.info("[{}] Transition task on agent to state {}", discoverActiveTest(), taskState);

        String taskId = getTask().getId();

        TaskState coreTaskState = EmbeddedKubeUtil.getPodState(kubeCluster.getPods().get(taskId));
        TaskStatus.TaskState currentState = toGrpcTaskState(coreTaskState);

        int startingPoint = currentState.ordinal();
        int targetPoint = taskState.ordinal();
        for (int next = startingPoint + 1; next <= targetPoint; next++) {
            TaskStatus.TaskState nextState = TaskStatus.TaskState.forNumber(next);
            if (nextState != TaskStatus.TaskState.KillInitiated) {
                if (nextState == TaskStatus.TaskState.StartInitiated) {
                    kubeCluster.moveToStartInitiatedState(taskId);
                } else if (nextState == TaskStatus.TaskState.Started) {
                    kubeCluster.moveToStartedState(taskId);
                } else if (nextState == TaskStatus.TaskState.Finished) {
                    kubeCluster.moveToFinishedSuccess(taskId);
                }
            }
        }
        return this;
    }

    /** Waits until the task's pod is visible in the embedded Kube cluster. */
    public TaskScenarioBuilder expectAllTasksInKube() {
        Task task = getTask();
        logger.info("[{}] Expecting task {} Kube", discoverActiveTest(), task.getId());
        // Fix: the original wrapped this in a catch (Exception e) { throw e; } no-op.
        await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> kubeCluster.getPods().containsKey(task.getId()));
        return this;
    }

    /** Schedules the task, expects the Launched/SCHEDULED event, and starts pod initialization. */
    public TaskScenarioBuilder expectTaskOnAgent() {
        Task task = getTask();
        logger.info("[{}] Expecting task {} on agent", discoverActiveTest(), task.getId());
        kubeCluster.schedule();
        expectStateAndReasonUpdateSkipOther(TaskStatus.TaskState.Launched, "SCHEDULED");
        kubeCluster.moveToStartInitiatedState(task.getId());
        return this;
    }

    /**
     * Verifies the task's pod was placed on a node of the expected AWS instance type.
     *
     * @throws IllegalArgumentException on a mismatch
     */
    public TaskScenarioBuilder expectInstanceType(AwsInstanceType expectedInstanceType) {
        logger.info("[{}] Expecting current task to run on instance type {}", discoverActiveTest(), expectedInstanceType);
        Task task = getTask();
        V1Pod pod = kubeCluster.getPods().get(task.getId());
        String nodeId = pod.getSpec().getNodeName();
        EmbeddedKubeNode node = kubeCluster.getFleet().getNodes().get(nodeId);
        String instanceType = node.getServerGroup().getInstanceType();
        Preconditions.checkArgument(
                expectedInstanceType.name().equalsIgnoreCase(instanceType),
                "Task is expected to run on AWS instance %s, but is running on %s", expectedInstanceType, instanceType
        );
        return this;
    }

    /**
     * Verifies the task's pod was placed in the expected availability zone.
     *
     * @throws IllegalArgumentException on a mismatch
     */
    public TaskScenarioBuilder expectZoneId(String expectedZoneId) {
        logger.info("[{}] Expecting current task to run in zone {}", discoverActiveTest(), expectedZoneId);
        String taskId = getTask().getId();
        String zone = kubeCluster.getPlacementZone(taskId);
        Preconditions.checkArgument(
                zone.equalsIgnoreCase(expectedZoneId),
                "Task %s is expected to run in zone %s, but is running in %s (node %s)",
                taskId, expectedZoneId, zone, kubeCluster.getPods().get(taskId).getSpec().getNodeName()
        );
        return this;
    }

    /**
     * Verifies the task context contains the given key/value pair.
     *
     * @throws IllegalArgumentException on a mismatch
     */
    public TaskScenarioBuilder expectTaskContext(String key, String value) {
        logger.info("[{}] Expecting current task to have taskContext entry {},{}", discoverActiveTest(), key, value);
        Preconditions.checkArgument(
                getTask().getTaskContext().getOrDefault(key, "").equals(value),
                // Fix: Guava uses %s placeholders; the original "{}" were never substituted.
                "Task context does not contain %s,%s", key, value
        );
        return this;
    }

    /**
     * Verifies the pod carries the expected static IP allocation.
     *
     * @throws IllegalArgumentException on a mismatch
     */
    public TaskScenarioBuilder expectStaticIpAllocationInPod(String ipAllocationId) {
        logger.info("[{}] Expecting pod to have static IP allocation {}", discoverActiveTest(), ipAllocationId);
        String actual = kubeCluster.getEmbeddedPods().get(getTask().getId()).getStaticIpAllocation();
        Preconditions.checkArgument(
                ipAllocationId.equals(actual),
                // Fix: Guava uses %s placeholders; the original "{}" were never substituted.
                "Different static IP allocations: expected=%s, actual=%s", ipAllocationId, actual
        );
        return this;
    }

    /** Consumes events from the stream and requires them to arrive in the given state order. */
    public TaskScenarioBuilder expectStateUpdates(TaskStatus.TaskState... expectedStates) {
        logger.info("[{}] Expecting sequence of events with task states {}...", discoverActiveTest(), asList(expectedStates));

        Stopwatch stopWatch = Stopwatch.createStarted();
        for (TaskStatus.TaskState expectedState : expectedStates) {
            TaskState expectedCoreState = toCoreTaskState(expectedState);
            expectTaskUpdate(task -> task.getStatus().getState() == expectedCoreState, "Expected state " + expectedCoreState);
        }

        logger.info("[{}] Expected sequence of events with task states {} received in {}[ms]", discoverActiveTest(), asList(expectedStates), stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /**
     * Waits for an event with the given state and reason code, tolerating (and skipping)
     * events for earlier lifecycle states.
     */
    public TaskScenarioBuilder expectStateAndReasonUpdateSkipOther(TaskStatus.TaskState expectedState, String reasonCode) {
        logger.info("[{}] Expecting task state and reason {}/{} (skipping other)...", discoverActiveTest(), expectedState, reasonCode);

        Stopwatch stopWatch = Stopwatch.createStarted();

        TaskState expectedCoreState = toCoreTaskState(expectedState);
        expectTaskUpdate(
                task -> task.getStatus().getState().ordinal() <= expectedCoreState.ordinal(),
                task -> {
                    com.netflix.titus.api.jobmanager.model.job.TaskStatus taskStatus = task.getStatus();
                    return taskStatus.getState() == expectedCoreState && Objects.equals(reasonCode, taskStatus.getReasonCode());
                },
                "Expected state: " + expectedCoreState
        );

        logger.info("[{}] Expected task state {} received in {}[ms]", discoverActiveTest(), expectedState, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /**
     * Waits for an event with the given state, tolerating (and skipping) events for strictly
     * earlier lifecycle states.
     */
    public TaskScenarioBuilder expectStateUpdateSkipOther(TaskStatus.TaskState expectedState) {
        logger.info("[{}] Expecting task state {} (skipping other)...", discoverActiveTest(), expectedState);

        Stopwatch stopWatch = Stopwatch.createStarted();

        TaskState expectedCoreState = toCoreTaskState(expectedState);
        expectTaskUpdate(
                task -> task.getStatus().getState().ordinal() < expectedCoreState.ordinal(),
                task -> task.getStatus().getState() == expectedCoreState,
                "Expected state: " + expectedCoreState
        );

        logger.info("[{}] Expected task state {} received in {}[ms]", discoverActiveTest(), expectedState, stopWatch.elapsed(TimeUnit.MILLISECONDS));
        return this;
    }

    /** Consumes the next task update event and runs the given assertions against it. */
    public TaskScenarioBuilder assertTaskUpdate(Consumer<Task> assertFun) {
        Task task = expectTaskUpdate(update -> true, update -> true, "N/A");
        assertFun.accept(task);
        return this;
    }

    /** Runs an arbitrary action inside the fluent chain. */
    public TaskScenarioBuilder andThen(Runnable action) {
        action.run();
        return this;
    }

    private Task expectTaskUpdate(Predicate<Task> matcher, String message) {
        return expectTaskUpdate(t -> false, matcher, message);
    }

    /**
     * Consumes events until one satisfies {@code matcher}. Events matching {@code filter} are
     * skipped; any other non-matching event is a failure.
     *
     * @throws IllegalStateException on timeout or an unexpected event
     */
    private Task expectTaskUpdate(Predicate<Task> filter, Predicate<Task> matcher, String message) {
        while (true) {
            Task task = rethrow(() -> eventStreamSubscriber.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS));

            Preconditions.checkNotNull(task, "No change notification received while waiting for task update event. %s", message);
            if (matcher.test(task)) {
                return task;
            }
            Preconditions.checkState(filter.test(task), "Received task state update with unexpected status value %s. %s", task.getStatus(), message);
        }
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import org.junit.Test;
import static java.util.Arrays.stream;
/**
*
*/
final class ScenarioBuilderUtil {
static long TIMEOUT_MS = 30_000;
static String discoverActiveTest() {
return stream(Thread.currentThread().getStackTrace())
.filter(seg -> {
try {
return Class.forName(seg.getClassName()).getMethod(seg.getMethodName()).getAnnotation(Test.class) != null;
} catch (Exception ignore) {
return false;
}
})
.findFirst()
.map(StackTraceElement::getMethodName)
.orElse("?");
}
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import java.util.function.Function;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.grpc.protogen.TaskStatus.TaskState;
/**
*
*/
public class ScenarioTemplates {
/** Expects a job update event confirming the job moved to the Accepted state. */
public static Function<JobScenarioBuilder, JobScenarioBuilder> jobAccepted() {
    return jobScenarioBuilder -> {
        String message = "Expected state: " + JobState.Accepted;
        return jobScenarioBuilder.expectJobUpdateEvent(job -> JobState.Accepted == job.getStatus().getState(), message);
    };
}
/** Expects a job update event confirming the job moved to the Finished state. */
public static Function<JobScenarioBuilder, JobScenarioBuilder> jobFinished() {
    return jobScenarioBuilder -> {
        String message = "Expected state: " + JobState.Finished;
        return jobScenarioBuilder.expectJobUpdateEvent(job -> JobState.Finished == job.getStatus().getState(), message);
    };
}
/** Accepts the job, waits for all tasks/pods to be created, schedules them, and expects Launched. */
public static Function<JobScenarioBuilder, JobScenarioBuilder> launchJob() {
    return jobScenarioBuilder -> jobScenarioBuilder
            .template(jobAccepted())
            .expectAllTasksCreated()
            .allTasks(taskScenarioBuilder -> taskScenarioBuilder
                    // Pod creation is surfaced as a reason code on the Accepted state.
                    .expectStateAndReasonUpdateSkipOther(TaskState.Accepted, "podCreated")
            )
            .schedule()
            .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskStatus.TaskState.Launched));
}
public static Function<JobScenarioBuilder, JobScenarioBuilder> startJob(TaskState taskState) {
Preconditions.checkArgument(taskState.ordinal() < TaskState.KillInitiated.ordinal(), "Invalid target task state: %s", taskState);
return jobScenarioBuilder -> jobScenarioBuilder
.template(jobAccepted())
.allTasks(TaskScenarioBuilder::expectTaskOnAgent)
.allTasks(moveToState(taskState));
}
/**
* KillInitiated state is volatile. To force a task to stay in it forever, set lock = true.
*/
public static Function<JobScenarioBuilder, JobScenarioBuilder> startJobAndMoveTasksToKillInitiated() {
return jobScenarioBuilder -> {
jobScenarioBuilder.template(startJob(TaskState.Started));
jobScenarioBuilder.getKube().allowPodTermination(false);
return jobScenarioBuilder.allTasks(TaskScenarioBuilder::moveToKillInitiated);
};
}
public static Function<JobScenarioBuilder, JobScenarioBuilder> startTasks() {
return jobScenarioBuilder -> jobScenarioBuilder
.schedule()
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdates(TaskState.Launched))
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.transitionTo(TaskState.StartInitiated, TaskState.Started))
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdates(TaskState.StartInitiated, TaskState.Started));
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> startTask() {
return taskScenarioBuilder -> taskScenarioBuilder
.transitionTo(TaskState.StartInitiated, TaskState.Started)
.expectStateUpdateSkipOther(TaskState.Started);
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> startLaunchedTask() {
return taskScenarioBuilder -> taskScenarioBuilder
.transitionTo(TaskState.StartInitiated, TaskState.Started)
.expectStateUpdates(TaskState.StartInitiated, TaskState.Started);
}
public static Function<JobScenarioBuilder, JobScenarioBuilder> killJob() {
return jobScenarioBuilder -> jobScenarioBuilder
.killJob()
.expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.KillInitiated, "Expected state: " + JobState.KillInitiated)
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskState.KillInitiated))
.allTasks(TaskScenarioBuilder::completeKillInitiated)
.expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.Finished, "Expected state: " + JobState.Finished)
.expectJobEventStreamCompletes();
}
public static Function<JobScenarioBuilder, JobScenarioBuilder> startTasksInNewJob() {
return jobScenarioBuilder -> jobScenarioBuilder
.template(jobAccepted())
.expectAllTasksCreated()
.allTasks(taskScenarioBuilder -> taskScenarioBuilder
.expectStateAndReasonUpdateSkipOther(TaskState.Accepted, "podCreated")
)
.template(startTasks());
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> moveToState(TaskState taskState) {
return taskScenarioBuilder -> taskScenarioBuilder
.transitionUntil(taskState)
.expectStateUpdateSkipOther(taskState);
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> completeTask() {
return taskScenarioBuilder -> taskScenarioBuilder
.transitionTo(TaskState.Finished)
.expectStateUpdateSkipOther(TaskState.Finished);
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> lockTaskInState(TaskStatus.TaskState lockedState) {
return taskScenarioBuilder -> taskScenarioBuilder
.transitionUntil(lockedState)
.expectStateUpdateSkipOther(lockedState)
.expectStateUpdates(TaskState.KillInitiated);
}
public static Function<TaskScenarioBuilder, TaskScenarioBuilder> terminateAndShrinkV3() {
return taskScenarioBuilder -> taskScenarioBuilder
.killTaskAndShrink()
.completeKillInitiated()
.expectStateUpdateSkipOther(TaskState.Finished);
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.scenario;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.google.protobuf.Empty;
import com.google.protobuf.UInt32Value;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.JobAttributesDeleteRequest;
import com.netflix.titus.grpc.protogen.JobAttributesUpdate;
import com.netflix.titus.grpc.protogen.JobCapacityUpdate;
import com.netflix.titus.grpc.protogen.JobCapacityUpdateWithOptionalAttributes;
import com.netflix.titus.grpc.protogen.JobCapacityWithOptionalAttributes;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobChangeNotification.NotificationCase;
import com.netflix.titus.grpc.protogen.JobDisruptionBudgetUpdate;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobStatusUpdate;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.embedded.EmbeddedTitusOperations;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeCluster;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.kubernetes.client.openapi.models.V1Pod;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import rx.observables.ConnectableObservable;
import rx.subjects.ReplaySubject;
import static com.jayway.awaitility.Awaitility.await;
import static com.netflix.titus.common.util.ExceptionExt.rethrow;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioBuilderUtil.discoverActiveTest;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toCoreJob;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcCapacity;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcDisruptionBudget;
import static org.assertj.core.api.Assertions.assertThat;
/**
*
*/
public class JobScenarioBuilder {
private static final Logger logger = LoggerFactory.getLogger(JobScenarioBuilder.class);
private static final long TIMEOUT_MS = 30_000;
private final EmbeddedTitusOperations titusOperations;
private final JobsScenarioBuilder jobsScenarioBuilder;
private final String jobId;
private final JobManagementServiceGrpc.JobManagementServiceStub client;
private final ExtTestSubscriber<Job> jobEventStream = new ExtTestSubscriber<>();
private volatile int nextIndex = 0;
private final Multimap<Integer, String> taskSlotIndexes = Multimaps.synchronizedListMultimap(ArrayListMultimap.create());
private final Map<String, Integer> taskToSlot = new ConcurrentHashMap<>();
private final Map<String, TaskHolder> taskHolders = new ConcurrentHashMap<>();
private final Subscription eventStreamSubscription;
public JobScenarioBuilder(EmbeddedTitusOperations titusOperations, JobsScenarioBuilder jobsScenarioBuilder, String jobId) {
this.client = titusOperations.getV3GrpcClient();
this.titusOperations = titusOperations;
this.jobsScenarioBuilder = jobsScenarioBuilder;
this.jobId = jobId;
// FIXME Job is not made immediately visible after it is accepted by reconciliation framework
rethrow(() -> Thread.sleep(1000));
TestStreamObserver<JobChangeNotification> jobEvents = new TestStreamObserver<>();
ConnectableObservable<JobChangeNotification> connectableEventStream = jobEvents.toObservable()
.doOnNext(event -> logger.info("Received job change notification: {}", event))
.replay();
connectableEventStream.filter(e -> e.getNotificationCase() == NotificationCase.JOBUPDATE)
.map(n -> toCoreJob(n.getJobUpdate().getJob()))
.subscribe(jobEventStream);
connectableEventStream.filter(e -> e.getNotificationCase() == NotificationCase.TASKUPDATE)
.map(event -> event.getTaskUpdate().getTask())
.subscribe(
grpcTask -> {
Task coreTask = GrpcJobManagementModelConverters.toCoreTask(getJob(), grpcTask);
String taskId = coreTask.getId();
TaskHolder taskHolder = taskHolders.get(taskId);
if (taskHolder == null) {
Optional<String> resubmitOfOpt = coreTask.getResubmitOf();
Integer slot = null;
if (resubmitOfOpt.isPresent()) {
String resubmitOf = resubmitOfOpt.get();
slot = taskToSlot.get(resubmitOf);
}
if (slot == null) {
slot = nextIndex++;
}
taskSlotIndexes.put(slot, taskId);
taskToSlot.put(taskId, slot);
taskHolders.put(taskId, taskHolder = new TaskHolder());
}
taskHolder.onNext(coreTask);
},
e -> logger.error("Task event stream in job {} terminated with an error", jobId, e),
() -> logger.info("Task event stream in job {} completed", jobId)
);
Observable<JobChangeNotification> snapshotMarker = connectableEventStream.takeUntil(e -> e.getNotificationCase() == NotificationCase.SNAPSHOTEND);
this.eventStreamSubscription = connectableEventStream.connect();
client.observeJob(JobId.newBuilder().setId(jobId).build(), jobEvents);
snapshotMarker.timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).toBlocking().last();
}
void stop() {
eventStreamSubscription.unsubscribe();
}
public EmbeddedKubeCluster getKube() {
return titusOperations.getKubeCluster();
}
public JobsScenarioBuilder toJobs() {
return jobsScenarioBuilder;
}
public String getJobId() {
return jobId;
}
public Job getJob() {
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> jobEventStream.getLatestItem() != null);
return Preconditions.checkNotNull(jobEventStream.getLatestItem(), "Job not created yet");
}
public TaskScenarioBuilder getTask(String taskId) {
return taskHolders.get(taskId).getTaskScenarioBuilder();
}
public TaskScenarioBuilder getTaskByIndex(int idx) {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
Preconditions.checkArgument(lastTaskHolders.size() > idx, "Task with index %s not created yet", idx);
return lastTaskHolders.get(idx).getTaskScenarioBuilder();
}
public TaskScenarioBuilder getTaskInSlot(int slot, int resubmit) {
Collection<String> taskIdsPerSlot = taskSlotIndexes.get(slot);
Preconditions.checkArgument(resubmit < taskIdsPerSlot.size(), "Task with index %s and resubmit=%s not created yet", slot, resubmit);
String taskId = taskIdsPerSlot.stream().skip(resubmit).findFirst().get();
return taskHolders.get(taskId).getTaskScenarioBuilder();
}
public JobScenarioBuilder template(Function<JobScenarioBuilder, JobScenarioBuilder> templateFun) {
return templateFun.apply(this);
}
public JobScenarioBuilder allTasks(Function<TaskScenarioBuilder, TaskScenarioBuilder> taskActions) {
return inTasks(t -> true, taskActions);
}
public JobScenarioBuilder inTasks(Predicate<TaskScenarioBuilder> predicate, Function<TaskScenarioBuilder, TaskScenarioBuilder> taskActions) {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
lastTaskHolders.forEach(taskHolder -> {
if (predicate.test(taskHolder.getTaskScenarioBuilder())) {
taskActions.apply(taskHolder.getTaskScenarioBuilder());
}
});
return this;
}
public JobScenarioBuilder inTask(int idx, Function<TaskScenarioBuilder, TaskScenarioBuilder> taskActions) {
Preconditions.checkArgument(idx < nextIndex, "No task with id %s in job %s", idx, jobId);
taskActions.apply(getTaskByIndex(idx));
return this;
}
public JobScenarioBuilder inTask(int idx, int resubmit, Function<TaskScenarioBuilder, TaskScenarioBuilder> taskActions) {
Preconditions.checkArgument(idx < nextIndex, "No task with id %s in job %s", idx, jobId);
taskActions.apply(getTaskInSlot(idx, resubmit));
return this;
}
public JobScenarioBuilder updateJobCapacity(Capacity capacity) {
logger.info("[{}] Changing job {} capacity to {}...", discoverActiveTest(), jobId, capacity);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacity(
JobCapacityUpdate.newBuilder().setJobId(jobId).setCapacity(toGrpcCapacity(capacity)).build(),
responseObserver
);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
ServiceJobExt ext = (ServiceJobExt) job.getJobDescriptor().getExtensions();
return ext.getCapacity().equals(capacity);
}, "Job capacity update did not complete in time");
logger.info("[{}] Job {} scaled to new size in {}ms", discoverActiveTest(), jobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobCapacityDesired(int desired, int unchangedMin, int unchangedMax) {
logger.info("[{}] Changing job {} capacity desired to {}...", discoverActiveTest(), jobId, desired);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacityWithOptionalAttributes(
JobCapacityUpdateWithOptionalAttributes.newBuilder().setJobId(jobId)
.setJobCapacityWithOptionalAttributes(JobCapacityWithOptionalAttributes.newBuilder().setDesired(UInt32Value.newBuilder().setValue(desired).build()).build()).build(),
responseObserver);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
ServiceJobExt ext = (ServiceJobExt) job.getJobDescriptor().getExtensions();
Capacity capacity = ext.getCapacity();
return capacity.getDesired() == desired && capacity.getMin() == unchangedMin && capacity.getMax() == unchangedMax;
}, "Job capacity update did not complete in time");
logger.info("[{}] Job {} scaled to new desired size in {}ms", discoverActiveTest(), jobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobCapacityDesiredInvalid(int targetDesired, int currentDesired) {
logger.info("[{}] Changing job {} capacity desired to {}...", discoverActiveTest(), jobId, targetDesired);
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacityWithOptionalAttributes(
JobCapacityUpdateWithOptionalAttributes.newBuilder().setJobId(jobId)
.setJobCapacityWithOptionalAttributes(JobCapacityWithOptionalAttributes.newBuilder().setDesired(UInt32Value.newBuilder().setValue(targetDesired).build()).build()).build(),
responseObserver);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(responseObserver::hasError);
Throwable error = responseObserver.getError();
assertThat(error).isNotNull();
assertThat(error).isInstanceOf(StatusRuntimeException.class);
StatusRuntimeException statusRuntimeException = (StatusRuntimeException) error;
assertThat(statusRuntimeException.getStatus().getCode() == Status.Code.INVALID_ARGUMENT).isTrue();
// Make sure desired count is unchanged
Job job = getJob();
JobDescriptor.JobDescriptorExt ext = job.getJobDescriptor().getExtensions();
int currentCapacity = ext instanceof BatchJobExt ? ((BatchJobExt) ext).getSize() : ((ServiceJobExt) ext).getCapacity().getDesired();
assertThat(currentCapacity).isEqualTo(currentDesired);
return this;
}
public JobScenarioBuilder updateJobCapacityMaxInvalid(int targetMax) {
logger.info("[{}] Changing job {} capacity max to {}...", discoverActiveTest(), jobId, targetMax);
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacityWithOptionalAttributes(
JobCapacityUpdateWithOptionalAttributes.newBuilder().setJobId(jobId)
.setJobCapacityWithOptionalAttributes(JobCapacityWithOptionalAttributes.newBuilder().setMax(UInt32Value.newBuilder().setValue(targetMax).build()).build()).build(),
responseObserver);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(responseObserver::hasError);
Throwable error = responseObserver.getError();
assertThat(error).isNotNull();
assertThat(error).isInstanceOf(StatusRuntimeException.class);
StatusRuntimeException statusRuntimeException = (StatusRuntimeException) error;
assertThat(statusRuntimeException.getStatus().getCode() == Status.Code.INVALID_ARGUMENT).isTrue();
return this;
}
public JobScenarioBuilder updateJobCapacityMin(int min, int expectedMax, int expectedDesired) {
logger.info("[{}] Changing job {} capacity min to {}...", discoverActiveTest(), jobId, min);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacityWithOptionalAttributes(
JobCapacityUpdateWithOptionalAttributes.newBuilder().setJobId(jobId)
.setJobCapacityWithOptionalAttributes(JobCapacityWithOptionalAttributes.newBuilder().setMin(UInt32Value.newBuilder().setValue(min).build()).build()).build(),
responseObserver);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
ServiceJobExt ext = (ServiceJobExt) job.getJobDescriptor().getExtensions();
Capacity capacity = ext.getCapacity();
return capacity.getMin() == min && capacity.getMax() == expectedMax && capacity.getDesired() == expectedDesired;
}, "Job capacity update did not complete in time");
logger.info("[{}] Job {} scaled to new min size in {}ms", discoverActiveTest(), jobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobCapacityMax(int max, int expectedMin, int expectedDesired) {
logger.info("[{}] Changing job {} capacity max to {}...", discoverActiveTest(), jobId, max);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobCapacityWithOptionalAttributes(
JobCapacityUpdateWithOptionalAttributes.newBuilder().setJobId(jobId)
.setJobCapacityWithOptionalAttributes(JobCapacityWithOptionalAttributes.newBuilder().setMax(UInt32Value.newBuilder().setValue(max).build()).build()).build(),
responseObserver);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
ServiceJobExt ext = (ServiceJobExt) job.getJobDescriptor().getExtensions();
Capacity capacity = ext.getCapacity();
return capacity.getMax() == max && capacity.getMin() == expectedMin && capacity.getDesired() == expectedDesired;
}, "Job capacity update did not complete in time");
logger.info("[{}] Job {} scaled to new max size in {}ms", discoverActiveTest(), jobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobStatus(boolean enabled) {
logger.info("[{}] Changing job {} enable status to {}...", discoverActiveTest(), jobId, enabled);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobStatus(JobStatusUpdate.newBuilder().setId(jobId).setEnableStatus(enabled).build(), responseObserver);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
ServiceJobExt ext = (ServiceJobExt) job.getJobDescriptor().getExtensions();
return ext.isEnabled() == enabled;
}, "Job status update did not complete in time");
logger.info("[{}] Changing job {} enable status to {} finished in {}ms", discoverActiveTest(), jobId, enabled, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobDisruptionBudget(DisruptionBudget disruptionBudget) {
logger.info("[{}] Changing job {} disruption budget to {}...", discoverActiveTest(), jobId, disruptionBudget);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobDisruptionBudget(JobDisruptionBudgetUpdate.newBuilder()
.setJobId(jobId)
.setDisruptionBudget(toGrpcDisruptionBudget(disruptionBudget))
.build(),
responseObserver
);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> job.getJobDescriptor().getDisruptionBudget().equals(disruptionBudget), "Job disruption budget update did not complete in time");
logger.info("[{}] Changing job {} disruption budget to {} finished in {}ms", discoverActiveTest(), jobId, disruptionBudget, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder updateJobAttributes(Map<String, String> attributes) {
logger.info("[{}] Updating job {} attributes with {}", discoverActiveTest(), jobId, attributes);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.updateJobAttributes(JobAttributesUpdate.newBuilder()
.setJobId(jobId)
.putAllAttributes(attributes)
.build(),
responseObserver
);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
Map<String, String> updatedAttributes = job.getJobDescriptor().getAttributes();
return updatedAttributes.entrySet().containsAll(attributes.entrySet());
}, "Job attributes update did not complete in time");
logger.info("[{}] Changing job {} attributes with {} finished in {}ms", discoverActiveTest(), jobId, attributes, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder deleteJobAttributes(List<String> keys) {
logger.info("[{}] Deleting job {} attributes with keys: {}", discoverActiveTest(), jobId, keys);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.deleteJobAttributes(JobAttributesDeleteRequest.newBuilder()
.setJobId(jobId)
.addAllKeys(keys)
.build(),
responseObserver
);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
expectJobUpdateEvent(job -> {
Map<String, String> updatedAttributes = job.getJobDescriptor().getAttributes();
return !updatedAttributes.keySet().containsAll(keys);
}, "Job attributes update did not complete in time");
logger.info("[{}] Changing job {} attributes with keys: {} finished in {}ms", discoverActiveTest(), jobId, keys, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder killJob() {
logger.info("[{}] Killing job {}...", discoverActiveTest(), jobId);
Stopwatch stopWatch = Stopwatch.createStarted();
TestStreamObserver<Empty> responseObserver = new TestStreamObserver<>();
client.killJob(JobId.newBuilder().setId(jobId).build(), responseObserver);
rethrow(() -> responseObserver.awaitDone(TIMEOUT_MS, TimeUnit.MILLISECONDS));
logger.info("[{}] Job {} killed in {}ms", discoverActiveTest(), jobId, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder expectJobUpdateEvent(Function<Job, Boolean> condition, String message) {
logger.info("[{}] Expecting job update event with a predicate...", discoverActiveTest());
Stopwatch stopWatch = Stopwatch.createStarted();
Job newJob = rethrow(() -> jobEventStream.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS));
Preconditions.checkState(condition.apply(newJob), "Received Job does not match predicate. %s", message);
logger.info("[{}] Expected job update event with a matching predicate received in {}ms", discoverActiveTest(), stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder expectJobEventStreamCompletes() {
Stopwatch stopWatch = Stopwatch.createStarted();
try {
logger.info("[{}] Expect job event stream to complete due to job termination...", discoverActiveTest());
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(eventStreamSubscription::isUnsubscribed);
logger.info("[{}] Job event stream completed after waiting for {}ms", discoverActiveTest(), stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
} catch (Throwable e) {
logger.info("[{}] Job event stream failed after waiting for {}ms", discoverActiveTest(), stopWatch.elapsed(TimeUnit.MILLISECONDS), e);
throw e;
}
}
public JobScenarioBuilder expectAllTasksCreated() {
JobDescriptor.JobDescriptorExt ext = getJob().getJobDescriptor().getExtensions();
int size = ext instanceof BatchJobExt ? ((BatchJobExt) ext).getSize() : ((ServiceJobExt) ext).getCapacity().getDesired();
logger.info("[{}] Expecting {} tasks to be active...", discoverActiveTest(), size);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> getNonFinishedTaskCount() == size);
return this;
}
public JobScenarioBuilder schedule() {
titusOperations.getKubeCluster().schedule();
return this;
}
public JobScenarioBuilder expectJobToScaleDown() {
JobDescriptor.JobDescriptorExt ext = getJob().getJobDescriptor().getExtensions();
Preconditions.checkState(ext instanceof ServiceJobExt, "Not a service job %s", jobId);
int size = ((ServiceJobExt) ext).getCapacity().getDesired();
logger.info("[{}] Expect job {} to scale down to the desired size {}...", discoverActiveTest(), jobId, size);
Stopwatch stopWatch = Stopwatch.createStarted();
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
return lastTaskHolders.stream().filter(t -> {
TaskState state = t.getTaskScenarioBuilder().getTask().getStatus().getState();
return state != TaskState.KillInitiated;
}).count() <= size;
});
logger.info("[{}] Expected job {} scale down to the desired size {} completed in {}ms", discoverActiveTest(), jobId, size, stopWatch.elapsed(TimeUnit.MILLISECONDS));
return this;
}
public JobScenarioBuilder expectTaskInSlot(int slot, int index) {
logger.info("[{}] Expecting task in slot {} with index {} to exist", discoverActiveTest(), slot, index);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> {
Collection<String> taskIdsPerSlot = taskSlotIndexes.get(slot);
return index < taskIdsPerSlot.size() && Iterables.get(taskIdsPerSlot, index) != null;
});
return this;
}
public JobScenarioBuilder expectTasksInSlot(int slot, Predicate<List<TaskScenarioBuilder>> predicate) {
logger.info("[{}] Expecting tasks in slot {} to fulfill the predicate requirements", discoverActiveTest(), slot);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> {
List<TaskScenarioBuilder> taskScenarioBuilders = taskSlotIndexes.get(slot).stream()
.map(taskHolders::get)
.map(TaskHolder::getTaskScenarioBuilder)
.collect(Collectors.toList());
return predicate.test(taskScenarioBuilders);
});
return this;
}
public JobScenarioBuilder expectSome(int count, Predicate<TaskScenarioBuilder> predicate) {
logger.info("[{}] Expecting {} tasks to fulfill the predicate requirements", discoverActiveTest(), count);
await().timeout(TIMEOUT_MS, TimeUnit.MILLISECONDS).until(() -> {
long matching = getLastTaskHolders().stream().filter(t -> predicate.test(t.getTaskScenarioBuilder())).count();
return matching == count;
});
return this;
}
public JobScenarioBuilder inJob(Consumer<Job<?>> consumer) {
consumer.accept(getJob());
return this;
}
public JobScenarioBuilder inStrippedJob(Consumer<Job<?>> consumer) {
Job<?> job = getJob();
// Remove diagnostic data added by the job replicator.
Map<String, String> filteredAttributes = new HashMap<>();
job.getJobDescriptor().getAttributes().forEach((key, value) -> {
if (!key.startsWith("event.propagation")
&& !key.equals(JobAttributes.JOB_ATTRIBUTES_CREATED_BY)
&& !key.equals(JobAttributes.JOB_ATTRIBUTE_ROUTING_CELL)
&& !key.equals(JobAttributes.JOB_ATTRIBUTES_CALL_REASON)) {
filteredAttributes.put(key, value);
}
});
Job strippedJob = job.toBuilder()
.withJobDescriptor(((Job) job).getJobDescriptor().toBuilder()
.withAttributes(filteredAttributes)
.build()
)
.build();
consumer.accept(strippedJob);
return this;
}
public JobScenarioBuilder assertJob(Predicate<Job> jobPredicate) {
if (!jobPredicate.test(getJob())) {
throw new IllegalStateException("Job predicate is false");
}
return this;
}
public JobScenarioBuilder assertTasks(Predicate<List<Task>> tasksPredicate) {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
List<Task> tasks = lastTaskHolders.stream().map(h -> h.getTaskScenarioBuilder().getTask()).collect(Collectors.toList());
if (!tasksPredicate.test(tasks)) {
throw new IllegalStateException("Tasks predicate is false");
}
return this;
}
public JobScenarioBuilder assertEachTask(Predicate<Task> taskPredicate, String message) {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
boolean allMatch = lastTaskHolders.stream().allMatch(h -> taskPredicate.test(h.getTaskScenarioBuilder().getTask()));
if (!allMatch) {
throw new IllegalStateException("Task predicate is false for one or more tasks. " + message);
}
return this;
}
public JobScenarioBuilder assertEachPod(Predicate<V1Pod> podPredicate, String message) {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
boolean allMatch = lastTaskHolders.stream().allMatch(taskHolder -> {
Task task = taskHolder.getTaskScenarioBuilder().getTask();
V1Pod pod = titusOperations.getKubeCluster().getPods().get(task.getId());
return podPredicate.test(pod);
}
);
if (!allMatch) {
throw new IllegalStateException("Pod predicate is false for one or more tasks. " + message);
}
return this;
}
public int getNonFinishedTaskCount() {
List<TaskHolder> lastTaskHolders = getLastTaskHolders();
return (int) lastTaskHolders.stream()
.filter(h -> h.getTaskScenarioBuilder().getTask().getStatus().getState() != TaskState.Finished)
.count();
}
public JobScenarioBuilder findTasks(TaskQuery taskQuery, Predicate<List<com.netflix.titus.grpc.protogen.Task>> tasksPredicate) {
TestStreamObserver<TaskQueryResult> responseObserver = new TestStreamObserver<>();
client.findTasks(taskQuery, responseObserver);
TaskQueryResult result = rethrow(() -> responseObserver.getLast(TIMEOUT_MS, TimeUnit.MILLISECONDS));
if (result == null) {
throw new IllegalStateException("TaskQueryResult is null");
} else if (!tasksPredicate.test(result.getItemsList())) {
throw new IllegalStateException("Tasks predicate is false");
}
return this;
}
public JobScenarioBuilder andThen(Runnable action) {
action.run();
return this;
}
public void expectVersionsOrdered() {
expectJobVersionsOrdered();
}
public void expectJobVersionsOrdered() {
List<Job> revisions = jobEventStream.getAllItems();
Job last = revisions.get(0);
for (int i = 1; i < revisions.size(); i++) {
Job next = revisions.get(i);
assertThat(next.getVersion().getTimestamp()).isGreaterThanOrEqualTo(last.getVersion().getTimestamp());
}
}
/**
 * Couples a single task slot's replayed event stream with the {@link TaskScenarioBuilder}
 * that consumes it.
 */
private class TaskHolder {
    // Replays every task event, so a scenario builder that attaches late still sees the full history.
    private final ReplaySubject<Task> taskEventStream;
    private final TaskScenarioBuilder taskScenarioBuilder;
    private TaskHolder() {
        this.taskEventStream = ReplaySubject.create();
        // Deliberately a non-static inner class: the task builder needs the enclosing
        // job scenario (JobScenarioBuilder.this) and its titusOperations.
        this.taskScenarioBuilder = new TaskScenarioBuilder(titusOperations, JobScenarioBuilder.this, taskEventStream);
    }
    private TaskScenarioBuilder getTaskScenarioBuilder() {
        return taskScenarioBuilder;
    }
    // Forwards a task update into the replayed stream.
    private void onNext(Task task) {
        taskEventStream.onNext(task);
    }
    @Override
    public String toString() {
        Task task = taskScenarioBuilder.getTask();
        return "TaskHolder{" +
                "taskId=" + task.getId() +
                ", state=" + task.getStatus().getState() +
                "}";
    }
}
/**
 * Resolves the most recent task holder for every task slot (i.e. the latest
 * resubmit per slot index).
 */
private List<TaskHolder> getLastTaskHolders() {
    List<TaskHolder> holders = new ArrayList<>();
    for (String taskId : getLastElementPerKey(taskSlotIndexes)) {
        holders.add(taskHolders.get(taskId));
    }
    return holders;
}
/**
 * Picks the last element of every non-empty value collection in the multimap.
 *
 * @param multimap slot index to task-id mapping (insertion order per key)
 * @return one element per key: the most recently added value
 */
private List<String> getLastElementPerKey(Multimap<Integer, String> multimap) {
    return multimap.asMap().values().stream()
            .filter(collection -> !collection.isEmpty())
            .map(Iterables::getLast)
            .collect(Collectors.toList());
}
}
| 9,948 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/JobTestUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job;
import java.util.Collections;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import com.google.rpc.BadRequest;
import com.netflix.titus.runtime.common.grpc.GrpcClientErrorUtils;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import io.grpc.StatusRuntimeException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
/**
 * Helpers shared by job-management integration tests.
 */
public class JobTestUtils {

    // Static utility holder; not meant to be instantiated.
    private JobTestUtils() {
    }

    /**
     * Submits a job descriptor that is expected to fail validation, and verifies that the
     * returned gRPC {@link BadRequest} detail reports exactly the expected violating fields.
     *
     * @param client           blocking job-management stub to submit through
     * @param badJobDescriptor descriptor expected to be rejected
     * @param expectedFields   field paths that must appear as violations (no more, no less)
     */
    public static void submitBadJob(JobManagementServiceGrpc.JobManagementServiceBlockingStub client,
                                    JobDescriptor badJobDescriptor,
                                    String... expectedFields) {
        Set<String> expectedFieldSet = new HashSet<>();
        Collections.addAll(expectedFieldSet, expectedFields);
        try {
            client.createJob(badJobDescriptor).getId();
            fail("Expected test to fail");
        } catch (StatusRuntimeException e) {
            System.out.println("Received StatusRuntimeException: " + e.getMessage());
            Optional<BadRequest> badRequestOpt = GrpcClientErrorUtils.getDetail(e, BadRequest.class);
            // Print validation messages for visual inspection
            badRequestOpt.ifPresent(System.out::println);
            Set<String> badFields = badRequestOpt.map(badRequest ->
                    badRequest.getFieldViolationsList().stream().map(BadRequest.FieldViolation::getField).collect(Collectors.toSet())
            ).orElse(Collections.emptySet());
            // Single exact-set assertion replaces the former containsAll + size pair:
            // all expected violations present, and nothing extra.
            assertThat(badFields).containsExactlyInAnyOrderElementsOf(expectedFieldSet);
        }
    }
}
| 9,949 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/CellAssertions.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.Task;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Assertions verifying that jobs and tasks are stamped with the federation cell they belong to.
 */
public class CellAssertions {

    // Static utility holder; not meant to be instantiated.
    private CellAssertions() {
    }

    /** Asserts the job descriptor carries the expected {@code titus.cell} attribute. */
    public static void assertCellInfo(JobDescriptor jobDescriptor, String cellName) {
        assertThat(jobDescriptor.getAttributesMap()).containsEntry("titus.cell", cellName);
    }

    /** Asserts the job's descriptor carries the expected {@code titus.cell} attribute. */
    public static void assertCellInfo(Job job, String cellName) {
        assertCellInfo(job.getJobDescriptor(), cellName);
    }

    /** Asserts the task context carries the expected {@code titus.cell} entry. */
    public static void assertCellInfo(Task task, String cellName) {
        assertThat(task.getTaskContextMap()).containsEntry("titus.cell", cellName);
    }
}
| 9,950 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobIpAllocationsTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import java.util.AbstractMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.vpc.SignedIpAddressAllocation;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobIpAllocationGenerator;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.master.integration.v3.job.JobTestUtils.submitBadJob;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.serviceJobDescriptors;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Integration tests for static (signed) IP allocations on batch and service jobs:
 * tasks must receive the declared allocation id in their task context, land in the
 * allocation's availability zone, keep the allocation across task replacement, and
 * jobs whose size exceeds the number of allocations must be rejected.
 */
@Category(IntegrationTest.class)
public class JobIpAllocationsTest extends BaseIntegrationTest {
    // Single-task job templates, each pre-populated with exactly one signed IP allocation.
    private static final JobDescriptor<BatchJobExt> ONE_TASK_BATCH_JOB = batchJobDescriptors(batchOfSizeAndIps(1)).getValue();
    private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = serviceJobDescriptors(serviceOfSizeAndIps(1)).getValue();
    // Embedded Titus cell backed by a 3-node Kube cluster.
    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(3));
    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
    // The stack must come up before (and be torn down after) the scenario builder.
    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
    // NOTE(review): static field assigned from an instance @Before; safe while these
    // tests run serially in one JVM — confirm if parallel execution is ever enabled.
    private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
    @Before
    public void setUp() throws Exception {
        client = titusStackResource.getMaster().getV3BlockingGrpcClient();
    }
    /**
     * Tests a service job with a single task and IP assignment.
     */
    @Test(timeout = 30_000)
    public void testServiceIpAllocationConstraint() {
        jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, jobScenarioBuilder ->
                jobScenarioBuilder
                        .template(ScenarioTemplates.startTasksInNewJob())
                        // Expect tasks to have been assigned
                        .allTasks(taskScenarioBuilder -> {
                            String ipAllocationIdFromJob = getIpAllocationIdFromJob(0, ONE_TASK_SERVICE_JOB);
                            return taskScenarioBuilder
                                    .expectStaticIpAllocationInPod(ipAllocationIdFromJob)
                                    .expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, ipAllocationIdFromJob);
                        })
                        // Expect tasks to be assigned to correct AZ
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, ONE_TASK_SERVICE_JOB))));
    }
    /**
     * Tests a batch job with a single task and IP assignment.
     */
    @Test(timeout = 30_000)
    public void testBatchIpAllocationConstraint() {
        jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder ->
                jobScenarioBuilder
                        .template(ScenarioTemplates.startTasksInNewJob())
                        // Expect tasks to have been assigned
                        .allTasks(taskScenarioBuilder -> {
                            String ipAllocationIdFromJob = getIpAllocationIdFromJob(0, ONE_TASK_BATCH_JOB);
                            return taskScenarioBuilder
                                    .expectStaticIpAllocationInPod(ipAllocationIdFromJob)
                                    .expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, ipAllocationIdFromJob);
                        })
                        // Expect tasks to be assigned to correct AZ
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, ONE_TASK_BATCH_JOB))));
    }
    /**
     * Tests a service job with multiple IP assignments.
     */
    @Test(timeout = 30_000)
    public void testMultiTaskIpAllocations() {
        JobDescriptor<ServiceJobExt> serviceJobExtJobDescriptor = serviceJobDescriptors(serviceOfSizeAndIps(2)).getValue();
        // Exactly one task per declared allocation (two tasks, two allocations).
        jobsScenarioBuilder.schedule(serviceJobExtJobDescriptor, jobScenarioBuilder ->
                jobScenarioBuilder
                        .template(ScenarioTemplates.startTasksInNewJob())
                        .expectSome(1, taskScenarioBuilder -> {
                            return taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, "").equals(getIpAllocationIdFromJob(0, serviceJobExtJobDescriptor));
                        })
                        .expectSome(1, taskScenarioBuilder -> taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, "").equals(getIpAllocationIdFromJob(1, serviceJobExtJobDescriptor)))
        );
    }
    /**
     * Tests a new replacement task retains the same IP allocation as the previous task.
     */
    @Test(timeout = 30_000)
    public void testReplacementTaskIpAllocation() {
        jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, jobScenarioBuilder ->
                jobScenarioBuilder
                        // Start the initial task
                        .template(ScenarioTemplates.startTasksInNewJob())
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, getIpAllocationIdFromJob(0, ONE_TASK_SERVICE_JOB)))
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, ONE_TASK_SERVICE_JOB)))
                        // Finish the initial task and make sure it is replaced
                        .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.transitionUntil(TaskStatus.TaskState.Finished))
                        .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskStatus.TaskState.Finished))
                        .expectAllTasksCreated()
                        .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                        .assertTasks(task -> task.get(0).getResubmitNumber() == 1)
                        .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.template(ScenarioTemplates.startTask()))
                        // Make sure replacement has correct attribute and placement
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, getIpAllocationIdFromJob(0, ONE_TASK_SERVICE_JOB)))
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, ONE_TASK_SERVICE_JOB)))
        );
    }
    /**
     * Tests a job waiting for an in use IP allocation has updated task context fields.
     */
    @Test(timeout = 30_000)
    @Ignore // TODO Read static IP allocation status from pod message field, and add it to the task context.
    public void testWaitingTaskContext() throws Exception {
        // Two jobs share the same IP allocation; the second must report which task holds it.
        JobDescriptor<ServiceJobExt> firstIpJobDescriptor = ONE_TASK_SERVICE_JOB;
        JobDescriptor<ServiceJobExt> secondIpJobDescriptor = firstIpJobDescriptor.but(j -> j.getJobGroupInfo().toBuilder().withSequence("v001"));
        // Schedule the first task and ensure it's in the correct zone with the correct task context
        jobsScenarioBuilder.schedule(firstIpJobDescriptor, jobScenarioBuilder ->
                jobScenarioBuilder
                        .template(ScenarioTemplates.startTasksInNewJob())
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, getIpAllocationIdFromJob(0, firstIpJobDescriptor)))
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, firstIpJobDescriptor))));
        String firstJobId = jobsScenarioBuilder.takeJob(0).getJobId();
        // Schedule the second task and ensure it's blocked on the first task
        jobsScenarioBuilder.schedule(secondIpJobDescriptor, jobScenarioBuilder ->
                jobScenarioBuilder
                        .template(ScenarioTemplates.jobAccepted())
                        .expectAllTasksCreated()
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdates(TaskStatus.TaskState.Accepted)));
        String secondJobId = jobsScenarioBuilder.takeJob(1).getJobId();
        // Query the gRPC endpoint and ensure the first task does not have a waiting task context field.
        TaskQueryResult firstTaskQueryResult = client.findTasks(TaskQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(100).build())
                .putFilteringCriteria("jobIds", firstJobId)
                .build());
        assertThat(firstTaskQueryResult.getItemsCount()).isEqualTo(1);
        firstTaskQueryResult.getItemsList().forEach(task -> {
            assertThat(task.getTaskContextMap()).doesNotContainKeys(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION);
        });
        String firstTaskId = firstTaskQueryResult.getItems(0).getId();
        // Query the gRPC endpoint and ensure the second task has a waiting task context field.
        TaskQueryResult secondTaskQueryResult = client.findTasks(TaskQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(100).build())
                .putFilteringCriteria("jobIds", secondJobId)
                .build());
        assertThat(secondTaskQueryResult.getItemsCount()).isEqualTo(1);
        secondTaskQueryResult.getItemsList().forEach(task -> {
            assertThat(task.getTaskContextMap()).contains(new AbstractMap.SimpleImmutableEntry<>(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION, firstTaskId));
        });
        // Observe the second job and ensure the streamed task has a waiting task context field.
        boolean verified = false;
        Iterator<JobChangeNotification> it = client.observeJob(JobId.newBuilder().setId(secondJobId).build());
        while (it.hasNext()) {
            JobChangeNotification jobChangeNotification = it.next();
            if (jobChangeNotification.hasTaskUpdate()) {
                Map<String, String> taskContext = jobChangeNotification.getTaskUpdate().getTask().getTaskContextMap();
                assertThat(taskContext).contains(new AbstractMap.SimpleImmutableEntry<>(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION, firstTaskId));
                verified = true;
            } else if (jobChangeNotification.hasSnapshotEnd()) {
                // Snapshot finished streaming; stop before blocking on live updates.
                break;
            }
        }
        assertThat(verified).isTrue();
    }
    /**
     * Tests a service job with max greater than IP allocations cannot be created.
     */
    @Test(timeout = 30_000)
    public void testServiceJobInstanceValidation() {
        JobDescriptor<ServiceJobExt> invalidJobDescriptor = ONE_TASK_SERVICE_JOB
                .but(j -> j.getExtensions().toBuilder().withCapacity(
                        j.getExtensions().getCapacity().toBuilder().withMax(2).build()).build());
        submitBadJob(client,
                toGrpcJobDescriptor(invalidJobDescriptor),
                "container.containerResources.signedIpAllocations");
    }
    /**
     * Tests a batch job with size greater than IP allocations cannot be created.
     */
    @Test(timeout = 30_000)
    public void testBatchJobInstanceValidation() {
        JobDescriptor<BatchJobExt> invalidJobDescriptor = ONE_TASK_BATCH_JOB
                .but(j -> j.getExtensions().toBuilder().withSize(2).build());
        submitBadJob(client,
                toGrpcJobDescriptor(invalidJobDescriptor),
                "container.containerResources.signedIpAllocations");
    }
    /**
     * Tests a service job update with max greater than IP allocations cannot be applied.
     */
    @Test(timeout = 30_000)
    public void testServiceJobUpdateValidation() {
        jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, jobScenarioBuilder ->
                // Schedule job with 1 IP allocation
                jobScenarioBuilder
                        .template(ScenarioTemplates.startTasksInNewJob())
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, getIpAllocationIdFromJob(0, ONE_TASK_SERVICE_JOB)))
                        .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, ONE_TASK_SERVICE_JOB)))
                        // Try and update max capacity to 2 and make sure it is rejected.
                        .updateJobCapacityMaxInvalid(2)
        );
    }
    // Resizes a batch job template to `size` tasks and attaches `size` generated IP allocations.
    private static Function<JobDescriptor<BatchJobExt>, JobDescriptor<BatchJobExt>> batchOfSizeAndIps(int size) {
        List<SignedIpAddressAllocation> ipAllocations = JobIpAllocationGenerator.jobIpAllocations(size).toList();
        return jd -> JobFunctions.changeBatchJobSize(jd, size)
                .but(d -> JobFunctions.jobWithIpAllocations(d, ipAllocations));
    }
    // Resizes a service job template to `size` tasks and attaches `size` generated IP allocations.
    private static Function<JobDescriptor<ServiceJobExt>, JobDescriptor<ServiceJobExt>> serviceOfSizeAndIps(int size) {
        List<SignedIpAddressAllocation> ipAllocations = JobIpAllocationGenerator.jobIpAllocations(size).toList();
        return jd -> JobFunctions.changeServiceJobCapacity(jd, size)
                .but(d -> JobFunctions.jobWithIpAllocations(d, ipAllocations));
    }
    // Allocation id of the idx-th signed IP allocation declared by the job.
    private static String getIpAllocationIdFromJob(int idx, JobDescriptor<?> jobDescriptor) {
        return jobDescriptor.getContainer().getContainerResources().getSignedIpAddressAllocations().get(idx).getIpAddressAllocation().getAllocationId();
    }
    // Availability zone of the idx-th signed IP allocation declared by the job.
    private static String getZoneFromJobIpAllocation(int idx, JobDescriptor<?> jobDescriptor) {
        return jobDescriptor.getContainer().getContainerResources().getSignedIpAddressAllocations().get(idx).getIpAddressAllocation().getIpAddressLocation().getAvailabilityZone();
    }
}
| 9,951 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobLoadSheddingTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceBlockingStub;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusMasterResource;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.master.endpoint.MasterEndpointModule.GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX;
import static com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters.basicMasterWithKubeIntegration;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
/**
 * Verifies that TitusMaster's gRPC admission controller sheds load once the
 * configured token bucket (capacity 1, refill 1/sec) is exhausted.
 */
@Category(IntegrationTest.class)
public class JobLoadSheddingTest extends BaseIntegrationTest {
    @Rule
    public final TitusMasterResource titusMasterResource = new TitusMasterResource(
            basicMasterWithKubeIntegration(EmbeddedKubeClusters.basicCluster(2))
                    .toBuilder()
                    // One shared token bucket matching every caller and every endpoint.
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.order", "1")
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.sharedByCallers", "true")
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.callerPattern", ".*")
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.endpointPattern", ".*")
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.capacity", "1")
                    .withProperty(GRPC_ADMISSION_CONTROLLER_CONFIGURATION_PREFIX + ".default.refillRateInSec", "1")
                    .build()
    );
    private JobManagementServiceBlockingStub client;
    @Before
    public void setUp() throws Exception {
        client = titusMasterResource.getMaster().getV3BlockingGrpcClient();
    }
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testMasterRateLimits() {
        // The query is immutable; build it once outside the loop.
        TaskQuery query = TaskQuery.newBuilder().setPage(Page.newBuilder().setPageSize(1).build()).build();
        int successfulCalls = 0;
        StatusRuntimeException rateLimitError = null;
        // Hammer the endpoint; with bucket capacity 1 and each call taking well under
        // a second, the limiter should trip long before 100 calls complete.
        while (successfulCalls < 100 && rateLimitError == null) {
            try {
                client.findTasks(query);
                successfulCalls++;
            } catch (StatusRuntimeException e) {
                rateLimitError = e;
            }
        }
        if (rateLimitError == null) {
            fail("Expected rate limit error");
        }
        assertThat(rateLimitError.getStatus().getCode()).isEqualTo(Status.Code.RESOURCE_EXHAUSTED);
        // At least the first request must have been admitted.
        assertThat(successfulCalls).isGreaterThan(0);
    }
}
| 9,952 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobEbsVolumesTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.vpc.SignedIpAddressAllocation;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobEbsVolumeGenerator;
import com.netflix.titus.testkit.model.job.JobIpAllocationGenerator;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.master.integration.v3.job.JobTestUtils.submitBadJob;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCellWithCustomZones;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.serviceJobDescriptors;
@Category(IntegrationTest.class)
public class JobEbsVolumesTest extends BaseIntegrationTest {
private static final JobDescriptor<BatchJobExt> ONE_TASK_BATCH_JOB = batchJobDescriptors(batchOfSizeAndEbsVolumes(1)).getValue();
private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = serviceJobDescriptors(serviceOfSizeAndEbsVolumes(1)).getValue();
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCellWithCustomZones(2, "us-east-1a"));
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
@Before
public void setUp() throws Exception {
    // Blocking gRPC client into the embedded TitusMaster; refreshed for every test.
    client = titusStackResource.getMaster().getV3BlockingGrpcClient();
}
/**
 * Tests a service job with a single task and EBS volume.
 */
@Test(timeout = 30_000)
public void testServiceEbsVolumeConstraint() {
    // The job declares exactly one volume; every task must carry its id.
    String expectedVolumeId = ONE_TASK_SERVICE_JOB.getContainer().getContainerResources().getEbsVolumes().get(0).getVolumeId();
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, jobScenarioBuilder ->
            jobScenarioBuilder
                    .template(ScenarioTemplates.startTasksInNewJob())
                    // Expect tasks to have been assigned a volume
                    .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(
                            TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, expectedVolumeId)));
}
/**
 * Tests a batch job with a single task and EBS volume.
 */
@Test(timeout = 30_000)
public void testBatchEbsVolumeConstraint() {
    // The job declares exactly one volume; every task must carry its id.
    String expectedVolumeId = ONE_TASK_BATCH_JOB.getContainer().getContainerResources().getEbsVolumes().get(0).getVolumeId();
    jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder ->
            jobScenarioBuilder
                    .template(ScenarioTemplates.startTasksInNewJob())
                    // Expect tasks to have been assigned a volume
                    .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(
                            TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, expectedVolumeId)));
}
/**
 * Tests a service job with multiple tasks and EBS volumes.
 */
@Test(timeout = 30_000)
public void testMultiVolumeJob() {
    JobDescriptor<ServiceJobExt> serviceJobDescriptor = serviceJobDescriptors(serviceOfSizeAndEbsVolumes(4)).getValue();
    List<EbsVolume> ebsVolumes = serviceJobDescriptor.getContainer().getContainerResources().getEbsVolumes();
    // Pin each declared volume id up front; exactly one task must hold each one.
    String volumeId0 = ebsVolumes.get(0).getVolumeId();
    String volumeId1 = ebsVolumes.get(1).getVolumeId();
    String volumeId2 = ebsVolumes.get(2).getVolumeId();
    String volumeId3 = ebsVolumes.get(3).getVolumeId();
    jobsScenarioBuilder.schedule(serviceJobDescriptor, jobScenarioBuilder ->
            jobScenarioBuilder
                    .template(ScenarioTemplates.startTasksInNewJob())
                    .expectSome(1, taskScenarioBuilder -> volumeId0.equals(taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "")))
                    .expectSome(1, taskScenarioBuilder -> volumeId1.equals(taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "")))
                    .expectSome(1, taskScenarioBuilder -> volumeId2.equals(taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "")))
                    .expectSome(1, taskScenarioBuilder -> volumeId3.equals(taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "")))
    );
}
/**
 * Tests a new replacement task retains the same EBS volume as the previous task.
 */
@Test(timeout = 30_000)
public void testReplacementTaskEbsVolume() {
    JobDescriptor<ServiceJobExt> serviceJobDescriptor = serviceJobDescriptors(serviceOfSizeAndEbsVolumes(3)).getValue();
    List<EbsVolume> ebsVolumes = serviceJobDescriptor.getContainer().getContainerResources().getEbsVolumes();
    jobsScenarioBuilder.schedule(serviceJobDescriptor, jobScenarioBuilder ->
            jobScenarioBuilder
                    // Start the initial tasks
                    .template(ScenarioTemplates.startTasksInNewJob())
                    // One task per declared volume
                    .expectSome(1, taskScenarioBuilder -> taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "").equals(ebsVolumes.get(0).getVolumeId()))
                    .expectSome(1, taskScenarioBuilder -> taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "").equals(ebsVolumes.get(1).getVolumeId()))
                    .expectSome(1, taskScenarioBuilder -> taskScenarioBuilder.getTask().getTaskContext().getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "").equals(ebsVolumes.get(2).getVolumeId()))
                    // Finish the initial tasks and make sure they are replaced
                    .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.transitionUntil(TaskStatus.TaskState.Finished))
                    .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskStatus.TaskState.Finished))
                    .inTask(1, taskScenarioBuilder -> taskScenarioBuilder.transitionUntil(TaskStatus.TaskState.Finished))
                    .inTask(1, taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskStatus.TaskState.Finished))
                    .inTask(2, taskScenarioBuilder -> taskScenarioBuilder.transitionUntil(TaskStatus.TaskState.Finished))
                    .inTask(2, taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdateSkipOther(TaskStatus.TaskState.Finished))
                    .expectAllTasksCreated()
                    .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                    // Every slot's current task is resubmit #1 of the original
                    .assertTasks(task -> task.get(0).getResubmitNumber() == 1)
                    .assertTasks(task -> task.get(1).getResubmitNumber() == 1)
                    .assertTasks(task -> task.get(2).getResubmitNumber() == 1)
                    .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.template(ScenarioTemplates.startTask()))
                    .inTask(1, taskScenarioBuilder -> taskScenarioBuilder.template(ScenarioTemplates.startTask()))
                    .inTask(2, taskScenarioBuilder -> taskScenarioBuilder.template(ScenarioTemplates.startTask()))
                    // Make sure replacements have the same volume as its resubmitOf
                    // (sameEbsVolumePredicate() is defined elsewhere in this class, outside this view)
                    .expectTasksInSlot(0, sameEbsVolumePredicate())
                    .expectTasksInSlot(1, sameEbsVolumePredicate())
                    .expectTasksInSlot(2, sameEbsVolumePredicate())
    );
}
/**
 * Tests a batch job with size greater than EBS volumes cannot be created.
 */
@Test(timeout = 30_000)
public void testBatchJobInstanceValidation() {
    // Grow the batch size beyond the single provisioned EBS volume.
    JobDescriptor<BatchJobExt> invalidJobDescriptor =
            ONE_TASK_BATCH_JOB.but(j -> j.getExtensions().toBuilder().withSize(2).build());
    submitBadJob(client, toGrpcJobDescriptor(invalidJobDescriptor), "container.containerResources.ebsVolumes");
}
/**
 * Tests a service job with max greater than EBS volumes cannot be created.
 */
@Test(timeout = 30_000)
public void testServiceJobInstanceValidation() {
    // Raise max capacity beyond the single provisioned EBS volume.
    JobDescriptor<ServiceJobExt> invalidJobDescriptor =
            ONE_TASK_SERVICE_JOB.but(j -> j.getExtensions().toBuilder().withCapacity(
                    j.getExtensions().getCapacity().toBuilder().withMax(2).build()).build());
    submitBadJob(client, toGrpcJobDescriptor(invalidJobDescriptor), "container.containerResources.ebsVolumes");
}
/**
 * Tests a service job update with max greater than EBS volumes cannot be applied.
 */
@Test(timeout = 30_000)
public void testServiceJobUpdateValidation() {
    // Consistency fix: dropped the unused `throws Exception` — nothing in the body throws a
    // checked exception, and the parallel IP-allocation test method declares none either.
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, jobScenarioBuilder ->
            // Schedule job with 1 EBS volume
            jobScenarioBuilder
                    .template(ScenarioTemplates.startTasksInNewJob())
                    .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(
                            TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID,
                            ONE_TASK_SERVICE_JOB.getContainer().getContainerResources().getEbsVolumes().get(0).getVolumeId())
                    )
                    // Try and update max capacity to 2 and make sure it is rejected.
                    .updateJobCapacityMaxInvalid(2)
    );
}
/**
 * Verifies that EBS volumes and static IP allocations are assigned to tasks in matching (index-aligned) order.
 */
@Test(timeout = 30_000)
public void testServiceJobWithStaticIps() {
    int size = 4;
    JobDescriptor<ServiceJobExt> baseDescriptor = serviceJobDescriptors(serviceOfSizeAndEbsVolumes(size)).getValue();
    List<SignedIpAddressAllocation> ipAllocations = JobIpAllocationGenerator.jobIpAllocations(size).toList();
    JobDescriptor<ServiceJobExt> descriptorWithIps = JobFunctions.jobWithIpAllocations(baseDescriptor, ipAllocations);
    List<EbsVolume> volumes = descriptorWithIps.getContainer().getContainerResources().getEbsVolumes();
    jobsScenarioBuilder.schedule(descriptorWithIps, job -> job
            .expectAllTasksCreated()
            // Exactly one task for each (volume, IP) pair at the same index.
            .expectSome(1, task -> matchingEbsAndIpIndex(task.getTask(), volumes, ipAllocations, 0))
            .expectSome(1, task -> matchingEbsAndIpIndex(task.getTask(), volumes, ipAllocations, 1))
            .expectSome(1, task -> matchingEbsAndIpIndex(task.getTask(), volumes, ipAllocations, 2))
            .expectSome(1, task -> matchingEbsAndIpIndex(task.getTask(), volumes, ipAllocations, 3))
    );
}
/** Returns a transformation that sets the batch size and attaches a matching number of EBS volumes. */
private static Function<JobDescriptor<BatchJobExt>, JobDescriptor<BatchJobExt>> batchOfSizeAndEbsVolumes(int size) {
    List<EbsVolume> volumes = JobEbsVolumeGenerator.jobEbsVolumes(size).toList();
    Map<String, String> volumeAttributes = JobEbsVolumeGenerator.jobEbsVolumesToAttributes(volumes);
    return descriptor -> JobFunctions.changeBatchJobSize(descriptor, size)
            .but(resized -> JobFunctions.jobWithEbsVolumes(resized, volumes, volumeAttributes));
}
/** Returns a transformation that sets the service capacity and attaches a matching number of EBS volumes. */
private static Function<JobDescriptor<ServiceJobExt>, JobDescriptor<ServiceJobExt>> serviceOfSizeAndEbsVolumes(int size) {
    List<EbsVolume> volumes = JobEbsVolumeGenerator.jobEbsVolumes(size).toList();
    Map<String, String> volumeAttributes = JobEbsVolumeGenerator.jobEbsVolumesToAttributes(volumes);
    return descriptor -> JobFunctions.changeServiceJobCapacity(descriptor, size)
            .but(resized -> JobFunctions.jobWithEbsVolumes(resized, volumes, volumeAttributes));
}
/** Predicate that holds when every task in the slot carries the same EBS volume id in its task context. */
private static Predicate<List<TaskScenarioBuilder>> sameEbsVolumePredicate() {
    return builders -> {
        long distinctVolumeIds = builders.stream()
                .map(builder -> builder.getTask().getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID))
                .distinct()
                .count();
        return distinctVolumeIds == 1;
    };
}
/** Returns true when the task is paired with both the EBS volume and the IP allocation at the given index. */
private boolean matchingEbsAndIpIndex(Task task, List<EbsVolume> ebsVolumes, List<SignedIpAddressAllocation> signedIpAddressAllocations, int index) {
    Map<String, String> context = task.getTaskContext();
    String expectedVolumeId = ebsVolumes.get(index).getVolumeId();
    String expectedAllocationId = signedIpAddressAllocations.get(index).getIpAddressAllocation().getAllocationId();
    return context.getOrDefault(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, "").equals(expectedVolumeId)
            && context.getOrDefault(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, "").equals(expectedAllocationId);
}
}
| 9,953 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobScalingTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobGroupInfo;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobScalingTest extends BaseIntegrationTest {

    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    @After
    public void tearDown() throws Exception {
        // Every test must leave job/task record versions monotonically ordered.
        jobsScenarioBuilder.expectVersionsOrdered();
    }

    /** Scales a service job up via a full capacity update, then back down. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJob() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(1).withMax(5).build())
                .expectJobToScaleDown()
        );
    }

    /** Changes only the desired count of a running service job. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobDesired() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                // NOTE(review): desired goes 2 -> 4 (a scale up) yet scale-down events are expected —
                // confirm expectJobToScaleDown semantics against the scenario builder.
                .updateJobCapacityDesired(4, 0, 5)
                .expectJobToScaleDown()
        );
    }

    /** Raising only the min (to the current desired) must be accepted. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobMin() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacityMin(2, 5, 2)
        );
    }

    /** Raising min above the current desired must pull desired up to the new min. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobMinAdjustsDesired() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(1).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacityMin(2, 5, 2)
        );
    }

    /** Lowering only the max (above the current desired) must be accepted. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobMax() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacityMax(4, 0, 2)
        );
    }

    /** Lowering max below the current desired must pull desired down to the new max. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobMaxAdjustsDesired() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(4).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacityMax(2, 0, 2)
        );
    }

    /** A desired count above max must be rejected. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testScaleUpAndDownServiceJobDesiredInvalid() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testScaleUpAndDownServiceJob"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(0).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                .updateJobCapacityDesiredInvalid(6, 2)
        );
    }

    /** Terminate-and-shrink must reduce both min and desired by one. */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testTerminateAndShrink() throws Exception {
        jobsScenarioBuilder.schedule(newJob("testTerminateAndShrink"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startTasksInNewJob())
                .updateJobCapacity(JobModel.newCapacity().withMin(2).withDesired(2).withMax(5).build())
                .expectAllTasksCreated()
                .inTask(0, taskScenarioBuilder -> taskScenarioBuilder
                        .killTaskAndShrink()
                        .expectStateUpdateSkipOther(TaskStatus.TaskState.Finished)
                )
                .expectJobUpdateEvent(job -> {
                    Capacity capacity = ((Job<ServiceJobExt>) (Job<?>) job).getJobDescriptor().getExtensions().getCapacity();
                    return capacity.getMin() == 1 && capacity.getDesired() == 1;
                }, "Expected job to scale down to one instance")
        );
    }

    /**
     * Terminate-and-shrink with the min check enabled must be rejected with FAILED_PRECONDITION
     * when it would push the desired size below the configured minimum.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testTerminateAndShrinkNotAllowedIfDesiredToLowAndCheckEnabled() {
        boolean rejected = false;
        try {
            jobsScenarioBuilder.schedule(newJob("testTerminateAndShrinkNotAllowed"), jobScenarioBuilder -> jobScenarioBuilder
                    .template(ScenarioTemplates.startTasksInNewJob())
                    .updateJobCapacity(JobModel.newCapacity().withMin(2).withDesired(2).withMax(5).build())
                    .expectAllTasksCreated()
                    .inTask(0, TaskScenarioBuilder::killTaskAndShrinkWithMinCheck)
            );
        } catch (Exception e) {
            // The gRPC failure surfaces as the cause of the scenario exception; verify before casting.
            assertThat(e.getCause()).isInstanceOf(StatusRuntimeException.class);
            StatusRuntimeException cause = (StatusRuntimeException) e.getCause();
            assertThat(cause.getStatus().getCode()).isEqualTo(Status.Code.FAILED_PRECONDITION);
            assertThat(cause.getMessage()).contains("Terminate and shrink would make desired job size go below the configured minimum");
            rejected = true;
        }
        // Bug fix: previously the test passed silently when no exception was thrown at all.
        assertThat(rejected).as("expected terminate-and-shrink to be rejected").isTrue();
    }

    /** Builds a one-task service job tagged with the given job-group detail for traceability. */
    private JobDescriptor<ServiceJobExt> newJob(String detail) {
        return oneTaskServiceJobDescriptor().toBuilder()
                .withApplicationName(TitusStackResource.V3_ENGINE_APP_PREFIX)
                .withJobGroupInfo(JobGroupInfo.newBuilder().withDetail(detail).build())
                .build();
    }
}
| 9,954 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobFederationTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc;
import com.netflix.titus.grpc.protogen.SchedulingResultEvent;
import com.netflix.titus.grpc.protogen.SchedulingResultRequest;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.embedded.federation.EmbeddedTitusFederation;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.jayway.awaitility.Awaitility.await;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobFederationTest extends BaseIntegrationTest {

    private static final Logger logger = LoggerFactory.getLogger(JobFederationTest.class);

    private final String federatedStackName = UUID.randomUUID().toString();

    @Rule
    public final TitusStackResource titusStackResource = new TitusStackResource(
            EmbeddedTitusFederation.aDefaultTitusFederation()
                    .withProperty("titus.federation.stack", federatedStackName)
                    .withCell("a.*", basicKubeCell("defaultCell", 2))
                    .withCell("b.*", basicKubeCell("v3OnlyCell", 2))
                    .build()
    );

    private JobManagementServiceGrpc.JobManagementServiceBlockingStub blockingJobClient;
    private SchedulerServiceGrpc.SchedulerServiceBlockingStub blockingSchedulerClient;
    private TestStreamObserver<JobChangeNotification> eventStreamObserver;

    @Before
    public void setUp() throws Exception {
        this.blockingJobClient = titusStackResource.getOperations().getV3BlockingGrpcClient();
        this.blockingSchedulerClient = titusStackResource.getOperations().getV3BlockingSchedulerClient();
        this.eventStreamObserver = new TestStreamObserver<>();
        // Subscribe to the unfiltered job event stream before any job is created.
        JobManagementServiceGrpc.JobManagementServiceStub asyncClient = titusStackResource.getOperations().getV3GrpcClient();
        asyncClient.observeJobs(ObserveJobsQuery.newBuilder().build(), eventStreamObserver);
    }

    /**
     * Verifies that the capacity-group routing rules send jobs to the expected cell, and that the
     * federation stack/cell attributes are stamped on both the job and its task.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testJobCreateRouting() {
        Map<String, Job> jobs = new ConcurrentHashMap<>();
        Map<String, Task> tasks = new ConcurrentHashMap<>();
        eventStreamObserver.toObservable().subscribe(
                event -> {
                    switch (event.getNotificationCase()) {
                        case JOBUPDATE:
                            Job job = event.getJobUpdate().getJob();
                            jobs.put(job.getId(), job);
                            break;
                        case TASKUPDATE:
                            Task task = event.getTaskUpdate().getTask();
                            tasks.put(task.getJobId(), task);
                            break;
                    }
                }
        );

        // Capacity group "a*" routes to defaultCell, "b*" to v3OnlyCell.
        String cell1JobId = blockingJobClient.createJob(toGrpcJobDescriptor(oneTaskBatchJobDescriptor().toBuilder().withCapacityGroup("a123").build())).getId();
        String cell2JobId = blockingJobClient.createJob(toGrpcJobDescriptor(oneTaskBatchJobDescriptor().toBuilder().withCapacityGroup("b123").build())).getId();

        await().timeout(5, TimeUnit.SECONDS).until(() -> tasks.containsKey(cell1JobId));
        await().timeout(5, TimeUnit.SECONDS).until(() -> tasks.containsKey(cell2JobId));

        Map<String, String> cell1JobAttributes = jobs.get(cell1JobId).getJobDescriptor().getAttributesMap();
        assertThat(cell1JobAttributes).containsEntry("titus.stack", federatedStackName);
        assertThat(cell1JobAttributes).containsEntry("titus.cell", "defaultCell");
        Map<String, String> cell2JobAttributes = jobs.get(cell2JobId).getJobDescriptor().getAttributesMap();
        assertThat(cell2JobAttributes).containsEntry("titus.stack", federatedStackName);
        assertThat(cell2JobAttributes).containsEntry("titus.cell", "v3OnlyCell");

        Map<String, String> cell1TaskContext = tasks.get(cell1JobId).getTaskContextMap();
        assertThat(cell1TaskContext).containsEntry("titus.stack", federatedStackName);
        assertThat(cell1TaskContext).containsEntry("titus.cell", "defaultCell");
        Map<String, String> cell2TaskContext = tasks.get(cell2JobId).getTaskContextMap();
        assertThat(cell2TaskContext).containsEntry("titus.stack", federatedStackName);
        assertThat(cell2TaskContext).containsEntry("titus.cell", "v3OnlyCell");
    }

    /**
     * Verifies that scheduling failures for unschedulable tasks are reported through the
     * federated scheduler result API for tasks in both cells.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testLastSchedulingResult() {
        // Ask for far more resources than any node in the embedded cells provides,
        // so the tasks can never be scheduled and failures must be reported.
        JobDescriptor<BatchJobExt> tooBigJob = oneTaskBatchJobDescriptor().but(jd -> jd.getContainer().toBuilder()
                .withContainerResources(ContainerResources.newBuilder()
                        .withCpu(64)
                        .withGpu(0)
                        .withMemoryMB(472_000)
                        .withDiskMB(999_000)
                        .withNetworkMbps(40_000)
                        .build()
                )
                .build()
        );
        blockingJobClient.createJob(toGrpcJobDescriptor(tooBigJob.toBuilder().withCapacityGroup("a123").build()));
        blockingJobClient.createJob(toGrpcJobDescriptor(tooBigJob.toBuilder().withCapacityGroup("b123").build()));

        List<Task> tasks = eventStreamObserver.toObservable()
                .filter(event -> event.getNotificationCase() == JobChangeNotification.NotificationCase.TASKUPDATE)
                .map(e -> e.getTaskUpdate().getTask())
                .take(2)
                .toList()
                .timeout(5, TimeUnit.SECONDS)
                .toBlocking()
                .first();

        checkSchedulingResult(tasks.get(0));
        checkSchedulingResult(tasks.get(1));
    }

    /** Polls the scheduler result API until at least one scheduling failure is reported for the task. */
    private void checkSchedulingResult(Task task) {
        await().timeout(30, TimeUnit.SECONDS).until(() -> {
            try {
                SchedulingResultEvent result = blockingSchedulerClient.getSchedulingResult(SchedulingResultRequest.newBuilder()
                        .setTaskId(task.getId())
                        .build()
                );
                logger.info("Received scheduling result: {}", result);
                if (!result.getFailures().getFailuresList().isEmpty()) {
                    return true;
                }
            } catch (Exception e) {
                // The result may not be available immediately after task creation; keep polling.
                logger.warn("Scheduling result query error", e);
            }
            return false;
        });
    }
}
| 9,955 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/other/JobRetryTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.other;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobProcesses;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.TaskStatus.TaskState;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
@Category(IntegrationTest.class)
public class JobRetryTest extends BaseIntegrationTest {

    /** Batch template: one task, 10 minute runtime limit, a single immediate retry. */
    private static final JobDescriptor<BatchJobExt> ONE_TASK_BATCH_JOB = oneTaskBatchJobDescriptor().toBuilder()
            .withApplicationName(TitusStackResource.V3_ENGINE_APP_PREFIX)
            .withExtensions(BatchJobExt.newBuilder()
                    .withSize(1)
                    .withRuntimeLimitMs(600000)
                    .withRetryPolicy(JobModel.newImmediateRetryPolicy().withRetries(1).build())
                    .build()
            )
            .build();

    /** Service template: desired 1 (min 0, max 2), a single immediate retry. */
    private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = oneTaskServiceJobDescriptor().toBuilder()
            .withApplicationName(TitusStackResource.V3_ENGINE_APP_PREFIX)
            .withExtensions(ServiceJobExt.newBuilder()
                    .withCapacity(Capacity.newBuilder().withMin(0).withDesired(1).withMax(2).build())
                    .withRetryPolicy(JobModel.newImmediateRetryPolicy().withRetries(1).build())
                    .withServiceJobProcesses(ServiceJobProcesses.newBuilder().build())
                    .build()
            )
            .build();

    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(5));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    @After
    public void tearDown() throws Exception {
        jobsScenarioBuilder.expectVersionsOrdered();
    }

    /** A failed batch task is replaced once, and the replacement runs to completion. */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testBatchJobRetry() {
        JobDescriptor<BatchJobExt> jobDescriptor = ONE_TASK_BATCH_JOB.toBuilder().withApplicationName("testBatchJobRetry").build();
        jobsScenarioBuilder.schedule(jobDescriptor, job -> job
                .template(ScenarioTemplates.startTasksInNewJob())
                .inTask(0, TaskScenarioBuilder::failTaskExecution)
                .inTask(0, task -> task.expectStateUpdateSkipOther(TaskState.Finished))
                .expectAllTasksCreated()
                .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                .assertTasks(tasks -> tasks.get(0).getResubmitNumber() == 1)
                .inTask(0, task -> task.template(ScenarioTemplates.startTask()))
                .inTask(0, task -> task.template(ScenarioTemplates.completeTask()))
                .expectJobEventStreamCompletes()
        );
    }

    /** A finished service task is replaced once; the job is then killed explicitly. */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testServiceJobRetry() {
        JobDescriptor<ServiceJobExt> jobDescriptor = ONE_TASK_SERVICE_JOB.toBuilder().withApplicationName("testServiceJobRetry").build();
        jobsScenarioBuilder.schedule(jobDescriptor, job -> job
                .template(ScenarioTemplates.startTasksInNewJob())
                .inTask(0, task -> task.transitionUntil(TaskState.Finished))
                .inTask(0, task -> task.expectStateUpdateSkipOther(TaskState.Finished))
                .expectAllTasksCreated()
                .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                .assertTasks(tasks -> tasks.get(0).getResubmitNumber() == 1)
                .inTask(0, task -> task.template(ScenarioTemplates.startTask()))
                .template(ScenarioTemplates.killJob())
                .expectJobEventStreamCompletes()
        );
    }

    /** A batch job exhausts its single retry and finishes after the second failure. */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testBatchJobFailsAfterRetrying() {
        JobDescriptor<BatchJobExt> jobDescriptor = ONE_TASK_BATCH_JOB.toBuilder().withApplicationName("testBatchJobFailsAfterRetrying").build();
        jobsScenarioBuilder.schedule(jobDescriptor, job -> job
                .template(ScenarioTemplates.startTasksInNewJob())
                .inTask(0, TaskScenarioBuilder::failTaskExecution)
                .inTask(0, task -> task.expectStateUpdateSkipOther(TaskState.Finished))
                .expectAllTasksCreated()
                .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                .inTask(0, TaskScenarioBuilder::failTaskExecution)
                .expectJobEventStreamCompletes()
        );
    }

    /** A service job keeps replacing failed tasks beyond the retry limit until it is killed. */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void testServiceJobFailsAfterRetrying() {
        JobDescriptor<ServiceJobExt> jobDescriptor = ONE_TASK_SERVICE_JOB.toBuilder().withApplicationName("testServiceJobFailsAfterRetrying").build();
        jobsScenarioBuilder.schedule(jobDescriptor, job -> job
                .template(ScenarioTemplates.startTasksInNewJob())
                .inTask(0, TaskScenarioBuilder::failTaskExecution)
                .inTask(0, task -> task.expectStateUpdateSkipOther(TaskState.Finished))
                .expectAllTasksCreated()
                .allTasks(TaskScenarioBuilder::expectTaskOnAgent)
                .inTask(0, TaskScenarioBuilder::failTaskExecution)
                .inTask(0, task -> task.expectStateUpdateSkipOther(TaskState.Finished))
                .expectAllTasksCreated() // Service job retries forever
                .template(ScenarioTemplates.killJob())
                .expectJobEventStreamCompletes()
        );
    }
}
| 9,956 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobSubmitAndControlNegativeTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.Iterator;
import java.util.regex.Pattern;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobConfiguration;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.grpc.protogen.BatchJobSpec;
import com.netflix.titus.grpc.protogen.Capacity;
import com.netflix.titus.grpc.protogen.Constraints;
import com.netflix.titus.grpc.protogen.ContainerResources;
import com.netflix.titus.grpc.protogen.Image;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceBlockingStub;
import com.netflix.titus.grpc.protogen.Owner;
import com.netflix.titus.grpc.protogen.RetryPolicy;
import com.netflix.titus.grpc.protogen.SecurityProfile;
import com.netflix.titus.grpc.protogen.ServiceJobSpec;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import io.grpc.StatusRuntimeException;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.master.integration.v3.job.JobTestUtils.submitBadJob;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.serviceJobDescriptors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
/**
*
*/
@Category(IntegrationTest.class)
public class JobSubmitAndControlNegativeTest extends BaseIntegrationTest {
// Pre-built valid descriptors; each test flips one field to an invalid value.
private final JobDescriptor.Builder BATCH_JOB_DESCR_BUILDER = toGrpcJobDescriptor(batchJobDescriptors().getValue()).toBuilder();
private final BatchJobSpec.Builder BATCH_JOB_SPEC_BUILDER = BATCH_JOB_DESCR_BUILDER.getBatch().toBuilder();
private final JobDescriptor.Builder SERVICE_JOB_DESCR_BUILDER = toGrpcJobDescriptor(serviceJobDescriptors().getValue()).toBuilder();
private final ServiceJobSpec.Builder SERVICE_JOB_SPEC_BUILDER = SERVICE_JOB_DESCR_BUILDER.getService().toBuilder();
// One embedded cell shared by all tests in this class (class-level rule).
@ClassRule
public static final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));
// Blocking client used by every negative-submission test.
private static JobManagementServiceBlockingStub client;
@BeforeClass
public static void setUp() throws Exception {
client = titusStackResource.getGateway().getV3BlockingGrpcClient();
}
/** A job without an owner team email must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithNoOwner() {
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER.setOwner(Owner.getDefaultInstance()).build();
    submitBadJob(client, badJob, "owner.teamEmail");
}
/** Empty or whitespace-only application names must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithNoApplicationName() {
    // Empty string.
    submitBadJob(client, BATCH_JOB_DESCR_BUILDER.setApplicationName("").build(), "applicationName");
    // Whitespace only.
    submitBadJob(client, BATCH_JOB_DESCR_BUILDER.setApplicationName("   ").build(), "applicationName");
}
/** Negative GPU counts must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidComputeResources() {
    ContainerResources negativeGpu = ContainerResources.newBuilder()
            .setGpu(-1)
            .build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setResources(negativeGpu))
            .build();
    submitBadJob(client, badJob, "container.containerResources.gpu");
}
/**
 * Every compute resource above its configured maximum must be flagged.
 * TODO GPU is not limited today. We should add GPU to {@link ResourceDimension} model.
 */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithExcessiveComputeResources() {
    ContainerResources hugeResources = ContainerResources.newBuilder()
            .setCpu(100)
            .setGpu(100)
            .setMemoryMB(1000_000_000)
            .setDiskMB(1000_000_000)
            .setNetworkMbps(10_000_000)
            .build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setResources(hugeResources))
            .build();
    submitBadJob(
            client,
            badJob,
            "container.containerResources.cpu",
            "container.containerResources.gpu",
            "container.containerResources.memoryMB",
            "container.containerResources.networkMbps",
            "container.containerResources.diskMB"
    );
}
/** EFS mounts missing both id and mount point must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidEfsMounts() {
    ContainerResources emptyEfsMount = ContainerResources.newBuilder()
            .addEfsMounts(ContainerResources.EfsMount.getDefaultInstance())
            .build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setResources(emptyEfsMount))
            .build();
    submitBadJob(
            client,
            badJob,
            "container.containerResources.efsMounts[0].efsId",
            "container.containerResources.efsMounts[0].mountPoint"
    );
}
/** Malformed security group names and IAM roles must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithBadSecurityProfile() {
    SecurityProfile badProfile = SecurityProfile.newBuilder()
            .addSecurityGroups("not-good-security-group")
            .setIamRole("A B")
            .build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setSecurityProfile(badProfile))
            .build();
    submitBadJob(client, badJob, "container.securityProfile.securityGroups", "container.securityProfile.iamRole");
}
/** A job with an empty container image must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithoutImage() {
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setImage(Image.getDefaultInstance()))
            .build();
    submitBadJob(
            client,
            badJob,
            "container.image.name",
            "container.image.noValidImageDigestOrTag"
    );
}
/** Images with illegal characters in the name and tag must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidNameAndTag() {
    Image badImage = Image.newBuilder().setName("????????").setTag("############").build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder().setImage(badImage))
            .build();
    submitBadJob(
            client,
            badJob,
            "container.image.name",
            "container.image.noValidImageDigestOrTag"
    );
}
/** Unknown constraint names in both the soft and hard sets must be reported. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testInvalidSoftAndHardConstraints() {
    Constraints badSoft = Constraints.newBuilder().putConstraints("badSoftConstraint", "").build();
    Constraints badHard = Constraints.newBuilder().putConstraints("badHardConstraint", "").build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder()
                    .setSoftConstraints(badSoft)
                    .setHardConstraints(badHard)
            )
            .build();
    submitBadJob(client, badJob, "container.hardConstraints", "container.softConstraints");
}
/** The same constraint appearing in both the soft and hard sets must be rejected. */
@Test(timeout = TEST_TIMEOUT_MS)
public void testOverlappingSoftAndHardConstraints() {
    Constraints uniqueHost = Constraints.newBuilder().putConstraints("UniqueHost", "true").build();
    JobDescriptor badJob = BATCH_JOB_DESCR_BUILDER
            .setContainer(BATCH_JOB_DESCR_BUILDER.getContainer().toBuilder()
                    .setSoftConstraints(uniqueHost)
                    .setHardConstraints(uniqueHost)
            )
            .build();
    submitBadJob(client, badJob, "container");
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testBatchJobWithInvalidSize() {
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setSize(-5)).build(),
"extensions.size"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testTooLargeBatchJob() {
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setSize(5000)).build(),
"extensions.size"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testBatchJobWithTooLowRuntimeLimit() {
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setRuntimeLimitSec(4)).build(),
"extensions.runtimeLimitMs"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testTooLargeBatchJobRuntimeLimit() {
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setRuntimeLimitSec(2 * JobConfiguration.MAX_RUNTIME_LIMIT_SEC)).build(),
"extensions.runtimeLimitMs"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testServiceJobInvalidCapacity() {
Capacity badCapacity = Capacity.newBuilder().setMin(-2).setDesired(-3).setMax(-4).build();
submitBadJob(
client,
SERVICE_JOB_DESCR_BUILDER.setService(SERVICE_JOB_SPEC_BUILDER.setCapacity(badCapacity).build()).build(),
"extensions.capacity",
"extensions.capacity.desired",
"extensions.capacity.max",
"extensions.capacity.min"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testTooLargeServiceJob() {
Capacity badCapacity = Capacity.newBuilder().setMin(1).setDesired(100).setMax(10_001).build();
submitBadJob(
client,
SERVICE_JOB_DESCR_BUILDER.setService(SERVICE_JOB_SPEC_BUILDER.setCapacity(badCapacity)).build(),
"extensions.capacity"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidImmediateRetryPolicy() {
RetryPolicy badRetryPolicy = RetryPolicy.newBuilder().setImmediate(
RetryPolicy.Immediate.newBuilder().setRetries(-1)
).build();
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setRetryPolicy(badRetryPolicy)).build(),
"extensions.retryPolicy.retries"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidDelayedRetryPolicy() {
RetryPolicy badRetryPolicy = RetryPolicy.newBuilder().setDelayed(
RetryPolicy.Delayed.newBuilder().setRetries(-1).setDelayMs(-1)
).build();
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setRetryPolicy(badRetryPolicy)).build(),
"extensions.retryPolicy.retries",
"extensions.retryPolicy.delayMs"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobWithInvalidExpBackoffRetryPolicy() {
RetryPolicy badRetryPolicy = RetryPolicy.newBuilder().setExponentialBackOff(
RetryPolicy.ExponentialBackOff.newBuilder().setRetries(-1).setInitialDelayMs(-1).setMaxDelayIntervalMs(-1)
).build();
submitBadJob(
client,
BATCH_JOB_DESCR_BUILDER.setBatch(BATCH_JOB_SPEC_BUILDER.setRetryPolicy(badRetryPolicy)).build(),
"extensions.retryPolicy.retries",
"extensions.retryPolicy.initialDelayMs",
"extensions.retryPolicy.maxDelayMs"
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testSubmitJobsWithIdenticalJobGroupIdentity() {
JobDescriptor jobDescriptor = toGrpcJobDescriptor(JobDescriptorGenerator.oneTaskServiceJobDescriptor()
.but(jd -> jd.toBuilder().withApplicationName("v3App").build())
);
try {
String jobId = client.createJob(jobDescriptor).getId();
Iterator<JobChangeNotification> it = client.observeJob(JobId.newBuilder().setId(jobId).build());
// Make sure notifications are sent. For that we need to consume snapshot (job + marker), and actual event from reconciler.
Evaluators.times(3, it::next);
client.createJob(jobDescriptor).getId();
fail("Expected test to fail");
} catch (StatusRuntimeException e) {
assertThat(e.getMessage()).containsPattern(Pattern.compile("job with group sequence.*exists"));
}
}
}
| 9,957 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobDisruptionBudgetTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.Collections;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.changeDisruptionBudget;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.budget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.numberOfHealthyPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.officeHourTimeWindow;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.percentageOfHealthyPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.unlimitedRate;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
@Category(IntegrationTest.class)
public class JobDisruptionBudgetTest extends BaseIntegrationTest {

    // Budget built from a number-of-healthy policy (10), an unlimited rate, and an office-hour time window.
    private static final DisruptionBudget HEALTHY_COUNT_BUDGET = budget(
            numberOfHealthyPolicy(10), unlimitedRate(), Collections.singletonList(officeHourTimeWindow())
    );

    // Budget built from a percentage-of-healthy policy (80), an unlimited rate, and an office-hour time window.
    private static final DisruptionBudget HEALTHY_PERCENTAGE_BUDGET = budget(
            percentageOfHealthyPolicy(80), unlimitedRate(), Collections.singletonList(officeHourTimeWindow())
    );

    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    @After
    public void tearDown() throws Exception {
        jobsScenarioBuilder.expectVersionsOrdered();
    }

    /**
     * Submits a service job with a number-of-healthy budget and then updates it to a percentage budget.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testDisruptionBudgetUpdate() {
        JobDescriptor<ServiceJobExt> jobDescriptor = changeDisruptionBudget(oneTaskServiceJobDescriptor(), HEALTHY_COUNT_BUDGET);
        jobsScenarioBuilder.schedule(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.jobAccepted())
                .updateJobDisruptionBudget(HEALTHY_PERCENTAGE_BUDGET)
        );
    }
}
| 9,958 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobSanitizeTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.common.model.admission.AdmissionSanitizer;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.runtime.connector.registry.RegistryClient;
import com.netflix.titus.runtime.connector.registry.TitusRegistryException;
import com.netflix.titus.runtime.endpoint.admission.JobImageSanitizer;
import com.netflix.titus.runtime.endpoint.admission.JobImageSanitizerConfiguration;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCell;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeCluster;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import reactor.core.publisher.Mono;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Category(IntegrationTest.class)
public class JobSanitizeTest extends BaseIntegrationTest {

    private static final Logger logger = LoggerFactory.getLogger(JobSanitizeTest.class);

    private static final String repo = "myRepo";
    private static final String tag = "myTag";
    private static final String digest = "sha256:f9f5bb506406b80454a4255b33ed2e4383b9e4a32fb94d6f7e51922704e818fa";
    private static final String missingImageErrorMsg = "does not exist in registry";

    private final JobImageSanitizerConfiguration configuration = mock(JobImageSanitizerConfiguration.class);
    private final RegistryClient registryClient = mock(RegistryClient.class);

    @Rule
    public final TitusStackResource titusStackResource = getTitusStackResource(
            new JobImageSanitizer(configuration, registryClient, new DefaultRegistry())
    );

    private JobManagementServiceGrpc.JobManagementServiceBlockingStub client;

    @Before
    public void setUp() throws Exception {
        // Enable image sanitization in HARD (rejecting) mode with a short validation timeout.
        when(configuration.isEnabled()).thenReturn(true);
        when(configuration.getJobImageValidationTimeoutMs()).thenReturn(1000L);
        when(configuration.getErrorType()).thenReturn(ValidationError.Type.HARD.name());
        this.client = titusStackResource.getGateway().getV3BlockingGrpcClient();
    }

    /**
     * Verifies that a digest value is properly added to a job descriptor that is using tag.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testJobDigestResolution() {
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));

        String jobId = client.createJob(newJobDescriptorWithTaggedImage()).getId();
        Job resultJobDescriptor = client.findJob(JobId.newBuilder().setId(jobId).build());
        assertEquals(digest, resultJobDescriptor.getJobDescriptor().getContainer().getImage().getDigest());
    }

    /**
     * Verifies that a NOT_FOUND image produces an invalid argument exception.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testNonexistentTag() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(TitusRegistryException.imageNotFound(repo, tag)));

        try {
            client.createJob(newJobDescriptorWithTaggedImage()).getId();
            fail("Expect createJob() to fail");
        } catch (StatusRuntimeException e) {
            assertEquals(Status.INVALID_ARGUMENT.getCode(), e.getStatus().getCode());
            assertTrue(e.getMessage().contains(missingImageErrorMsg));
        }
    }

    /**
     * Verifies that non-NOT_FOUND errors are suppressed and the original job descriptor is not modified.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testSuppressedInternalError() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(
                        Mono.error(TitusRegistryException.internalError(repo, tag, HttpStatus.INTERNAL_SERVER_ERROR))
                );

        String jobId = client.createJob(newJobDescriptorWithTaggedImage()).getId();
        Job resultJobDescriptor = client.findJob(JobId.newBuilder().setId(jobId).build());
        logger.info("Got back result {}", resultJobDescriptor);
        // No digest was resolved, so the image must be left untouched.
        assertTrue(resultJobDescriptor.getJobDescriptor().getContainer().getImage().getDigest().isEmpty());
    }

    /**
     * Builds a batch job descriptor whose image references {@code repo:tag} only (no digest), so the
     * sanitizer has to resolve the digest. Extracted to remove the triplicated construction code.
     */
    private static com.netflix.titus.grpc.protogen.JobDescriptor newJobDescriptorWithTaggedImage() {
        return toGrpcJobDescriptor(batchJobDescriptors()
                .map(jd -> jd.but(d -> d.getContainer().toBuilder()
                        .withImage(Image.newBuilder()
                                .withName(repo)
                                .withTag(tag)
                                .build())
                ))
                .getValue());
    }

    private TitusStackResource getTitusStackResource(AdmissionSanitizer<JobDescriptor> sanitizer) {
        EmbeddedKubeCluster kubeCluster = EmbeddedKubeClusters.basicCluster(2);
        return new TitusStackResource(EmbeddedTitusCell.aTitusCell()
                .withMaster(EmbeddedTitusMasters.basicMasterWithKubeIntegration(kubeCluster).toBuilder()
                        .withCellName("cell-name")
                        .build())
                .withDefaultGateway()
                .withJobSanitizer(sanitizer)
                .build());
    }
}
| 9,959 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobCustomConfigurationValidatorTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import com.netflix.archaius.config.DefaultSettableConfig;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.runtime.TitusEntitySanitizerModule.CUSTOM_JOB_CONFIGURATION_ROOT;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobCustomConfigurationValidatorTest extends BaseIntegrationTest {

    @Rule
    public final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(1)
            .toMaster(master -> master
                    .withProperty(CUSTOM_JOB_CONFIGURATION_ROOT + ".pattern", ".*")
                    .withProperty(CUSTOM_JOB_CONFIGURATION_ROOT + ".maxBatchJobSize", "1"
                    )
            ));

    private JobManagementServiceGrpc.JobManagementServiceBlockingStub client;

    @Before
    public void setUp() throws Exception {
        client = titusStackResource.getGateway().getV3BlockingGrpcClient();
    }

    @Test(timeout = TEST_TIMEOUT_MS)
    public void testCustomBatchJobSizeValidation() {
        // Batch job of size 2 exceeds the custom per-application limit of 1.
        JobDescriptor<BatchJobExt> jobDescriptor = JobFunctions.changeBatchJobSize(
                oneTaskBatchJobDescriptor().toBuilder()
                        .withApplicationName("a1")
                        .build(),
                2
        );
        applyCustomLimitOfOne("maxBatchJobSize");
        submitAndExpectInvalidArgument(jobDescriptor);
    }

    @Test(timeout = TEST_TIMEOUT_MS)
    public void testCustomServiceJobSizeValidation() {
        // Service job with max capacity 2 exceeds the custom per-application limit of 1.
        JobDescriptor<ServiceJobExt> jobDescriptor = JobFunctions.changeServiceJobCapacity(
                oneTaskServiceJobDescriptor().toBuilder()
                        .withApplicationName("a1")
                        .build(),
                Capacity.newBuilder().withMax(2).build()
        );
        applyCustomLimitOfOne("maxServiceJobSize");
        submitAndExpectInvalidArgument(jobDescriptor);
    }

    /**
     * Installs a custom job configuration scoped to application "a1" that sets the given size property to 1.
     */
    private void applyCustomLimitOfOne(String sizeProperty) {
        DefaultSettableConfig config = titusStackResource.getMaster().getConfig();
        config.setProperty(CUSTOM_JOB_CONFIGURATION_ROOT + ".a1.pattern", ".*");
        config.setProperty(CUSTOM_JOB_CONFIGURATION_ROOT + ".a1." + sizeProperty, "1");
    }

    /**
     * Submits the job and asserts that the custom configuration validator rejects it with INVALID_ARGUMENT.
     */
    private void submitAndExpectInvalidArgument(JobDescriptor<?> jobDescriptor) {
        try {
            client.createJob(toGrpcJobDescriptor(jobDescriptor));
            Assert.fail("Expected to fail due to validation error");
        } catch (StatusRuntimeException e) {
            assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.INVALID_ARGUMENT);
        }
    }
}
| 9,960 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/TaskLifecycleTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobGroupInfo;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.TaskStatus.TaskState;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.junit.master.TitusStackResource.V3_ENGINE_APP_PREFIX;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
/**
 * 'Launched' state is no longer tested, as it is not used in the Kube integration (we fill in a dummy value).
 */
@Category(IntegrationTest.class)
public class TaskLifecycleTest extends BaseIntegrationTest {

    private static final JobDescriptor<BatchJobExt> BATCH_JOB_WITH_ONE_TASK =
            oneTaskBatchJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();

    // Master configured with short task-state timeouts so stuck-state handling triggers quickly.
    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2).toMaster(master -> master
            .withProperty("titusMaster.jobManager.taskInLaunchedStateTimeoutMs", "2000")
            .withProperty("titusMaster.jobManager.batchTaskInStartInitiatedStateTimeoutMs", "2000")
            .withProperty("titusMaster.jobManager.serviceTaskInStartInitiatedStateTimeoutMs", "2000")
            .withProperty("titusMaster.jobManager.taskInKillInitiatedStateTimeoutMs", "100")
    ));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    @After
    public void tearDown() throws Exception {
        jobsScenarioBuilder.expectVersionsOrdered();
    }

    @Test(timeout = 30_000)
    public void submitBatchTaskStuckInStartInitiated() {
        expectTaskStuckInState(BATCH_JOB_WITH_ONE_TASK, TaskState.StartInitiated);
    }

    @Test(timeout = 30_000)
    public void submitBatchJobStuckInKillInitiated() {
        jobsScenarioBuilder.schedule(BATCH_JOB_WITH_ONE_TASK, jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startJobAndMoveTasksToKillInitiated())
                .expectJobEventStreamCompletes()
        );
    }

    @Test(timeout = 30_000)
    public void submitServiceTaskStuckInStartInitiated() {
        expectTaskStuckInState(serviceJobWithDetail("submitServiceJobStuckInStartInitiated"), TaskState.StartInitiated);
    }

    @Test(timeout = 30_000)
    public void submitServiceJobStuckInKillInitiated() {
        jobsScenarioBuilder.schedule(serviceJobWithDetail("submitServiceJobStuckInKillInitiated"), jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.startJobAndMoveTasksToKillInitiated())
                .expectTaskInSlot(0, 1)
        );
    }

    // Schedules the job, waits for all tasks to reach Kube, and then locks the first task in the given state.
    private void expectTaskStuckInState(JobDescriptor<?> jobDescriptor, TaskState state) {
        jobsScenarioBuilder.schedule(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
                .template(ScenarioTemplates.jobAccepted())
                .expectAllTasksCreated()
                .allTasks(TaskScenarioBuilder::expectAllTasksInKube)
                .schedule()
                .inTask(0, taskScenarioBuilder -> taskScenarioBuilder.template(ScenarioTemplates.lockTaskInState(state)))
        );
    }

    // Builds a one-task service job descriptor tagged with the given job group detail.
    private JobDescriptor<ServiceJobExt> serviceJobWithDetail(String detail) {
        return oneTaskServiceJobDescriptor().toBuilder()
                .withApplicationName(TitusStackResource.V3_ENGINE_APP_PREFIX)
                .withJobGroupInfo(JobGroupInfo.newBuilder().withDetail(detail).build())
                .build();
    }
}
| 9,961 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobSubmitAndControlBasicTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.List;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.model.EfsMount;
import com.netflix.titus.common.aws.AwsInstanceType;
import com.netflix.titus.grpc.protogen.TaskStatus.TaskState;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.ContainersGenerator;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.master.integration.v3.scenario.JobAsserts.podWithEfsMounts;
import static com.netflix.titus.master.integration.v3.scenario.JobAsserts.podWithResources;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.junit.master.TitusStackResource.V3_ENGINE_APP_PREFIX;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobSubmitAndControlBasicTest extends BaseIntegrationTest {
private static final JobDescriptor<BatchJobExt> ONE_TASK_BATCH_JOB = oneTaskBatchJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();
private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = oneTaskServiceJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
private JobManagerConfiguration jobConfiguration;
@Before
public void setUp() throws Exception {
this.jobConfiguration = titusStackResource.getGateway().getInstance(JobManagerConfiguration.class);
}
@After
public void tearDown() throws Exception {
jobsScenarioBuilder.expectVersionsOrdered();
}
/**
* Verify batch job submit with the expected state transitions. Verify agent receives proper resources.
*/
@Test(timeout = 30_000)
public void testSubmitSimpleBatchJobWhichEndsOk() {
jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder -> jobScenarioBuilder
.inJob(job -> {
assertThat(job.getJobDescriptor().getAttributes().containsKey(JobAttributes.JOB_ATTRIBUTE_ROUTING_CELL));
assertThat(job.getJobDescriptor().getAttributes().containsKey(JobAttributes.JOB_ATTRIBUTES_CREATED_BY));
assertThat(job.getJobDescriptor().getAttributes().containsKey(JobAttributes.JOB_ATTRIBUTES_CALL_REASON));
})
.inStrippedJob(job -> assertThat(job.getJobDescriptor()).isEqualTo(ONE_TASK_BATCH_JOB))
.template(ScenarioTemplates.startTasksInNewJob())
.assertEachPod(
podWithResources(ONE_TASK_BATCH_JOB.getContainer().getContainerResources(), jobConfiguration.getMinDiskSizeMB()),
"Container not assigned the expected amount of resources"
)
.allTasks(ScenarioTemplates.completeTask())
.template(ScenarioTemplates.jobFinished())
.expectJobEventStreamCompletes()
);
}
/**
* Verify batch job submit with the expected state transitions. Verify agent receives proper EFS mount data.
*/
@Test(timeout = 30_000)
public void testSubmitBatchJobWithEfsMount() {
EfsMount efsMount1 = ContainersGenerator.efsMounts().getValue().toBuilder().withMountPoint("/data/logs").build();
EfsMount efsMount2 = ContainersGenerator.efsMounts().skip(1).getValue().toBuilder().withMountPoint("/data").build();
List<EfsMount> efsMounts = asList(efsMount1, efsMount2);
List<EfsMount> expectedOrder = asList(efsMount2, efsMount1);
JobDescriptor<BatchJobExt> jobWithEfs = ONE_TASK_BATCH_JOB.but(jd -> jd.getContainer().but(c -> c.getContainerResources().toBuilder().withEfsMounts(efsMounts)));
jobsScenarioBuilder.schedule(jobWithEfs, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
.assertEachPod(
podWithEfsMounts(expectedOrder),
"Container not assigned the expected EFS mount"
)
.allTasks(ScenarioTemplates.completeTask())
.template(ScenarioTemplates.jobFinished())
.expectJobEventStreamCompletes()
);
}
@Test(timeout = 30_000)
public void testSubmitSimpleBatchJobWhichFails() {
jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
.allTasks(TaskScenarioBuilder::transitionToFailed)
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdates(TaskState.Finished))
.template(ScenarioTemplates.jobFinished())
.expectJobEventStreamCompletes()
);
}
@Test(timeout = 30_000)
public void testSubmitSimpleBatchJobAndKillTask() {
JobDescriptor<BatchJobExt> retryableJob = ONE_TASK_BATCH_JOB.but(jd -> jd.getExtensions().toBuilder()
.withRetryPolicy(JobModel.newImmediateRetryPolicy().withRetries(1).build())
);
jobsScenarioBuilder.schedule(retryableJob, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
.allTasks(TaskScenarioBuilder::killTask)
.allTasks(taskScenarioBuilder -> taskScenarioBuilder
.completeKillInitiated()
.expectStateUpdates(TaskState.Finished)
)
.expectTaskInSlot(0, 1)
.inTask(0, 1, taskScenarioBuilder -> taskScenarioBuilder
.expectStateAndReasonUpdateSkipOther(TaskState.Accepted, "podCreated")
.killTask()
.completeKillInitiated()
.expectStateUpdates(TaskState.Finished)
)
.template(ScenarioTemplates.jobFinished())
.expectJobEventStreamCompletes()
);
}
@Test(timeout = 30_000)
public void testSubmitSimpleBatchJobAndKillIt() {
JobDescriptor<BatchJobExt> retryableJob = ONE_TASK_BATCH_JOB.but(jd -> jd.getExtensions().toBuilder()
.withRetryPolicy(JobModel.newImmediateRetryPolicy().withRetries(3).build())
);
jobsScenarioBuilder.schedule(retryableJob, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
.template(ScenarioTemplates.killJob())
);
}
@Test(timeout = 30_000)
public void testSubmitSimpleBatchJobWithNotRunningTaskAndKillIt() {
jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.jobAccepted())
.allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateAndReasonUpdateSkipOther(TaskState.Accepted, "podCreated"))
.template(ScenarioTemplates.killJob())
);
}
@Test(timeout = 30_000)
public void submitGpuBatchJob() {
    // A job requesting one GPU must be placed on a GPU-capable instance type.
    JobDescriptor<BatchJobExt> gpuJobDescriptor = ONE_TASK_BATCH_JOB.but(descriptor ->
            descriptor.getContainer().but(container -> container.getContainerResources().toBuilder().withGpu(1)));
    jobsScenarioBuilder.schedule(gpuJobDescriptor, scenario -> scenario
            .template(ScenarioTemplates.startTasksInNewJob())
            .allTasks(task -> task.expectInstanceType(AwsInstanceType.G2_2XLarge))
    );
}
/**
 * Service job lifecycle: the task starts, the job is killed, and the job event
 * stream completes with the expected state transitions.
 */
@Test(timeout = 30_000)
public void testSubmitSimpleServiceJob() {
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, scenario -> scenario
            .template(ScenarioTemplates.startTasksInNewJob())
            .template(ScenarioTemplates.killJob())
            .expectJobEventStreamCompletes()
    );
}
@Test(timeout = 30_000)
public void testEnableDisableServiceJob() {
    // Toggle the service job's enabled flag off and back on again.
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, scenario -> scenario
            .template(ScenarioTemplates.jobAccepted())
            .updateJobStatus(false)
            .updateJobStatus(true)
    );
}
}
| 9,962 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobValidatorNegativeTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.Collections;
import java.util.List;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.common.model.admission.AdmissionValidator;
import com.netflix.titus.common.model.admission.TitusValidatorConfiguration;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.runtime.endpoint.admission.AggregatingValidator;
import com.netflix.titus.runtime.endpoint.admission.FailJobValidator;
import com.netflix.titus.runtime.endpoint.admission.PassJobValidator;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCell;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeCluster;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import io.grpc.StatusRuntimeException;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters.toGrpcJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* This test suite proves that {@link AdmissionValidator <JobDescriptor>} failures behave as expected. Outside of this test suite
* the default AdmissionValidator is the {@link PassJobValidator}. All
* other test suites prove that it does not invalidate jobs inappropriately.
*/
@Category(IntegrationTest.class)
public class JobValidatorNegativeTest extends BaseIntegrationTest {
private static final Logger logger = LoggerFactory.getLogger(JobValidatorNegativeTest.class);
private static final TitusValidatorConfiguration configuration = mock(TitusValidatorConfiguration.class);
private static final List<AdmissionValidator<JobDescriptor>> validators = Collections.singletonList(new FailJobValidator());
public static TitusStackResource titusStackResource;
private JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
@BeforeClass
public static void setUpClass() {
    // Generous timeout: the FailJobValidator rejects instantly, so validation never
    // comes close to timing out.
    when(configuration.getTimeoutMs()).thenReturn(10_000);
    AggregatingValidator failingValidator =
            new AggregatingValidator(configuration, new DefaultRegistry(), validators);
    titusStackResource = getTitusStackResource(failingValidator);
}
@AfterClass
public static void afterClass() {
    // Guard against setUpClass having failed before the stack was created.
    if (titusStackResource == null) {
        return;
    }
    titusStackResource.after();
}
@Before
public void setUp() throws Exception {
    // Every test talks to the gateway through the blocking gRPC client.
    client = titusStackResource.getGateway().getV3BlockingGrpcClient();
}
// The FailJobValidator rejects every submission; createJob must surface the
// rejection as a StatusRuntimeException carrying the validator's error details.
@Test(timeout = 30_000)
public void testFailedValidationThrowsException() {
final com.netflix.titus.grpc.protogen.JobDescriptor jobDescriptor =
toGrpcJobDescriptor(batchJobDescriptors().getValue());
try {
client.createJob(jobDescriptor).getId();
// Reaching this line means the job was accepted despite the failing validator.
fail("Expected test to fail");
} catch (StatusRuntimeException e) {
logger.info("Received StatusRuntimeException: {}", e.getMessage());
// The gRPC status message must include the validator's description and field name.
assertThat(e.getMessage()).contains(FailJobValidator.ERR_DESCRIPTION);
assertThat(e.getMessage()).contains(FailJobValidator.ERR_FIELD);
}
}
/**
 * Builds a single-cell Titus stack whose master is configured with the supplied
 * admission validator, and starts it via {@code before()}.
 *
 * @param validator the admission validator to install in the cell
 * @return a started {@link TitusStackResource}; the caller is responsible for
 *         stopping it with {@code after()}
 */
private static TitusStackResource getTitusStackResource(AdmissionValidator<JobDescriptor> validator) {
    EmbeddedKubeCluster kubeCluster = EmbeddedKubeClusters.basicCluster(0);
    // Local renamed from "titusStackResource": the original name shadowed the static
    // field of the same name, which made setUpClass's assignment easy to misread.
    TitusStackResource resource = new TitusStackResource(EmbeddedTitusCell.aTitusCell()
            .withMaster(EmbeddedTitusMasters.basicMasterWithKubeIntegration(kubeCluster).toBuilder()
                    .withCellName("cell-name")
                    .build())
            .withDefaultGateway()
            .withJobValidator(validator)
            .build());
    resource.before();
    return resource;
}
}
| 9,963 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/TaskMoveTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.junit.master.TitusStackResource.V3_ENGINE_APP_PREFIX;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class TaskMoveTest extends BaseIntegrationTest {
private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = oneTaskServiceJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2).toMaster(master ->
master.withProperty("titus.feature.moveTaskApiEnabled", "true")
), true);
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
@After
public void tearDown() throws Exception {
// Verify job/task versions observed during the test arrived in monotonic order.
jobsScenarioBuilder.expectVersionsOrdered();
}
// Move a task between two service jobs: the source job should shrink to zero
// desired tasks, the target job should grow to two, and the moved task must
// record its origin job in its task context.
@Test(timeout = 30_000)
public void testSubmitSimpleServiceJob() throws Exception {
jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, 2, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
);
String targetJobId = jobsScenarioBuilder.takeJob(0).getJobId();
String sourceJobId = jobsScenarioBuilder.takeJob(1).getJobId();
// Move the source job's only task into the target job.
jobsScenarioBuilder.takeJob(1)
.inTask(0, taskScenarioBuilder -> taskScenarioBuilder.moveTask(targetJobId))
.expectJobUpdateEvent(job -> JobFunctions.getJobDesiredSize(job) == 0, "Job with no tasks expected");
jobsScenarioBuilder.takeJob(0)
.expectJobUpdateEvent(job -> JobFunctions.getJobDesiredSize(job) == 2, "Job with two tasks expected")
.inTask(1, taskScenarioBuilder -> taskScenarioBuilder
.assertTaskUpdate(task -> {
assertThat(task.getJobId()).isEqualTo(targetJobId);
assertThat(task.getTaskContext()).containsEntry(TaskAttributes.TASK_ATTRIBUTES_MOVED_FROM_JOB, sourceJobId);
})
)
);
}
| 9,964 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/V3JobSchedulingAndRebootTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobAsserts;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.junit.master.TitusStackResource.V3_ENGINE_APP_PREFIX;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
@Category(IntegrationTest.class)
public class V3JobSchedulingAndRebootTest extends BaseIntegrationTest {
private static final JobDescriptor<BatchJobExt> ONE_TASK_BATCH_JOB = oneTaskBatchJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
// Schedule a job, reboot the master, and verify the job and its task are
// re-loaded from the store by the restarted master.
@Test(timeout = LONG_TEST_TIMEOUT_MS)
public void submitBatchJobAndRebootTitusMaster() {
jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.startTasksInNewJob())
);
// Stop observing before the reboot; a fresh scenario builder re-reads state afterwards.
jobsScenarioBuilder.stop();
titusStackResource.getMaster().reboot();
JobsScenarioBuilder newJobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource.getOperations());
newJobsScenarioBuilder
.assertJobs(jobs -> jobs.size() == 1)
.takeJob(0)
.assertJob(JobAsserts.jobInState(JobState.Accepted))
.assertTasks(tasks -> tasks.size() == 1);
}
}
| 9,965 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/TaskAttributesTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.Collections;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.junit.master.TitusStackResource.V3_ENGINE_APP_PREFIX;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class TaskAttributesTest extends BaseIntegrationTest {
private static final JobDescriptor<ServiceJobExt> ONE_TASK_SERVICE_JOB = oneTaskServiceJobDescriptor().toBuilder().withApplicationName(V3_ENGINE_APP_PREFIX).build();
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(1), true);
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
@After
public void tearDown() throws Exception {
// Verify job/task versions observed during the test arrived in monotonic order.
jobsScenarioBuilder.expectVersionsOrdered();
}
@Test(timeout = 30_000)
public void testUpdateTaskAttributes() throws Exception {
    // A task attribute set through the API must show up on the task record.
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, scenario -> scenario
            .template(ScenarioTemplates.startTasksInNewJob())
            .inTask(0, task -> task
                    .updateTaskAttributes(Collections.singletonMap("attributeA", "value123"))
                    .assertTaskUpdate(updated -> assertThat(updated.getAttributes()).containsEntry("attributeA", "value123"))
            )
    );
}
@Test(timeout = 30_000)
public void testDeleteTaskAttributes() throws Exception {
    // An attribute written to a task can be removed again, after which it no longer
    // appears on the task record.
    jobsScenarioBuilder.schedule(ONE_TASK_SERVICE_JOB, scenario -> scenario
            .template(ScenarioTemplates.startTasksInNewJob())
            .inTask(0, task -> task
                    .updateTaskAttributes(Collections.singletonMap("attributeA", "value123"))
                    .assertTaskUpdate(updated -> assertThat(updated.getAttributes()).containsEntry("attributeA", "value123"))
                    .deleteTaskAttributes(Collections.singletonList("attributeA"))
                    .assertTaskUpdate(updated -> assertThat(updated.getAttributes()).doesNotContainKeys("attributeA"))
            )
    );
}
}
| 9,966 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/basic/JobAttributesTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.basic;
import java.util.Collections;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
@Category(IntegrationTest.class)
public class JobAttributesTest extends BaseIntegrationTest {
private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));
private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@Rule
public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
@After
public void tearDown() throws Exception {
// Verify job/task versions observed during the test arrived in monotonic order.
jobsScenarioBuilder.expectVersionsOrdered();
}
// User-supplied attributes in the reserved "titus." namespaces must be stripped
// by the control plane before the job is accepted.
@Test(timeout = TEST_TIMEOUT_MS)
public void testIgnoreTitusAttributesFromInput() {
JobDescriptor<ServiceJobExt> job = oneTaskServiceJobDescriptor().but(jd ->
jd.toBuilder().withAttributes(CollectionsExt.copyAndAdd(
jd.getAttributes(),
ImmutableMap.<String, String>builder()
.put("titus.sanitization.something", "foo")
.put("titus.runtimePrediction.anything", "bar")
.build()
))
);
jobsScenarioBuilder.schedule(job, jobScenarioBuilder -> jobScenarioBuilder
.template(ScenarioTemplates.jobAccepted())
.assertJob(j -> {
// Neither reserved attribute should survive sanitization.
Map<String, String> attributes = ((JobDescriptor<?>) j.getJobDescriptor()).getAttributes();
return !attributes.containsKey("titus.sanitization.something") &&
!attributes.containsKey("titus.runtimePrediction.anything");
})
);
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testUpdateJobAttributes() {
    // Attribute updates on an accepted job must be applied without error.
    jobsScenarioBuilder.schedule(oneTaskServiceJobDescriptor(), scenario -> scenario
            .template(ScenarioTemplates.jobAccepted())
            .updateJobAttributes(Collections.singletonMap("a", "1"))
    );
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testDeleteJobAttributes() {
    // An attribute added to a job can subsequently be deleted again.
    jobsScenarioBuilder.schedule(oneTaskServiceJobDescriptor(), scenario -> scenario
            .template(ScenarioTemplates.jobAccepted())
            .updateJobAttributes(Collections.singletonMap("a", "1"))
            .deleteJobAttributes(Collections.singletonList("a"))
    );
}
}
| 9,967 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/query/JobCursorQueryTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import java.util.List;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import io.grpc.StatusRuntimeException;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobCursorQueryTest extends BaseIntegrationTest {
private static final int JOBS_PER_ENGINE = 3;
private static final int TASKS_PER_JOB = 2;
private static final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(10));
private static final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
@ClassRule
public static final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
private static List<Job> allJobsInOrder;
private static List<Task> allTasksInOrder;
@BeforeClass
public static void setUp() throws Exception {
client = titusStackResource.getGateway().getV3BlockingGrpcClient();
// Fixed task count per job keeps page boundaries predictable.
JobDescriptor<ServiceJobExt> jobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor()
.but(jd -> jd.getExtensions().toBuilder().withCapacity(
Capacity.newBuilder().withMin(0).withDesired(TASKS_PER_JOB).withMax(TASKS_PER_JOB).build()
).build());
// Two applications, JOBS_PER_ENGINE jobs each.
jobsScenarioBuilder.schedule(jobDescriptor.toBuilder().withApplicationName("app1").build(), JOBS_PER_ENGINE, ScenarioTemplates.startTasksInNewJob());
jobsScenarioBuilder.schedule(jobDescriptor.toBuilder().withApplicationName("app2").build(), JOBS_PER_ENGINE, ScenarioTemplates.startTasksInNewJob());
// Snapshot the full, server-ordered job and task lists for page-by-page comparison.
allJobsInOrder = client.findJobs(JobQuery.newBuilder().setPage(Page.newBuilder().setPageSize(Integer.MAX_VALUE / 2)).build()).getItemsList();
assertThat(allJobsInOrder).hasSize(2 * JOBS_PER_ENGINE);
allTasksInOrder = client.findTasks(TaskQuery.newBuilder().setPage(Page.newBuilder().setPageSize(Integer.MAX_VALUE / 2)).build()).getItemsList();
assertThat(allTasksInOrder).hasSize(2 * JOBS_PER_ENGINE * TASKS_PER_JOB);
}
// Walk the job list page by page using the pagination cursor, checking each page's
// content, cursor position, hasMore flag, and page number against the snapshot.
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobQueryWithCursor() {
// Page 0
JobQueryResult result0 = client.findJobs(JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2)).build()
);
assertThat(result0.getItemsList()).containsExactlyElementsOf(allJobsInOrder.subList(0, 2));
assertThat(result0.getPagination().getCursor()).isNotEmpty();
assertThat(result0.getPagination().getCursorPosition()).isEqualTo(1);
assertThat(result0.getPagination().getHasMore()).isTrue();
assertThat(result0.getPagination().getCurrentPage().getPageNumber()).isEqualTo(0);
// Page 1
JobQueryResult result1 = client.findJobs(JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2).setCursor(result0.getPagination().getCursor())).build()
);
assertThat(result1.getItemsList()).containsExactlyElementsOf(allJobsInOrder.subList(2, 4));
assertThat(result1.getPagination().getCursor()).isNotEmpty();
assertThat(result1.getPagination().getCursorPosition()).isEqualTo(3);
assertThat(result1.getPagination().getHasMore()).isTrue();
assertThat(result1.getPagination().getCurrentPage().getPageNumber()).isEqualTo(1);
// Page 2 (the last page: hasMore must flip to false)
JobQueryResult result2 = client.findJobs(JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2).setCursor(result1.getPagination().getCursor())).build()
);
assertThat(result2.getItemsList()).containsExactlyElementsOf(allJobsInOrder.subList(4, 6));
assertThat(result2.getPagination().getCursor()).isNotEmpty();
assertThat(result2.getPagination().getCursorPosition()).isEqualTo(5);
assertThat(result2.getPagination().getHasMore()).isFalse();
assertThat(result2.getPagination().getCurrentPage().getPageNumber()).isEqualTo(2);
// Check cursor points to the latest returned element
JobQueryResult result3 = client.findJobs(JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2).setCursor(result2.getPagination().getCursor())).build()
);
assertThat(result3.getItemsList()).isEmpty();
assertThat(result3.getPagination().getCursor()).isEqualTo(result2.getPagination().getCursor());
assertThat(result3.getPagination().getCursorPosition()).isEqualTo(result2.getPagination().getCursorPosition());
assertThat(result3.getPagination().getHasMore()).isFalse();
assertThat(result3.getPagination().getCurrentPage().getPageNumber())
.isEqualTo(result3.getPagination().getTotalPages());
}
@Test(timeout = TEST_TIMEOUT_MS, expected = StatusRuntimeException.class)
public void testJobQueryWithBadCursor() {
    // A cursor the server cannot decode must be rejected with a gRPC status error.
    Page badPage = Page.newBuilder().setPageSize(4).setCursor("bad_cursor_value").build();
    client.findJobs(JobQuery.newBuilder().setPage(badPage).build());
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testJobQueryWithCursorAndEmptyResult() {
JobQueryResult result = client.findJobs(JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4))
.putFilteringCriteria("jobState", "KillInitiated") // Filter by something that gives us empty result
.build()
);
// An empty result carries no cursor and reports no further pages.
assertThat(result.getItemsList()).isEmpty();
assertThat(result.getPagination().getCursor()).isEmpty();
assertThat(result.getPagination().getCursorPosition()).isZero();
assertThat(result.getPagination().getHasMore()).isFalse();
}
// Task-level counterpart of testJobQueryWithCursor: page through all tasks with the
// cursor and verify page content, cursor position, hasMore, and page number.
@Test(timeout = TEST_TIMEOUT_MS)
public void testTaskQueryWithCursor() {
// Page 0
TaskQueryResult result0 = client.findTasks(TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4)).build()
);
assertThat(result0.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(0, 4));
assertThat(result0.getPagination().getCursor()).isNotEmpty();
assertThat(result0.getPagination().getCursorPosition()).isEqualTo(3);
assertThat(result0.getPagination().getHasMore()).isTrue();
assertThat(result0.getPagination().getCurrentPage().getPageNumber()).isEqualTo(0);
// Page 1
TaskQueryResult result1 = client.findTasks(TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4).setCursor(result0.getPagination().getCursor())).build()
);
assertThat(result1.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(4, 8));
assertThat(result1.getPagination().getCursor()).isNotEmpty();
assertThat(result1.getPagination().getCursorPosition()).isEqualTo(7);
assertThat(result1.getPagination().getHasMore()).isTrue();
assertThat(result1.getPagination().getCurrentPage().getPageNumber()).isEqualTo(1);
// Page 2 (the last page: hasMore must flip to false)
TaskQueryResult result2 = client.findTasks(TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4).setCursor(result1.getPagination().getCursor())).build()
);
assertThat(result2.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(8, 12));
assertThat(result2.getPagination().getHasMore()).isFalse();
assertThat(result2.getPagination().getCursor()).isNotEmpty();
assertThat(result2.getPagination().getCursorPosition()).isEqualTo(11);
assertThat(result2.getPagination().getCurrentPage().getPageNumber()).isEqualTo(2);
// Check cursor points to the latest returned element
TaskQueryResult result3 = client.findTasks(TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4).setCursor(result2.getPagination().getCursor())).build()
);
assertThat(result3.getItemsList()).isEmpty();
assertThat(result3.getPagination().getCursor()).isEqualTo(result2.getPagination().getCursor());
assertThat(result3.getPagination().getCursorPosition()).isEqualTo(result2.getPagination().getCursorPosition());
assertThat(result3.getPagination().getHasMore()).isFalse();
assertThat(result3.getPagination().getCurrentPage().getPageNumber())
.isEqualTo(result3.getPagination().getTotalPages());
}
@Test(timeout = TEST_TIMEOUT_MS, expected = StatusRuntimeException.class)
public void testTaskQueryWithBadCursor() {
    // An undecodable cursor on a task query must be rejected with a gRPC status error.
    Page badPage = Page.newBuilder().setPageSize(4).setCursor("bad_cursor_value").build();
    client.findTasks(TaskQuery.newBuilder().setPage(badPage).build());
}
@Test(timeout = TEST_TIMEOUT_MS)
public void testTaskQueryWithCursorAndEmptyResult() {
TaskQueryResult result = client.findTasks(TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(4))
.putFilteringCriteria("jobState", "KillInitiated") // Filter by something that gives us empty result
.build()
);
// An empty result carries no cursor and reports no further pages.
assertThat(result.getItemsList()).isEmpty();
assertThat(result.getPagination().getCursor()).isEmpty();
assertThat(result.getPagination().getCursorPosition()).isZero();
assertThat(result.getPagination().getHasMore()).isFalse();
}
}
| 9,968 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/query/JobCursorArchiveQueryTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class JobCursorArchiveQueryTest extends BaseIntegrationTest {

    // Comma-separated list of every protobuf task state except UNRECOGNIZED, so queries
    // match both live and archived (Finished) tasks.
    private static final String ALL_TASK_STATES = StringExt.concatenate(TaskStatus.TaskState.class, ",", s -> s != TaskStatus.TaskState.UNRECOGNIZED);

    private static final Page TWO_ITEM_PAGE = Page.newBuilder().setPageSize(2).build();

    private static final int TASKS_PER_JOB = 4;

    private static final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(5));
    private static final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    // The stack must come up before the scenario builder attaches to it.
    @ClassRule
    public static final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
    private static JobScenarioBuilder jobScenarioBuilder;

    @BeforeClass
    public static void setUp() {
        client = titusStackResource.getGateway().getV3BlockingGrpcClient();
        // A service job sized to TASKS_PER_JOB so killed tasks are replaced, producing
        // an archive history alongside the live tasks.
        JobDescriptor<ServiceJobExt> serviceJobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor()
                .but(jd -> jd.getExtensions().toBuilder()
                        .withCapacity(Capacity.newBuilder().withMin(0).withDesired(TASKS_PER_JOB).withMax(TASKS_PER_JOB).build())
                        .build()
                );
        jobScenarioBuilder = jobsScenarioBuilder.schedule(serviceJobDescriptor, ScenarioTemplates.startTasksInNewJob()).takeJob(0);
    }

    @Test(timeout = TEST_TIMEOUT_MS)
    public void testArchiveQuery() {
        // Kill every original task; the service job replaces each one, leaving
        // TASKS_PER_JOB archived tasks plus TASKS_PER_JOB live replacements.
        for (int idx = 0; idx < TASKS_PER_JOB; idx++) {
            jobScenarioBuilder.getTaskByIndex(idx)
                    .killTask()
                    .completeKillInitiated()
                    .expectStateUpdateSkipOther(TaskStatus.TaskState.Finished);
        }
        for (int idx = 0; idx < TASKS_PER_JOB; idx++) {
            jobScenarioBuilder.expectTaskInSlot(idx, 1);
        }

        int expectedTotal = TASKS_PER_JOB * 2;
        Set<String> observedTaskIds = new HashSet<>();
        Page nextPage = TWO_ITEM_PAGE;
        // Walk all pages cursor-by-cursor; with a page size of 2 and 8 total tasks this
        // takes exactly TASKS_PER_JOB iterations.
        for (int i = 0; i < TASKS_PER_JOB; i++) {
            TaskQuery query = TaskQuery.newBuilder()
                    .setPage(nextPage)
                    .putFilteringCriteria("jobIds", jobScenarioBuilder.getJobId())
                    .putFilteringCriteria("taskStates", ALL_TASK_STATES)
                    .build();
            TaskQueryResult result = client.findTasks(query);
            assertThat(result.getPagination().getTotalItems()).isEqualTo(expectedTotal);
            assertThat(result.getPagination().getCursor()).isNotEmpty();
            result.getItemsList().forEach(task -> observedTaskIds.add(task.getId()));
            nextPage = query.getPage().toBuilder().setCursor(result.getPagination().getCursor()).build();
        }
        // Every live and archived task was observed exactly once across the pages.
        assertThat(observedTaskIds).hasSize(expectedTotal);
    }
}
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobGroupInfo;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.Owner;
import com.netflix.titus.api.jobmanager.model.job.PlatformSidecar;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Pagination;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.master.integration.v3.scenario.TaskScenarioBuilder;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCell;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMaster;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMasters;
import com.netflix.titus.testkit.embedded.kube.EmbeddedKubeClusters;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.master.integration.v3.job.CellAssertions.assertCellInfo;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
/**
* As queries are immutable, the tests share single stack which is preloaded with all the data required for all test cases.
* Furthermore each test may add its own data set.
*/
@Category(IntegrationTest.class)
public class JobCriteriaQueryTest extends BaseIntegrationTest {
// Single large page used by queries expected to fit all their results in one response.
private static final Page PAGE = Page.newBuilder().setPageNumber(0).setPageSize(100).build();
// Embedded Titus cell shared by every test in this class. The task state timeouts are set
// very high so the reconciler never expires tasks while tests are still running.
private final static TitusStackResource titusStackResource = new TitusStackResource(
        EmbeddedTitusCell.aTitusCell()
                .withMaster(EmbeddedTitusMasters.basicMasterWithKubeIntegration(EmbeddedKubeClusters.basicClusterWithLargeInstances(20)).toBuilder()
                        .withCellName("embeddedCell")
                        // Set to very high value as we do not want to expire it.
                        .withProperty("titusMaster.jobManager.taskInLaunchedStateTimeoutMs", "30000000")
                        .withProperty("titusMaster.jobManager.batchTaskInStartInitiatedStateTimeoutMs", "30000000")
                        .withProperty("titusMaster.jobManager.serviceTaskInStartInitiatedStateTimeoutMs", "30000000")
                        .build()
                )
                .withDefaultGateway()
                .build()
);
private final static JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);
// The stack must be started before the scenario builder attaches to it, hence the rule chain ordering.
@ClassRule
public static final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);
// Attributes stamped on the pre-created batch jobs in setUp; tests query by these values.
private static final String BATCH_OWNER = "batchOwner@netflix.com";
private static final String BATCH_APPLICATION = "batchApplication";
private static final String BATCH_CAPACITY_GROUP = "batchCapacityGroup";
private static final JobGroupInfo BATCH_JOB_GROUP_INFO = JobGroupInfo.newBuilder().withStack("batchStack").withDetail("batchDetail").withSequence("batch001").build();
private static final String BATCH_IMAGE_NAME = "batchImageName";
private static final String BATCH_IMAGE_TAG = "batchImageTag";
// Attributes stamped on the pre-created service jobs in setUp.
private static final String SERVICE_OWNER = "serviceOwner@netflix.com";
private static final String SERVICE_APPLICATION = "serviceApplication";
private static final String SERVICE_CAPACITY_GROUP = "serviceCapacityGroup";
private static final JobGroupInfo SERVICE_JOB_GROUP_INFO = JobGroupInfo.newBuilder().withStack("serviceStack").withDetail("serviceDetail").withSequence("service001").build();
private static final String SERVICE_IMAGE_NAME = "serviceImageName";
private static final String SERVICE_IMAGE_TAG = "serviceImageTag";
/**
 * Job attribute added to the jobs created in the setUp method, used to tell them apart from
 * jobs that individual tests create on their own.
 */
private static final String PRE_CREATED_JOBS_LABEL = "precreatedJob";
private static final JobDescriptor<BatchJobExt> BATCH_JOB_TEMPLATE = oneTaskBatchJobDescriptor();
private static final JobDescriptor<ServiceJobExt> SERVICE_JOB_TEMPLATE = oneTaskServiceJobDescriptor();
private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;
// Ids of the pre-created jobs and of their single launched task, populated once in setUp.
private static final List<String> batchJobsWithCreatedTasks = new ArrayList<>();
private static final List<String> batchTasks = new ArrayList<>();
private static final List<String> serviceJobsWithCreatedTasks = new ArrayList<>();
private static final List<String> serviceTasks = new ArrayList<>();
// A completed batch job with 5 finished tasks, used by archive-query tests.
private static String finishedBatchJobWithFiveTasks;
@BeforeClass
public static void setUp() throws Exception {
    // Pre-populates the shared stack: 3 batch jobs and 3 service jobs (one launched task
    // each) carrying all the searchable attributes, plus one finished 5-task batch job.
    // Tests rely on the ids captured into the static lists below.
    client = titusStackResource.getGateway().getV3BlockingGrpcClient();
    // Jobs with launched tasks
    JobDescriptor<BatchJobExt> batchJobDescriptor = BATCH_JOB_TEMPLATE.toBuilder()
            .withOwner(Owner.newBuilder().withTeamEmail(BATCH_OWNER).build())
            .withApplicationName(BATCH_APPLICATION)
            .withCapacityGroup(BATCH_CAPACITY_GROUP)
            .withJobGroupInfo(BATCH_JOB_GROUP_INFO)
            .withContainer(BATCH_JOB_TEMPLATE.getContainer().toBuilder()
                    .withImage(Image.newBuilder().withName(BATCH_IMAGE_NAME).withTag(BATCH_IMAGE_TAG).build())
                    .build()
            )
            // Marker attribute so tests can distinguish these jobs from ones they create themselves.
            .withAttributes(Collections.singletonMap(PRE_CREATED_JOBS_LABEL, "true"))
            .build();
    jobsScenarioBuilder.schedule(batchJobDescriptor, 3,
            jobScenarioBuilder -> jobScenarioBuilder
                    .template(ScenarioTemplates.launchJob())
                    .inJob(job -> batchJobsWithCreatedTasks.add(job.getId()))
    );
    // Record each batch job's single task id (jobs are scheduled above, so tasks exist now).
    batchJobsWithCreatedTasks.forEach(jobId -> {
        String taskId = jobsScenarioBuilder.takeJob(jobId).getTaskByIndex(0).getTask().getId();
        batchTasks.add(taskId);
    });
    JobDescriptor<ServiceJobExt> serviceJobDescriptor = SERVICE_JOB_TEMPLATE.toBuilder()
            .withOwner(Owner.newBuilder().withTeamEmail(SERVICE_OWNER).build())
            .withApplicationName(SERVICE_APPLICATION)
            .withCapacityGroup(SERVICE_CAPACITY_GROUP)
            .withJobGroupInfo(SERVICE_JOB_GROUP_INFO)
            .withContainer(SERVICE_JOB_TEMPLATE.getContainer().toBuilder()
                    .withImage(Image.newBuilder().withName(SERVICE_IMAGE_NAME).withTag(SERVICE_IMAGE_TAG).build())
                    .build()
            )
            .withAttributes(Collections.singletonMap(PRE_CREATED_JOBS_LABEL, "true"))
            .build();
    jobsScenarioBuilder.schedule(serviceJobDescriptor, 3,
            jobScenarioBuilder -> jobScenarioBuilder
                    .template(ScenarioTemplates.launchJob())
                    .inJob(job -> serviceJobsWithCreatedTasks.add(job.getId()))
    );
    serviceJobsWithCreatedTasks.forEach(jobId -> {
        String taskId = jobsScenarioBuilder.takeJob(jobId).getTaskByIndex(0).getTask().getId();
        serviceTasks.add(taskId);
    });
    // Finished job with 5 tasks
    int numberOfTasks = 5;
    JobDescriptor<BatchJobExt> jobDescriptor = BATCH_JOB_TEMPLATE
            .but(jd -> jd.getExtensions().toBuilder().withSize(numberOfTasks).build());
    jobsScenarioBuilder.schedule(jobDescriptor, 1, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.launchJob())
            .allTasks(ScenarioTemplates.completeTask())
            .expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.Finished, "Expected job to complete")
            .inJob(job -> finishedBatchJobWithFiveTasks = job.getId())
    );
}
@Test(timeout = 30_000)
public void testFindJobAndTaskByJobIdsV3() {
    // Ask for two of the three pre-created batch jobs via a comma-separated job id list.
    String idFilter = batchJobsWithCreatedTasks.get(0) + ',' + batchJobsWithCreatedTasks.get(2);

    // Jobs
    JobQueryResult jobs = client.findJobs(JobQuery.newBuilder().putFilteringCriteria("jobIds", idFilter).setPage(PAGE).build());
    assertThat(jobs.getItemsList()).hasSize(2);

    // Tasks (one launched task per job)
    TaskQueryResult tasks = client.findTasks(TaskQuery.newBuilder().putFilteringCriteria("jobIds", idFilter).setPage(PAGE).build());
    assertThat(tasks.getItemsList()).hasSize(2);
}
@Test(timeout = 30_000)
public void testFindJobAndTaskByTaskIdsV3() {
    // Filter by two task ids drawn from different pre-created batch jobs.
    String firstTask = jobsScenarioBuilder.takeJob(batchJobsWithCreatedTasks.get(0)).getTaskByIndex(0).getTask().getId();
    String thirdTask = jobsScenarioBuilder.takeJob(batchJobsWithCreatedTasks.get(2)).getTaskByIndex(0).getTask().getId();
    String idFilter = firstTask + ',' + thirdTask;

    // Jobs owning the tasks
    JobQueryResult jobs = client.findJobs(JobQuery.newBuilder().putFilteringCriteria("taskIds", idFilter).setPage(PAGE).build());
    assertThat(jobs.getItemsList()).hasSize(2);

    // The tasks themselves
    TaskQueryResult tasks = client.findTasks(TaskQuery.newBuilder().putFilteringCriteria("taskIds", idFilter).setPage(PAGE).build());
    assertThat(tasks.getItemsList()).hasSize(2);
}
@Test(timeout = 60_000)
public void testFindArchivedTasksByTaskIdsV3() {
    // The job completed in setUp, so all of its tasks are only visible via the
    // archive when the 'Finished' task state is requested explicitly.
    TaskQuery archiveQuery = TaskQuery.newBuilder()
            .putFilteringCriteria("jobIds", finishedBatchJobWithFiveTasks)
            .putFilteringCriteria("taskStates", com.netflix.titus.grpc.protogen.TaskStatus.TaskState.Finished.name())
            .setPage(PAGE)
            .build();
    List<Task> archivedTasks = client.findTasks(archiveQuery).getItemsList();

    assertThat(archivedTasks).hasSize(5);
    assertThat(archivedTasks).allMatch(task -> task.getStatus().getState() == TaskStatus.TaskState.Finished);
}
@Test(timeout = 30_000)
public void testSearchByJobTypeV3() {
    // jobType=batch must return (at least) every pre-created batch job and its task...
    Set<String> foundBatchJobIds = client.findJobs(JobQuery.newBuilder().putFilteringCriteria("jobType", "batch").setPage(PAGE).build())
            .getItemsList().stream().map(Job::getId).collect(Collectors.toSet());
    assertThat(foundBatchJobIds).containsAll(batchJobsWithCreatedTasks);

    Set<String> foundBatchTaskIds = client.findTasks(TaskQuery.newBuilder().putFilteringCriteria("jobType", "batch").setPage(PAGE).build())
            .getItemsList().stream().map(Task::getId).collect(Collectors.toSet());
    assertThat(foundBatchTaskIds).containsAll(batchTasks);

    // ...and jobType=service the service-side equivalents.
    Set<String> foundServiceJobIds = client.findJobs(JobQuery.newBuilder().putFilteringCriteria("jobType", "service").setPage(PAGE).build())
            .getItemsList().stream().map(Job::getId).collect(Collectors.toSet());
    assertThat(foundServiceJobIds).containsAll(serviceJobsWithCreatedTasks);

    Set<String> foundServiceTaskIds = client.findTasks(TaskQuery.newBuilder().putFilteringCriteria("jobType", "service").setPage(PAGE).build())
            .getItemsList().stream().map(Task::getId).collect(Collectors.toSet());
    assertThat(foundServiceTaskIds).containsAll(serviceTasks);
}
@Test(timeout = 30_000)
public void testSearchByJobState() throws Exception {
    // Creates two jobs under a dedicated application name — one left in Accepted state and
    // one moved to KillInitiated — and verifies the 'jobState' criterion isolates each.
    JobDescriptor<BatchJobExt> jobDescriptor = batchJobDescriptors().getValue().toBuilder().withApplicationName("testSearchByJobState").build();
    String acceptedJobId = jobsScenarioBuilder.scheduleAndReturnJob(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.launchJob())).getId();
    String killInitiatedJobId = jobsScenarioBuilder.scheduleAndReturnJob(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.launchJob())
            .killJob()
            .expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.KillInitiated, "Expected state: " + JobState.KillInitiated)
    ).getId();
    String acceptedTaskId = jobsScenarioBuilder.takeJob(acceptedJobId).getTaskByIndex(0).getTask().getId();
    String killInitiatedTaskId = jobsScenarioBuilder.takeJob(killInitiatedJobId).getTaskByIndex(0).getTask().getId();
    // Indexes are recomputed after events are sent, so if we run findJobs/findTasks immediately, they may use stale index.
    Thread.sleep(10);
    // Builders scoped to this test's application name; note that re-putting 'jobState'
    // below replaces the previous value on the same builder (protobuf map semantics).
    JobQuery.Builder jobQueryBuilder = JobQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByJobState")
            .setPage(PAGE);
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByJobState")
            .setPage(PAGE);
    // Jobs (Accepted)
    JobQueryResult acceptedJobQueryResult = client.findJobs(jobQueryBuilder.putFilteringCriteria("jobState", "Accepted").build());
    assertThat(acceptedJobQueryResult.getItemsList()).hasSize(1);
    Job acceptedJobQueryResultItem = acceptedJobQueryResult.getItems(0);
    assertThat(acceptedJobQueryResultItem.getId()).isEqualTo(acceptedJobId);
    // Jobs (KillInitiated)
    JobQueryResult killInitJobQueryResult = client.findJobs(jobQueryBuilder.putFilteringCriteria("jobState", "KillInitiated").setPage(PAGE).build());
    assertThat(killInitJobQueryResult.getItemsList()).hasSize(1);
    Job killInitJobQueryResultItem = killInitJobQueryResult.getItems(0);
    assertThat(killInitJobQueryResultItem.getId()).isEqualTo(killInitiatedJobId);
    // Tasks (Accepted)
    TaskQueryResult acceptedTaskQueryResult = client.findTasks(taskQueryBuilder.putFilteringCriteria("jobState", "Accepted").setPage(PAGE).build());
    assertThat(acceptedTaskQueryResult.getItemsList()).hasSize(1);
    assertThat(acceptedTaskQueryResult.getItems(0).getId()).isEqualTo(acceptedTaskId);
    // Tasks (KillInitiated)
    TaskQueryResult killInitTaskQueryResult = client.findTasks(taskQueryBuilder.putFilteringCriteria("jobState", "KillInitiated").setPage(PAGE).build());
    assertThat(killInitTaskQueryResult.getItemsList()).hasSize(1);
    assertThat(killInitTaskQueryResult.getItems(0).getId()).isEqualTo(killInitiatedTaskId);
}
@Test(timeout = 30_000)
public void testSearchByTaskStateV3() {
    // Submits one job per target task state, then verifies that filtering on each state
    // returns exactly the corresponding job/task pair.
    Function<Function<JobScenarioBuilder, JobScenarioBuilder>, String> submitWith = template ->
            jobsScenarioBuilder.scheduleAndReturnJob(
                    BATCH_JOB_TEMPLATE.toBuilder().withApplicationName("testSearchByTaskStateV3").build(),
                    jobScenarioBuilder -> jobScenarioBuilder.template(template)
            ).getId();

    String launchedJobId = submitWith.apply(ScenarioTemplates.launchJob());
    String startInitiatedJobId = submitWith.apply(ScenarioTemplates.startJob(TaskStatus.TaskState.StartInitiated));
    String startedJobId = submitWith.apply(ScenarioTemplates.startJob(TaskStatus.TaskState.Started));
    String killInitiatedJobId = submitWith.apply(ScenarioTemplates.startJobAndMoveTasksToKillInitiated());

    testSearchByTaskStateV3("Launched", launchedJobId, jobsScenarioBuilder.takeTaskId(launchedJobId, 0));
    testSearchByTaskStateV3("StartInitiated", startInitiatedJobId, jobsScenarioBuilder.takeTaskId(startInitiatedJobId, 0));
    testSearchByTaskStateV3("Started", startedJobId, jobsScenarioBuilder.takeTaskId(startedJobId, 0));
    testSearchByTaskStateV3("KillInitiated", killInitiatedJobId, jobsScenarioBuilder.takeTaskId(killInitiatedJobId, 0));
}
/**
 * Asserts that filtering by {@code taskStates} under this test's application name returns
 * exactly one job and one task, with the expected ids.
 */
private void testSearchByTaskStateV3(String taskState, String expectedJobId, String expectedTaskId) {
    // Job side
    JobQuery jobQuery = JobQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByTaskStateV3")
            .putFilteringCriteria("taskStates", taskState)
            .setPage(PAGE)
            .build();
    List<Job> matchingJobs = client.findJobs(jobQuery).getItemsList();
    assertThat(matchingJobs).hasSize(1);
    assertThat(matchingJobs.get(0).getId()).isEqualTo(expectedJobId);

    // Task side
    TaskQuery taskQuery = TaskQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByTaskStateV3")
            .putFilteringCriteria("taskStates", taskState)
            .setPage(PAGE)
            .build();
    List<Task> matchingTasks = client.findTasks(taskQuery).getItemsList();
    assertThat(matchingTasks).hasSize(1);
    assertThat(matchingTasks.get(0).getId()).isEqualTo(expectedTaskId);
}
@Test(timeout = 30_000)
public void testSearchByTaskReasonInFinishedJobV3() {
    // Runs a two-task batch job where task 0 fails and task 1 completes normally, then
    // checks that finished tasks can be filtered by their termination reason code.
    JobDescriptor<BatchJobExt> jobDescriptor = JobFunctions.changeBatchJobSize(BATCH_JOB_TEMPLATE, 2);
    String jobId = jobsScenarioBuilder.scheduleAndReturnJob(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.launchJob())
            .inTask(0, TaskScenarioBuilder::failTaskExecution)
            .schedule()
            .inTask(1, taskScenarioBuilder -> taskScenarioBuilder
                    .transitionTo(TaskStatus.TaskState.StartInitiated)
                    .transitionTo(TaskStatus.TaskState.Started)
                    .template(ScenarioTemplates.completeTask())
            )
            .expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.Finished, "Expected job to complete")
    ).getId();
    // Only the failed task matches reason 'failed'.
    List<Task> task0List = client.findTasks(TaskQuery.newBuilder()
            .putFilteringCriteria("jobIds", jobId)
            .putFilteringCriteria("taskStates", TaskStatus.TaskState.Finished.name())
            .putFilteringCriteria("taskStateReasons", "failed")
            .setPage(PAGE)
            .build()
    ).getItemsList();
    assertThat(task0List).hasSize(1);
    assertThat(task0List.get(0).getStatus().getReasonCode()).isEqualTo("failed");
    // Only the successfully completed task matches reason 'normal'.
    List<Task> task1List = client.findTasks(TaskQuery.newBuilder()
            .putFilteringCriteria("jobIds", jobId)
            .putFilteringCriteria("taskStates", TaskStatus.TaskState.Finished.name())
            .putFilteringCriteria("taskStateReasons", "normal")
            .setPage(PAGE)
            .build()
    ).getItemsList();
    assertThat(task1List).hasSize(1);
    assertThat(task1List.get(0).getStatus().getReasonCode()).isEqualTo("normal");
}
@Test(timeout = 30_000)
public void testSearchByOwnerV3() {
    // 'owner' criterion matches the owner team email set on the pre-created jobs.
    testBatchSearchBy("owner", BATCH_OWNER);
    testServiceSearchBy("owner", SERVICE_OWNER);
}
@Test(timeout = 30_000)
public void testSearchByAppNameV3() {
    // 'appName' is accepted as an alias for the application name criterion.
    testBatchSearchBy("appName", BATCH_APPLICATION);
    testServiceSearchBy("appName", SERVICE_APPLICATION);
}
@Test(timeout = 30_000)
public void testSearchByApplicationNameV3() {
    // Full 'applicationName' criterion; same expectation as the 'appName' alias test.
    testBatchSearchBy("applicationName", BATCH_APPLICATION);
    testServiceSearchBy("applicationName", SERVICE_APPLICATION);
}
@Test(timeout = 30_000)
public void testSearchByCapacityGroupV3() {
    // 'capacityGroup' criterion matches the capacity group set on the pre-created jobs.
    testBatchSearchBy("capacityGroup", BATCH_CAPACITY_GROUP);
    testServiceSearchBy("capacityGroup", SERVICE_CAPACITY_GROUP);
}
@Test(timeout = 30_000)
public void testSearchByJobGroupInfoV3() {
    // Stack and detail are shared across all pre-created jobs of each kind...
    testBatchSearchBy("jobGroupStack", BATCH_JOB_GROUP_INFO.getStack());
    testServiceSearchBy("jobGroupStack", SERVICE_JOB_GROUP_INFO.getStack());
    testBatchSearchBy("jobGroupDetail", BATCH_JOB_GROUP_INFO.getDetail());
    testServiceSearchBy("jobGroupDetail", SERVICE_JOB_GROUP_INFO.getDetail());
    // ...while the job group sequence is unique per job.
    for (String precreatedJobId : CollectionsExt.merge(batchJobsWithCreatedTasks, serviceJobsWithCreatedTasks)) {
        String sequence = jobsScenarioBuilder.takeJob(precreatedJobId).getJob().getJobDescriptor().getJobGroupInfo().getSequence();
        testSearchByJobGroupSequence(precreatedJobId, sequence);
    }
}
@Test(timeout = 30_000)
public void testSearchByPlatformSidecarV3() {
    // Schedule a job that requests a platform sidecar, then locate it via the
    // sidecar name/channel filtering criteria.
    PlatformSidecar sidecar = PlatformSidecar.newBuilder().withName("test-platform-sidecar").withChannel("test-channel").build();
    JobDescriptor<BatchJobExt> descriptorWithSidecar = BATCH_JOB_TEMPLATE.toBuilder()
            .withApplicationName("testAppThatDoesHavePlatformSidecar")
            .withPlatformSidecars(Collections.singletonList(sidecar))
            .build();
    jobsScenarioBuilder.schedule(descriptorWithSidecar, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.startTasksInNewJob())
    );

    List<Job> matches = client.findJobs(JobQuery.newBuilder()
            .putFilteringCriteria("platformSidecar", "test-platform-sidecar")
            .putFilteringCriteria("platformSidecarChannel", "test-channel")
            .setPage(PAGE)
            .build()
    ).getItemsList();

    assertThat(matches).hasSize(1);
    assertThat(matches.get(0).getId()).isNotEmpty(); // Always present
    com.netflix.titus.grpc.protogen.JobDescriptor matchedDescriptor = matches.get(0).getJobDescriptor();
    assertThat(matchedDescriptor.getPlatformSidecarsList()).isNotEmpty();
    assertThat(matchedDescriptor.getPlatformSidecarsList().get(0).getName()).isEqualTo("test-platform-sidecar");
}
/** Asserts the given criterion returns exactly the pre-created batch jobs. */
private void testBatchSearchBy(String queryKey, String queryValue) {
    List<String> foundIds = client.findJobs(newJobQuery(queryKey, queryValue)).getItemsList().stream()
            .map(Job::getId)
            .collect(Collectors.toList());
    assertThat(foundIds).hasSize(batchJobsWithCreatedTasks.size());
    assertThat(foundIds).containsAll(batchJobsWithCreatedTasks);
}
/** Asserts the given criterion returns exactly the pre-created service jobs. */
private void testServiceSearchBy(String queryKey, String queryValue) {
    List<String> foundIds = client.findJobs(newJobQuery(queryKey, queryValue)).getItemsList().stream()
            .map(Job::getId)
            .collect(Collectors.toList());
    assertThat(foundIds).hasSize(serviceJobsWithCreatedTasks.size());
    assertThat(foundIds).containsAll(serviceJobsWithCreatedTasks);
}
/** Asserts the job group sequence matches exactly one job, with the expected id. */
private void testSearchByJobGroupSequence(String expectedJobId, String sequence) {
    List<Job> matches = client.findJobs(newJobQuery("jobGroupSequence", sequence)).getItemsList();
    assertThat(matches).hasSize(1);
    assertThat(matches.get(0).getId()).isEqualTo(expectedJobId);
}
@Test(timeout = 30_000)
public void testSearchByImageV3() {
    // Image name and tag criteria match the container image set on the pre-created jobs.
    testBatchSearchBy("imageName", BATCH_IMAGE_NAME);
    testBatchSearchBy("imageTag", BATCH_IMAGE_TAG);
    testServiceSearchBy("imageName", SERVICE_IMAGE_NAME);
    testServiceSearchBy("imageTag", SERVICE_IMAGE_TAG);
}
@Test(timeout = 30_000)
public void testSearchByJobDescriptorAttributesV3() {
    // Creates three jobs, each with a unique pair of attributes (jobN.key1/jobN.key2),
    // then exercises the 'attributes' criterion with both 'or' and 'and' operators, with
    // key-only and key:value forms, against jobs and tasks alike.
    List<String> jobIds = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        JobDescriptor<BatchJobExt> jobDescriptor = BATCH_JOB_TEMPLATE.toBuilder()
                .withApplicationName("testSearchByJobDescriptorAttributesV3")
                .withAttributes(CollectionsExt.asMap(
                        String.format("job%d.key1", i), "value1",
                        String.format("job%d.key2", i), "value2"
                ))
                .build();
        String jobId = jobsScenarioBuilder.scheduleAndReturnJob(jobDescriptor, jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.launchJob())).getId();
        jobIds.add(jobId);
    }
    String job0 = jobIds.get(0);
    String task0 = jobsScenarioBuilder.takeJob(job0).getTaskByIndex(0).getTask().getId();
    String job1 = jobIds.get(1);
    String task1 = jobsScenarioBuilder.takeJob(job1).getTaskByIndex(0).getTask().getId();
    // Jobs
    JobQuery.Builder jobQueryBuilder = JobQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByJobDescriptorAttributesV3")
            .setPage(PAGE);
    // Key-only match, OR across keys of different jobs -> both jobs found.
    assertContainsJobs(
            client.findJobs(jobQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1,job1.key1")
                    .putFilteringCriteria("attributes.op", "or")
                    .build()
            ),
            job0, job1
    );
    // key:value match, OR with one valid and one invalid pair -> only job0 found.
    assertContainsJobs(
            client.findJobs(jobQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1:value1,job0.key1:value2")
                    .putFilteringCriteria("attributes.op", "or")
                    .build()
            ),
            job0
    );
    // key:value match, AND across both of job0's attributes -> only job0 found.
    assertContainsJobs(
            client.findJobs(jobQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1:value1,job0.key2:value2")
                    .putFilteringCriteria("attributes.op", "and")
                    .build()
            ),
            job0
    );
    // Tasks: same three cases, expecting the jobs' single tasks.
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByJobDescriptorAttributesV3")
            .setPage(PAGE);
    assertContainsTasks(
            client.findTasks(taskQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1,job1.key1")
                    .putFilteringCriteria("attributes.op", "or")
                    .build()
            ),
            task0, task1
    );
    assertContainsTasks(
            client.findTasks(taskQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1:value1,job0.key1:value2")
                    .putFilteringCriteria("attributes.op", "or")
                    .build()
            ),
            task0
    );
    assertContainsTasks(
            client.findTasks(taskQueryBuilder
                    .putFilteringCriteria("attributes", "job0.key1:value1,job0.key2:value2")
                    .putFilteringCriteria("attributes.op", "and")
                    .build()
            ),
            task0
    );
}
@Test(timeout = 30_000)
public void testSearchByCellV3() {
    // Verifies that jobs/tasks can be found via the cell/stack attributes that the cell
    // stamps on them (titus.cell / titus.stack), using both key-only and key:value forms
    // with 'or' and 'and' operators, and that each result reports the embedded cell name.
    final int numberOfJobs = 3;
    String[] expectedJobIds = new String[numberOfJobs];
    String[] expectedTaskIds = new String[numberOfJobs];
    for (int i = 0; i < numberOfJobs; i++) {
        String jobId = jobsScenarioBuilder.scheduleAndReturnJob(
                BATCH_JOB_TEMPLATE.toBuilder().withApplicationName("testSearchByCellV3").build(),
                jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.launchJob())
        ).getId();
        expectedJobIds[i] = jobId;
        expectedTaskIds[i] = jobsScenarioBuilder.takeJob(jobId).getTaskByIndex(0).getTask().getId();
    }
    // Jobs
    JobQuery.Builder jobQueryBuilder = JobQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByCellV3")
            .setPage(PAGE);
    // Key-only, OR over both cell attributes.
    JobQueryResult jobs1 = client.findJobs(jobQueryBuilder
            .putFilteringCriteria("attributes", "titus.cell,titus.stack")
            .putFilteringCriteria("attributes.op", "or")
            .build()
    );
    assertContainsJobs(jobs1, expectedJobIds);
    jobs1.getItemsList().forEach(job -> assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME));
    // Key-only, AND on the cell attribute alone.
    JobQueryResult jobs2 = client.findJobs(jobQueryBuilder
            .putFilteringCriteria("attributes", "titus.cell")
            .putFilteringCriteria("attributes.op", "and")
            .build()
    );
    assertContainsJobs(jobs2, expectedJobIds);
    jobs2.getItemsList().forEach(job -> assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME));
    // key:value, OR over both attributes bound to the cell name.
    JobQueryResult jobs3 = client.findJobs(jobQueryBuilder
            .putFilteringCriteria("attributes",
                    String.format("titus.cell:%1$s,titus.stack:%1$s", EmbeddedTitusMaster.CELL_NAME))
            .putFilteringCriteria("attributes.op", "or")
            .build()
    );
    assertContainsJobs(jobs3, expectedJobIds);
    jobs3.getItemsList().forEach(job -> assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME));
    // key:value, AND on the cell attribute alone.
    JobQueryResult jobs4 = client.findJobs(jobQueryBuilder
            .putFilteringCriteria("attributes",
                    String.format("titus.cell:%1$s", EmbeddedTitusMaster.CELL_NAME))
            .putFilteringCriteria("attributes.op", "and")
            .build()
    );
    assertContainsJobs(jobs4, expectedJobIds);
    jobs4.getItemsList().forEach(job -> assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME));
    // Tasks: mirrors the four job cases above.
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testSearchByCellV3")
            .setPage(PAGE);
    TaskQueryResult tasks1 = client.findTasks(taskQueryBuilder
            .putFilteringCriteria("attributes", "titus.cell,titus.stack")
            .putFilteringCriteria("attributes.op", "or")
            .build()
    );
    assertContainsTasks(tasks1, expectedTaskIds);
    tasks1.getItemsList().forEach(task -> assertCellInfo(task, EmbeddedTitusMaster.CELL_NAME));
    TaskQueryResult tasks2 = client.findTasks(taskQueryBuilder
            .putFilteringCriteria("attributes", "titus.cell")
            .putFilteringCriteria("attributes.op", "and")
            .build()
    );
    assertContainsTasks(tasks2, expectedTaskIds);
    tasks2.getItemsList().forEach(task -> assertCellInfo(task, EmbeddedTitusMaster.CELL_NAME));
    TaskQueryResult tasks3 = client.findTasks(taskQueryBuilder
            .putFilteringCriteria("attributes",
                    String.format("titus.cell:%1$s,titus.stack:%1$s", EmbeddedTitusMaster.CELL_NAME))
            .putFilteringCriteria("attributes.op", "or")
            .build()
    );
    assertContainsTasks(tasks3, expectedTaskIds);
    tasks3.getItemsList().forEach(task -> assertCellInfo(task, EmbeddedTitusMaster.CELL_NAME));
    final TaskQueryResult tasks4 = client.findTasks(taskQueryBuilder
            .putFilteringCriteria("attributes",
                    String.format("titus.cell:%1$s", EmbeddedTitusMaster.CELL_NAME))
            .putFilteringCriteria("attributes.op", "and")
            .build()
    );
    assertContainsTasks(tasks4, expectedTaskIds);
    tasks4.getItemsList().forEach(task -> assertCellInfo(task, EmbeddedTitusMaster.CELL_NAME));
}
@Test(timeout = 30_000)
public void testPagination() {
    // setUp pre-creates 3 batch + 3 service jobs (one task each), so a page size of 5
    // forces exactly two pages, the second holding a single item.
    Page firstPage = Page.newBuilder().setPageNumber(0).setPageSize(5).build();
    Page secondPage = Page.newBuilder().setPageNumber(1).setPageSize(5).build();

    // Jobs
    JobQuery.Builder jobQueryBuilder = JobQuery.newBuilder().putFilteringCriteria("attributes", PRE_CREATED_JOBS_LABEL);
    JobQueryResult jobPage1 = client.findJobs(jobQueryBuilder.setPage(firstPage).build());
    assertThat(jobPage1.getItemsList()).hasSize(5);
    checkPage(jobPage1.getPagination(), firstPage, 2, 6, true);
    JobQueryResult jobPage2 = client.findJobs(jobQueryBuilder.setPage(secondPage).build());
    assertThat(jobPage2.getItemsList()).hasSize(1);
    checkPage(jobPage2.getPagination(), secondPage, 2, 6, false);
    // The two pages must not overlap: 6 distinct job ids in total.
    Set<String> distinctJobIds = new HashSet<>();
    jobPage1.getItemsList().forEach(job -> distinctJobIds.add(job.getId()));
    jobPage2.getItemsList().forEach(job -> distinctJobIds.add(job.getId()));
    assertThat(distinctJobIds).hasSize(6);

    // Tasks
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder().putFilteringCriteria("attributes", PRE_CREATED_JOBS_LABEL);
    TaskQueryResult taskPage1 = client.findTasks(taskQueryBuilder.setPage(firstPage).build());
    assertThat(taskPage1.getItemsList()).hasSize(5);
    checkPage(taskPage1.getPagination(), firstPage, 2, 6, true);
    TaskQueryResult taskPage2 = client.findTasks(taskQueryBuilder.setPage(secondPage).build());
    assertThat(taskPage2.getItemsList()).hasSize(1);
    checkPage(taskPage2.getPagination(), secondPage, 2, 6, false);
    Set<String> distinctTaskIds = new HashSet<>();
    taskPage1.getItemsList().forEach(task -> distinctTaskIds.add(task.getId()));
    taskPage2.getItemsList().forEach(task -> distinctTaskIds.add(task.getId()));
    assertThat(distinctTaskIds).hasSize(6);
}
// Asserts the pagination metadata returned with a query result: the echoed current page,
// the expected totals, and the hasMore flag. Kept as individual assertions so a failure
// pinpoints exactly which field diverged.
private void checkPage(Pagination pagination, Page current, int totalPages, int totalItems, boolean hasMore) {
    assertThat(pagination.getCurrentPage()).isEqualTo(current);
    assertThat(pagination.getTotalPages()).isEqualTo(totalPages);
    assertThat(pagination.getTotalItems()).isEqualTo(totalItems);
    assertThat(pagination.getHasMore()).isEqualTo(hasMore);
}
@Test(timeout = 30_000)
public void testFieldsFiltering() {
    // Schedule a single job carrying two attributes so that field filtering can be
    // checked against a known attribute map.
    JobDescriptor<BatchJobExt> descriptor = BATCH_JOB_TEMPLATE.toBuilder()
            .withApplicationName("testFieldsFiltering")
            .withAttributes(ImmutableMap.of("keyA", "valueA", "keyB", "valueB"))
            .build();
    jobsScenarioBuilder.schedule(descriptor, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.startTasksInNewJob())
    );

    // Job query restricted to "status" and one nested attribute field.
    List<Job> jobs = client.findJobs(JobQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testFieldsFiltering")
            .addFields("status")
            .addFields("jobDescriptor.attributes.keyA")
            .setPage(PAGE)
            .build()
    ).getItemsList();
    assertThat(jobs).hasSize(1);
    Job returnedJob = jobs.get(0);
    assertThat(returnedJob.getId()).isNotEmpty(); // Always present
    assertThat(returnedJob.getStatus().getReasonMessage()).isNotEmpty();
    com.netflix.titus.grpc.protogen.JobDescriptor returnedDescriptor = returnedJob.getJobDescriptor();
    assertThat(returnedDescriptor.getAttributesMap()).isNotEmpty();
    assertThat(returnedDescriptor.getAttributesMap()).containsEntry("keyA", "valueA");

    // Task query restricted to "status" and "statusHistory"; unselected fields
    // (like taskContext) must come back empty.
    List<Task> tasks = client.findTasks(TaskQuery.newBuilder()
            .putFilteringCriteria("applicationName", "testFieldsFiltering")
            .addFields("status")
            .addFields("statusHistory")
            .setPage(PAGE)
            .build()
    ).getItemsList();
    assertThat(tasks).hasSize(1);
    Task returnedTask = tasks.get(0);
    assertThat(returnedTask.getId()).isNotEmpty(); // Always present
    assertThat(returnedTask.getStatus().getReasonMessage()).isNotEmpty();
    assertThat(returnedTask.getStatusHistoryList()).isNotEmpty();
    assertThat(returnedTask.getTaskContextMap()).isEmpty();
}
// Builds a job query from alternating key/value filtering criteria, using the
// class-wide default page.
private JobQuery newJobQuery(String... criteria) {
    JobQuery.Builder builder = JobQuery.newBuilder();
    builder.putAllFilteringCriteria(CollectionsExt.asMap(criteria));
    builder.setPage(PAGE);
    return builder.build();
}
// Asserts that the query result holds exactly the given job ids (count must match,
// and every expected id must be present).
private void assertContainsJobs(JobQueryResult queryResult, String... jobIds) {
    assertThat(queryResult.getItemsCount()).isEqualTo(jobIds.length);
    Set<String> returnedJobIds = new HashSet<>();
    for (Job job : queryResult.getItemsList()) {
        returnedJobIds.add(job.getId());
    }
    assertThat(returnedJobIds).contains(jobIds);
}
// Asserts that the query result holds exactly the given task ids (count must match,
// and every expected id must be present).
private void assertContainsTasks(TaskQueryResult queryResult, String... taskIds) {
    assertThat(queryResult.getItemsCount()).isEqualTo(taskIds.length);
    Set<String> returnedTaskIds = new HashSet<>();
    for (com.netflix.titus.grpc.protogen.Task task : queryResult.getItemsList()) {
        returnedTaskIds.add(task.getId());
    }
    assertThat(returnedTaskIds).contains(taskIds);
}
}
| 9,970 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/query/JobObserveTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.Owner;
import com.netflix.titus.grpc.protogen.Image;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import com.netflix.titus.grpc.protogen.JobStatus.JobState;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskStatus.TaskState;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.job.CellAssertions;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMaster;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.grpc.protogen.JobDescriptor.JobSpecCase.SERVICE;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates.killJob;
import static com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates.startTasksInNewJob;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.batchJobDescriptors;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.serviceJobDescriptors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
/**
 * Integration tests for the {@code observeJobs} gRPC event stream: snapshot delivery,
 * filtering by job-descriptor criteria, and filtering by job/task states. All tests run
 * against an embedded Titus cell; event ordering assertions below rely on the scenarios
 * executing exactly in the order written.
 */
@Category(IntegrationTest.class)
public class JobObserveTest extends BaseIntegrationTest {

    // Embedded cell with 4 agents, shared by all tests in this class.
    private final TitusStackResource titusStackResource = new TitusStackResource(EmbeddedTitusCells.basicKubeCell(4));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    // The stack rule must run (start/stop) around the scenario builder, hence the chain.
    @Rule
    public RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    private JobManagementServiceStub client;

    @Before
    public void setUp() throws Exception {
        this.client = titusStackResource.getGateway().getV3GrpcClient();
    }

    /**
     * Subscribes without filters and, for two consecutive run-and-kill batch jobs,
     * drains job events until the Finished state is observed for each job.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void observeJobs() throws InterruptedException {
        TestStreamObserver<JobChangeNotification> subscriber = subscribe(ObserveJobsQuery.getDefaultInstance());
        for (int i = 0; i < 2; i++) {
            String jobId = jobsScenarioBuilder.scheduleAndReturnJob(oneTaskBatchJobDescriptor(), jobScenarioBuilder -> jobScenarioBuilder
                    .template(startTasksInNewJob())
                    .template(ScenarioTemplates.killJob())
            ).getId();
            JobChangeNotification event;
            // Skip task events; every job event must belong to the just-scheduled job and
            // carry the embedded cell's info. Stop once the job reaches Finished.
            while (true) {
                event = subscriber.takeNext(SHORT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
                if (event.hasJobUpdate()) {
                    Job job = event.getJobUpdate().getJob();
                    assertThat(job.getId()).isEqualTo(jobId);
                    CellAssertions.assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME);
                    if (job.getStatus().getState() == JobState.Finished) {
                        break;
                    }
                }
            }
        }
        subscriber.cancel();
    }

    /**
     * Starts two jobs with different application names, then subscribes with an
     * applicationName filter. The initial snapshot must contain only the matching
     * job and its task, followed by the snapshot-end marker.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void observeSnapshotWithFilter() throws InterruptedException {
        startAll(
                batchJobDescriptors().getValue().toBuilder().withApplicationName("myApp").build(),
                batchJobDescriptors().getValue().toBuilder().withApplicationName("otherApp").build()
        );
        String myAppJobId = jobsScenarioBuilder.takeJobId(0);

        // start the stream after tasks are already running
        ObserveJobsQuery query = ObserveJobsQuery.newBuilder().putFilteringCriteria("applicationName", "myApp").build();
        TestStreamObserver<JobChangeNotification> subscriber = subscribe(query);
        // Snapshot order observed here: job update, task update, then snapshot end.
        // NOTE(review): this subscriber is never cancelled — presumably cleaned up when
        // the embedded stack shuts down; confirm no stream leak across tests.
        assertNextIsJobEvent(subscriber, job -> assertThat(job.getJobDescriptor().getApplicationName()).isEqualTo("myApp"));
        assertNextIsTaskEvent(subscriber, task -> assertThat(task.getJobId()).isEqualTo(myAppJobId));
        assertNextIsSnapshot(subscriber);
    }

    /**
     * Exercises each supported job-descriptor filtering criterion (applicationName,
     * owner, image name/tag, attributes with AND semantics, jobType) against a set of
     * jobs crafted so that exactly one job matches each query.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void observeByJobDescriptor() throws InterruptedException {
        // Jobs 0..3 are batch jobs differing in one descriptor field each; job 4 is a
        // service job used for the jobType criterion.
        startAll(
                batchJobDescriptors().getValue().toBuilder().withApplicationName("myApp").build(),
                batchJobDescriptors().getValue().toBuilder()
                        .withApplicationName("otherApp")
                        .withOwner(Owner.newBuilder().withTeamEmail("me@netflix.com").build())
                        .build(),
                batchJobDescriptors().getValue().but(j -> j.getContainer().toBuilder().withImage(
                        JobModel.newImage().withName("some/image").withTag("stable").build()
                )),
                batchJobDescriptors().getValue().toBuilder()
                        .withAttributes(ImmutableMap.<String, String>builder()
                                .put("attr1", "value1")
                                .put("attr2", "value2")
                                .build())
                        .build()
        );
        startAll(serviceJobDescriptors().getValue());

        observeByJobDescriptor(
                jobsScenarioBuilder.takeJobId(0),
                ObserveJobsQuery.newBuilder().putFilteringCriteria("applicationName", "myApp").build(),
                job -> assertThat(job.getJobDescriptor().getApplicationName()).isEqualTo("myApp")
        );
        observeByJobDescriptor(
                jobsScenarioBuilder.takeJobId(1),
                ObserveJobsQuery.newBuilder().putFilteringCriteria("owner", "me@netflix.com").build(),
                job -> assertThat(job.getJobDescriptor().getOwner().getTeamEmail()).isEqualTo("me@netflix.com")
        );
        observeByJobDescriptor(
                jobsScenarioBuilder.takeJobId(2),
                ObserveJobsQuery.newBuilder().putFilteringCriteria("imageName", "some/image").putFilteringCriteria("imageTag", "stable").build(),
                job -> {
                    Image image = job.getJobDescriptor().getContainer().getImage();
                    assertThat(image.getName()).isEqualTo("some/image");
                    assertThat(image.getTag()).isEqualTo("stable");
                }
        );
        observeByJobDescriptor(
                jobsScenarioBuilder.takeJobId(3),
                // "attr1" matches by key presence, "attr2:value2" by exact key:value.
                ObserveJobsQuery.newBuilder().putFilteringCriteria("attributes", "attr1,attr2:value2").putFilteringCriteria("attributes.op", "and").build(),
                job -> assertThat(job.getJobDescriptor().getAttributesMap())
                        .containsKey("attr1")
                        .containsEntry("attr2", "value2")
        );
        observeByJobDescriptor(
                jobsScenarioBuilder.takeJobId(4),
                ObserveJobsQuery.newBuilder().putFilteringCriteria("jobType", "service").build(),
                job -> assertThat(job.getJobDescriptor().getJobSpecCase()).isEqualTo(SERVICE)
        );
    }

    /**
     * Subscribes with the given query, waits for the first job event, and verifies it
     * belongs to the expected job and satisfies the supplied check. Fails if the stream
     * completes without delivering any job event.
     */
    private void observeByJobDescriptor(String jobId, ObserveJobsQuery query, Consumer<Job> check) throws InterruptedException {
        TestStreamObserver<JobChangeNotification> subscriber = subscribe(query);
        JobChangeNotification event;
        while ((event = subscriber.takeNext(SHORT_TIMEOUT_MS, TimeUnit.MILLISECONDS)) != null) {
            if (event.hasJobUpdate()) {
                Job job = event.getJobUpdate().getJob();
                assertThat(job.getId()).isEqualTo(jobId);
                check.accept(job);
                return;
            }
        }
        fail(String.format("Expected job event not found: jobId=%s, query=%s", jobId, query));
    }

    /**
     * Verifies state-based filtering: a jobState filter must still forward the task
     * events of matching jobs, and a taskStates filter must still forward the job
     * events of jobs owning matching tasks.
     */
    @Test(timeout = LONG_TEST_TIMEOUT_MS)
    public void observeByStates() throws InterruptedException {
        TestStreamObserver<JobChangeNotification> subscriberWithJobFilter = subscribe(
                ObserveJobsQuery.newBuilder().putFilteringCriteria("jobState", JobState.KillInitiated.toString()).build()
        );
        TestStreamObserver<JobChangeNotification> subscriberWithTaskFilter = subscribe(
                ObserveJobsQuery.newBuilder()
                        .putFilteringCriteria("taskStates", String.join(",", Arrays.asList(
                                TaskState.Launched.toString(),
                                TaskState.Started.toString()
                        )))
                        .build()
        );
        // No jobs match yet, so both streams deliver an empty snapshot first.
        assertNextIsSnapshot(subscriberWithJobFilter);
        assertNextIsSnapshot(subscriberWithTaskFilter);

        jobsScenarioBuilder.schedule(batchJobDescriptors().getValue(), jobScenarioBuilder -> jobScenarioBuilder
                .template(startTasksInNewJob())
                .template(killJob())
        );

        // jobState=KillInitiated: first event is the job entering KillInitiated,
        // followed by its task's KillInitiated update.
        assertNextIsJobEvent(subscriberWithJobFilter, job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated));
        assertNextIsTaskEvent(subscriberWithJobFilter, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.KillInitiated));

        // taskStates=Launched,Started: task transitions arrive in order, then the owning
        // job's KillInitiated update is still forwarded.
        assertNextIsTaskEvent(subscriberWithTaskFilter, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Launched));
        assertNextIsTaskEvent(subscriberWithTaskFilter, task -> assertThat(task.getStatus().getState()).isEqualTo(TaskState.Started));
        assertNextIsJobEvent(subscriberWithTaskFilter, job -> assertThat(job.getStatus().getState()).isEqualTo(JobState.KillInitiated));
    }

    // Opens an observeJobs stream for the given query and returns the collecting observer.
    private TestStreamObserver<JobChangeNotification> subscribe(ObserveJobsQuery query) {
        TestStreamObserver<JobChangeNotification> eventObserver = new TestStreamObserver<>();
        client.observeJobs(query, eventObserver);
        return eventObserver;
    }

    // Schedules each descriptor and waits until its tasks are started.
    @SafeVarargs
    private final <E extends JobDescriptor.JobDescriptorExt> void startAll(JobDescriptor<E>... descriptors) {
        for (JobDescriptor<E> descriptor : descriptors) {
            jobsScenarioBuilder.schedule(descriptor, jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob()));
        }
    }

    // Asserts the next event is the snapshot-end marker.
    private void assertNextIsSnapshot(TestStreamObserver<JobChangeNotification> subscriber) throws InterruptedException {
        assertThat(subscriber.takeNext(SHORT_TIMEOUT_MS, TimeUnit.MILLISECONDS).hasSnapshotEnd()).isTrue();
    }

    // Asserts the next event is a job update and applies the given check to the job.
    private void assertNextIsJobEvent(TestStreamObserver<JobChangeNotification> subscriber, Consumer<Job> check) throws InterruptedException {
        JobChangeNotification event = subscriber.takeNext(SHORT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        assertThat(event.hasJobUpdate()).isTrue();
        check.accept(event.getJobUpdate().getJob());
    }

    // Asserts the next event is a task update and applies the given check to the task.
    private void assertNextIsTaskEvent(TestStreamObserver<JobChangeNotification> subscriber, Consumer<Task> check) throws InterruptedException {
        JobChangeNotification event = subscriber.takeNext(SHORT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        assertThat(event.hasTaskUpdate()).isTrue();
        check.accept(event.getTaskUpdate().getTask());
    }
}
| 9,971 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/query/JobCursorQueryWithUpdatesTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import java.util.List;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Integration tests for cursor-based pagination of job and task queries while items are
 * being removed between page fetches. The assertions depend on the exact interleaving of
 * kills and queries, so statement order in each test is significant.
 */
@Category(IntegrationTest.class)
public class JobCursorQueryWithUpdatesTest extends BaseIntegrationTest {

    private static final int JOBS_PER_ENGINE = 3;
    private static final int TASKS_PER_JOB = 2;

    private final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(10));

    private final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    @Rule
    public final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    private JobManagementServiceGrpc.JobManagementServiceBlockingStub client;

    // Baseline listings (server ordering) captured before any removals; the tests compare
    // paged results against sublists of these.
    private List<Job> allJobsInOrder;
    private List<Task> allTasksInOrder;

    /**
     * Schedules 3 "app1" and 3 "app2" service jobs with 2 tasks each, then snapshots the
     * full job and task listings in server order.
     */
    @Before
    public void setUp() throws Exception {
        client = titusStackResource.getGateway().getV3BlockingGrpcClient();

        JobDescriptor<ServiceJobExt> jobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor()
                .but(jd -> jd.getExtensions().toBuilder().withCapacity(
                        Capacity.newBuilder().withMin(0).withDesired(TASKS_PER_JOB).withMax(TASKS_PER_JOB).build()
                ).build());
        jobsScenarioBuilder.schedule(jobDescriptor.toBuilder().withApplicationName("app1").build(), JOBS_PER_ENGINE, ScenarioTemplates.startTasksInNewJob());
        jobsScenarioBuilder.schedule(jobDescriptor.toBuilder().withApplicationName("app2").build(), JOBS_PER_ENGINE, ScenarioTemplates.startTasksInNewJob());

        // Page size Integer.MAX_VALUE / 2 fetches everything in a single page.
        this.allJobsInOrder = client.findJobs(JobQuery.newBuilder().setPage(Page.newBuilder().setPageSize(Integer.MAX_VALUE / 2)).build()).getItemsList();
        assertThat(allJobsInOrder).hasSize(2 * JOBS_PER_ENGINE);

        this.allTasksInOrder = client.findTasks(TaskQuery.newBuilder().setPage(Page.newBuilder().setPageSize(Integer.MAX_VALUE / 2)).build()).getItemsList();
        assertThat(allTasksInOrder).hasSize(2 * JOBS_PER_ENGINE * TASKS_PER_JOB);
    }

    /**
     * Pages through jobs with a cursor while killing jobs between fetches: removing the
     * item the cursor points at must not skip or repeat entries, and removing all
     * remaining items must yield an empty final page.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testJobQueryWithRemovedItems() {
        // Page 0
        JobQueryResult result0 = client.findJobs(JobQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(2)).build()
        );
        assertThat(result0.getItemsList()).containsExactlyElementsOf(allJobsInOrder.subList(0, 2));

        // Remove item at the cursor position
        jobsScenarioBuilder.takeJob(result0.getItems(1).getId()).template(ScenarioTemplates.killJob());

        // The next page must still resume right after the (now removed) cursor item.
        JobQueryResult result1 = client.findJobs(JobQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(2).setCursor(result0.getPagination().getCursor())).build()
        );
        assertThat(result1.getItemsList()).containsExactlyElementsOf(allJobsInOrder.subList(2, 4));

        // Remove last items
        jobsScenarioBuilder.takeJob(allJobsInOrder.get(4).getId()).template(ScenarioTemplates.killJob());
        jobsScenarioBuilder.takeJob(allJobsInOrder.get(5).getId()).template(ScenarioTemplates.killJob());

        JobQueryResult result2 = client.findJobs(JobQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(2).setCursor(result1.getPagination().getCursor())).build()
        );
        assertThat(result2.getItemsList()).isEmpty();
    }

    /**
     * Same cursor-stability checks for task queries, using terminate-and-shrink to remove
     * tasks between page fetches.
     */
    @Test(timeout = TEST_TIMEOUT_MS)
    public void testTaskQueryWithRemovedItems() {
        // Page 0
        TaskQueryResult result0 = client.findTasks(TaskQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(4)).build()
        );
        assertThat(result0.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(0, 4));

        // Remove item at the cursor position
        jobsScenarioBuilder.takeJob(result0.getItems(3).getJobId())
                .getTask(result0.getItems(3).getId()).template(ScenarioTemplates.terminateAndShrinkV3());

        TaskQueryResult result1 = client.findTasks(TaskQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(4).setCursor(result0.getPagination().getCursor())).build()
        );
        assertThat(result1.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(4, 8));

        // Remove last items
        jobsScenarioBuilder.takeJob(allTasksInOrder.get(10).getJobId())
                .getTask(allTasksInOrder.get(10).getId()).template(ScenarioTemplates.terminateAndShrinkV3());
        jobsScenarioBuilder.takeJob(allTasksInOrder.get(11).getJobId())
                .getTask(allTasksInOrder.get(11).getId()).template(ScenarioTemplates.terminateAndShrinkV3());

        // Only tasks 8 and 9 remain past the cursor after the removals above.
        TaskQueryResult result2 = client.findTasks(TaskQuery.newBuilder()
                .setPage(Page.newBuilder().setPageSize(2).setCursor(result1.getPagination().getCursor())).build()
        );
        assertThat(result2.getItemsList()).containsExactlyElementsOf(allTasksInOrder.subList(8, 10));
    }
}
| 9,972 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/job/query/JobDirectQueryTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.job.query;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskId;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.master.integration.v3.job.CellAssertions;
import com.netflix.titus.master.integration.v3.scenario.JobsScenarioBuilder;
import com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates;
import com.netflix.titus.testkit.embedded.cell.master.EmbeddedTitusMaster;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import static com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells.basicKubeCell;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskServiceJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Integration tests for direct (by-id) job and task lookups, covering active jobs/tasks,
 * archived (finished) ones, and non-existing ids.
 */
@Category(IntegrationTest.class)
public class JobDirectQueryTest extends BaseIntegrationTest {

    private static final String NON_EXISTING_V3_ID = "non_existing_id";

    private static final TitusStackResource titusStackResource = new TitusStackResource(basicKubeCell(2));

    private static final JobsScenarioBuilder jobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource);

    // Class-level rule: the embedded stack and the scenario fixture are shared by all tests.
    @ClassRule
    public static final RuleChain ruleChain = RuleChain.outerRule(titusStackResource).around(jobsScenarioBuilder);

    private static JobManagementServiceGrpc.JobManagementServiceBlockingStub client;

    // Fixture ids captured once in setUp and looked up by the individual tests.
    private static String v3BatchJobId;
    private static String v3BatchTaskId;
    private static String v3ArchivedBatchJobId;
    private static String v3ArchivedBatchTaskId;
    private static String v3ServiceJobId;

    /**
     * Schedules one running batch job, one batch job driven to completion (so its records
     * become archived), and one running service job; records their job/task ids.
     */
    @BeforeClass
    public static void setUp() throws Exception {
        client = titusStackResource.getGateway().getV3BlockingGrpcClient();

        // Batch Jobs
        jobsScenarioBuilder.schedule(
                oneTaskBatchJobDescriptor(),
                jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob())
        );
        jobsScenarioBuilder.schedule(
                oneTaskBatchJobDescriptor(),
                jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob())
                        .allTasks(ScenarioTemplates.completeTask())
                        .expectJobUpdateEvent(job -> job.getStatus().getState() == JobState.Finished, "Expected job to finish")
        );

        // Service Jobs
        jobsScenarioBuilder.schedule(
                oneTaskServiceJobDescriptor(),
                jobScenarioBuilder -> jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob())
        );

        v3BatchJobId = jobsScenarioBuilder.takeJobId(0);
        v3BatchTaskId = jobsScenarioBuilder.takeTaskId(0, 0);
        v3ArchivedBatchJobId = jobsScenarioBuilder.takeJobId(1);
        v3ArchivedBatchTaskId = jobsScenarioBuilder.takeTaskId(1, 0);
        v3ServiceJobId = jobsScenarioBuilder.takeJobId(2);
    }

    @Test(timeout = 30_000)
    public void testFindBatchJobByIdV3() throws Exception {
        testFindBatchJob(v3BatchJobId);
    }

    @Test(timeout = 30_000)
    public void testFindArchivedBatchJobByIdV3() throws Exception {
        // Finished jobs must remain queryable by id (served from the archive).
        testFindBatchJob(v3ArchivedBatchJobId);
    }

    // Looks up a batch job by id and checks identity plus embedded cell info.
    private void testFindBatchJob(String jobId) {
        Job job = client.findJob(JobId.newBuilder().setId(jobId).build());
        assertThat(job.getId()).isEqualTo(jobId);
        CellAssertions.assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME);
    }

    @Test(timeout = 30_000)
    public void testFindServiceJobByIdV3() throws Exception {
        testFindServiceJob(v3ServiceJobId);
    }

    // Looks up a service job by id; service jobs are expected to request an allocated IP.
    private void testFindServiceJob(String jobId) {
        Job job = client.findJob(JobId.newBuilder().setId(jobId).build());
        assertThat(job.getId()).isEqualTo(jobId);
        assertThat(job.getJobDescriptor().getContainer().getResources().getAllocateIP()).isTrue();
        CellAssertions.assertCellInfo(job, EmbeddedTitusMaster.CELL_NAME);
    }

    @Test(timeout = 30_000)
    public void testFindNonExistingJobByIdV3() throws Exception {
        try {
            client.findJob(JobId.newBuilder().setId(NON_EXISTING_V3_ID).build());
            // Bug fix: previously the test passed silently when no exception was thrown.
            // AssertionError is not an Exception, so the catch below does not swallow it.
            throw new AssertionError("findJob unexpectedly succeeded for non-existing job id: " + NON_EXISTING_V3_ID);
        } catch (Exception e) {
            assertThat(e.getMessage()).contains(NON_EXISTING_V3_ID);
        }
    }

    @Test(timeout = 30_000)
    public void testFindTaskByIdV3() throws Exception {
        Task task = client.findTask(TaskId.newBuilder().setId(v3BatchTaskId).build());
        assertThat(task.getId()).isEqualTo(v3BatchTaskId);
    }

    @Test(timeout = 30_000)
    public void testFindArchivedTaskByIdV3() throws Exception {
        // Tasks of finished jobs must remain queryable by id (served from the archive).
        Task task = client.findTask(TaskId.newBuilder().setId(v3ArchivedBatchTaskId).build());
        assertThat(task.getId()).isEqualTo(v3ArchivedBatchTaskId);
    }

    @Test(timeout = 30_000)
    public void testFindNonExistingTaskByIdV3() throws Exception {
        try {
            client.findTask(TaskId.newBuilder().setId(NON_EXISTING_V3_ID).build());
            // Bug fix: previously the test passed silently when no exception was thrown.
            throw new AssertionError("findTask unexpectedly succeeded for non-existing task id: " + NON_EXISTING_V3_ID);
        } catch (Exception e) {
            assertThat(e.getMessage()).contains(NON_EXISTING_V3_ID);
        }
    }
}
| 9,973 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/appscale/AutoScalingGrpcTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.appscale;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import com.google.protobuf.DoubleValue;
import com.google.protobuf.Empty;
import com.netflix.titus.api.appscale.model.PolicyType;
import com.netflix.titus.grpc.protogen.AutoScalingServiceGrpc;
import com.netflix.titus.grpc.protogen.DeletePolicyRequest;
import com.netflix.titus.grpc.protogen.GetPolicyResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.PutPolicyRequest;
import com.netflix.titus.grpc.protogen.ScalingPolicyID;
import com.netflix.titus.grpc.protogen.ScalingPolicyStatus;
import com.netflix.titus.master.appscale.endpoint.v3.grpc.AutoScalingTestUtils;
import com.netflix.titus.master.appscale.service.AutoScalingPolicyTests;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState.Deleted;
import static com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState.Deleting;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
@Category(IntegrationTest.class)
public class AutoScalingGrpcTest extends BaseIntegrationTest {
private static Logger log = LoggerFactory.getLogger(AutoScalingGrpcTest.class);
private AutoScalingServiceGrpc.AutoScalingServiceStub client;
private static final long TIMEOUT_MS = 30_000;
@Rule
public final TitusStackResource titusStackResource = new TitusStackResource(EmbeddedTitusCells.basicKubeCell(2));
@Before
public void setUp() throws Exception {
    // Each test talks to the auto-scaling gRPC endpoint of the embedded gateway.
    client = titusStackResource.getGateway().getAutoScaleGrpcClient();
}
/**
 * Test that a scaling policy can be created and then retrieved via its job id.
 *
 * @throws Exception if waiting for a gRPC response fails
 */
@Test(timeout = TEST_TIMEOUT_MS)
public void testGetPolicyById() throws Exception {
    String jobId = "Titus-123";

    // Create a step-scaling policy for the job and capture the generated policy id.
    PutPolicyRequest putPolicyRequest = AutoScalingTestUtils.generatePutPolicyRequest(jobId, PolicyType.StepScaling);
    TestStreamObserver<ScalingPolicyID> putResponse = new TestStreamObserver<>();
    client.setAutoScalingPolicy(putPolicyRequest, putResponse);
    ScalingPolicyID scalingPolicyID = putResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    // Bug fix: the original 'assertThat(!...isEmpty())' never invoked a terminal
    // assertion, so it could not fail. Assert the condition explicitly.
    assertThat(scalingPolicyID.getId().isEmpty()).isFalse();
    log.info("Put policy {} with ID {}", putPolicyRequest, scalingPolicyID);

    // Fetch the job's policies and verify exactly the new policy is returned.
    JobId getPolicyRequest = JobId.newBuilder().setId(jobId).build();
    TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
    client.getJobScalingPolicies(getPolicyRequest, getResponse);
    GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    log.info("Got result {}", getPolicyResult);
    assertThat(getPolicyResult.getItemsCount()).isEqualTo(1);
    assertThat(getPolicyResult.getItems(0).getId()).isEqualTo(scalingPolicyID);
    assertThat(getPolicyResult.getItems(0).getJobId()).isEqualTo(jobId);
}
/**
 * Test that a scaling policy can be deleted and subsequently reports a
 * Deleting or Deleted state.
 *
 * @throws Exception if waiting for a gRPC response fails
 */
@Test(timeout = TEST_TIMEOUT_MS)
public void testDeletePolicyById() throws Exception {
    // Put a policy
    String jobId = "Titus-1";
    PutPolicyRequest putPolicyRequest = AutoScalingTestUtils.generatePutPolicyRequest(jobId, PolicyType.StepScaling);
    TestStreamObserver<ScalingPolicyID> putResponse = new TestStreamObserver<>();
    client.setAutoScalingPolicy(putPolicyRequest, putResponse);
    ScalingPolicyID scalingPolicyID = putResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    // Bug fix: the original 'assertThat(!...isEmpty())' never invoked a terminal
    // assertion, so it could not fail. Assert the condition explicitly.
    assertThat(scalingPolicyID.getId().isEmpty()).isFalse();

    // Delete the policy
    TestStreamObserver<Empty> deletePolicyResult = new TestStreamObserver<>();
    DeletePolicyRequest deletePolicyRequest = DeletePolicyRequest.newBuilder().setId(scalingPolicyID).build();
    client.deleteAutoScalingPolicy(deletePolicyRequest, deletePolicyResult);
    deletePolicyResult.awaitDone();
    assertThat(deletePolicyResult.hasError()).isFalse();

    // Make sure it's set to Deleting or Deleted state. Deletion is asynchronous,
    // so either state proves the delete request took effect.
    TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
    client.getScalingPolicy(scalingPolicyID, getResponse);
    GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    log.info("Got result {}", getPolicyResult);
    assertThat(getPolicyResult.getItemsCount()).isEqualTo(1);
    // Bug fix: the original wrapped the boolean in assertThat(...) without .isTrue(),
    // making the check a no-op.
    assertThat(isDeletingState(getPolicyResult.getItems(0).getPolicyState())).isTrue();
}
private static boolean isDeletingState(ScalingPolicyStatus status) {
if (status.getState() == Deleted || status.getState() == Deleting) {
return true;
}
return false;
}
/**
* Test that a non-existent job returns an empty list of policies.
*
* @throws Exception
*/
@Test(timeout = TEST_TIMEOUT_MS)
public void testGetNonexistentJob() throws Exception {
String jobId = "Titus-0";
JobId getPolicyRequest = JobId.newBuilder().setId(jobId).build();
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getJobScalingPolicies(getPolicyRequest, getResponse);
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
log.info("Got result {}", getPolicyResult);
assertThat(getPolicyResult.getItemsCount()).isEqualTo(0);
}
/**
* Test that a non-exitent policy returns an empty list of policies.
*
* @throws Exception
*/
@Test(timeout = TEST_TIMEOUT_MS)
@Ignore // GRPC request/response semantics requires that a value is always returned
public void testGetNonexistentPolicy() throws Exception {
ScalingPolicyID scalingPolicyID = ScalingPolicyID.newBuilder().setId("deadbeef").build();
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getScalingPolicy(scalingPolicyID, getResponse);
getResponse.awaitDone();
assertThat(getResponse.getEmittedItems().size()).isEqualTo(0);
assertThat(getResponse.hasError()).isFalse();
}
/**
* Test that we can get multiple exceptions.
*
* @throws Exception
*/
@Test(timeout = TEST_TIMEOUT_MS)
public void getAllPolicies() throws Exception {
Set<ScalingPolicyID> policyIDSet = new HashSet<>();
int numJobs = 2;
for (int i = 1; i <= numJobs; i++) {
PutPolicyRequest putPolicyRequest = AutoScalingTestUtils.generatePutPolicyRequest("Titus-" + i, PolicyType.StepScaling);
TestStreamObserver<ScalingPolicyID> putResponse = new TestStreamObserver<>();
client.setAutoScalingPolicy(putPolicyRequest, putResponse);
ScalingPolicyID scalingPolicyID = putResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(!scalingPolicyID.getId().isEmpty());
policyIDSet.add(scalingPolicyID);
}
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getAllScalingPolicies(Empty.newBuilder().build(), getResponse);
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(getPolicyResult.getItemsCount()).isEqualTo(numJobs);
getPolicyResult.getItemsList().forEach(scalingPolicyResult -> {
assertThat(policyIDSet.contains(scalingPolicyResult.getId())).isTrue();
});
}
/**
* Test policy configuration update for target tracking policy
*
* @throws Exception
*/
@Test(timeout = TEST_TIMEOUT_MS)
public void testUpdatePolicyConfigurationForTargetTracking() throws Exception {
String jobId = "Titus-123";
PutPolicyRequest putPolicyRequest = AutoScalingTestUtils.generatePutPolicyRequest(jobId, PolicyType.TargetTrackingScaling);
TestStreamObserver<ScalingPolicyID> putResponse = new TestStreamObserver<>();
client.setAutoScalingPolicy(putPolicyRequest, putResponse);
ScalingPolicyID scalingPolicyID = putResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(!scalingPolicyID.getId().isEmpty());
TestStreamObserver<Empty> updateResponse = new TestStreamObserver<>();
client.updateAutoScalingPolicy(
AutoScalingTestUtils.generateUpdateTargetTrackingPolicyRequest(scalingPolicyID.getId(), 100.0),
updateResponse);
updateResponse.awaitDone();
AutoScalingPolicyTests.waitForCondition(() -> {
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getScalingPolicy(scalingPolicyID, getResponse);
try {
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
return getPolicyResult.getItemsCount() == 1 &&
getPolicyResult.getItems(0).getScalingPolicy().getTargetPolicyDescriptor().getTargetValue().getValue() == 100.0;
} catch (Exception ignored) {
}
return false;
});
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getScalingPolicy(scalingPolicyID, getResponse);
getResponse.awaitDone();
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(getPolicyResult.getItemsCount()).isEqualTo(1);
DoubleValue targetValue = getPolicyResult.getItems(0).getScalingPolicy().getTargetPolicyDescriptor().getTargetValue();
assertThat(targetValue.getValue()).isEqualTo(100.0);
}
/**
* Test policy configuration update for target tracking policy
*
* @throws Exception
*/
@Test(timeout = TEST_TIMEOUT_MS)
public void testUpdatePolicyConfigurationForStepScaling() throws Exception {
String jobId = "Titus-123";
PutPolicyRequest putPolicyRequest = AutoScalingTestUtils.generatePutPolicyRequest(jobId, PolicyType.StepScaling);
TestStreamObserver<ScalingPolicyID> putResponse = new TestStreamObserver<>();
client.setAutoScalingPolicy(putPolicyRequest, putResponse);
putResponse.awaitDone();
ScalingPolicyID scalingPolicyID = putResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(!scalingPolicyID.getId().isEmpty());
TestStreamObserver<Empty> updateResponse = new TestStreamObserver<>();
client.updateAutoScalingPolicy(
AutoScalingTestUtils.generateUpdateStepScalingPolicyRequest(scalingPolicyID.getId(), 100.0),
updateResponse);
updateResponse.awaitDone();
AutoScalingPolicyTests.waitForCondition(() -> {
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getScalingPolicy(scalingPolicyID, getResponse);
try {
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
return getPolicyResult.getItemsCount() == 1 &&
getPolicyResult.getItems(0).getScalingPolicy().getStepPolicyDescriptor().getAlarmConfig().getThreshold().getValue() == 100.0;
} catch (Exception ignored) {
}
return false;
});
TestStreamObserver<GetPolicyResult> getResponse = new TestStreamObserver<>();
client.getScalingPolicy(scalingPolicyID, getResponse);
getResponse.awaitDone();
GetPolicyResult getPolicyResult = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertThat(getPolicyResult.getItemsCount()).isEqualTo(1);
DoubleValue threshold = getPolicyResult.getItems(0).getScalingPolicy().getStepPolicyDescriptor().getAlarmConfig().getThreshold();
assertThat(threshold.getValue()).isEqualTo(100.0);
}
}
| 9,974 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/integration/v3/supervisor/SupervisorBasicTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.integration.v3.supervisor;
import java.util.Iterator;
import com.google.protobuf.Empty;
import com.netflix.titus.grpc.protogen.MasterInstance;
import com.netflix.titus.grpc.protogen.MasterInstanceId;
import com.netflix.titus.grpc.protogen.MasterInstances;
import com.netflix.titus.grpc.protogen.MasterStatus;
import com.netflix.titus.grpc.protogen.SupervisorEvent;
import com.netflix.titus.grpc.protogen.SupervisorServiceGrpc.SupervisorServiceBlockingStub;
import com.netflix.titus.master.integration.BaseIntegrationTest;
import com.netflix.titus.testkit.embedded.cell.EmbeddedTitusCells;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.master.TitusStackResource;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationTest.class)
public class SupervisorBasicTest extends BaseIntegrationTest {

    @ClassRule
    public static final TitusStackResource titusStackResource = new TitusStackResource(EmbeddedTitusCells.basicKubeCell(1));

    private final SupervisorServiceBlockingStub blockingGrpcClient = titusStackResource.getMaster().getSupervisorBlockingGrpcClient();

    @Test(timeout = TEST_TIMEOUT_MS)
    public void testGetMasterInstances() {
        // A single-master cell reports exactly one registered instance.
        MasterInstances allInstances = blockingGrpcClient.getMasterInstances(Empty.getDefaultInstance());
        assertThat(allInstances.getInstancesList()).hasSize(1);

        // Re-fetching that instance by id shows it as the activated leader.
        String instanceId = allInstances.getInstances(0).getInstanceId();
        MasterInstance fetched = blockingGrpcClient.getMasterInstance(
                MasterInstanceId.newBuilder().setInstanceId(instanceId).build()
        );
        assertThat(fetched.getStatus().getState()).isEqualTo(MasterStatus.MasterState.LeaderActivated);
    }

    @Test(timeout = TEST_TIMEOUT_MS)
    public void testObserveEvents() {
        // The first event emitted on the supervisor stream is a master instance update.
        Iterator<SupervisorEvent> eventIterator = blockingGrpcClient.observeEvents(Empty.getDefaultInstance());
        SupervisorEvent firstEvent = eventIterator.next();
        assertThat(firstEvent.getEventCase()).isEqualTo(SupervisorEvent.EventCase.MASTERINSTANCEUPDATE);
    }
}
| 9,975 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer/endpoint.v3 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer/endpoint.v3/grpc/DefaultLoadBalancerGrpcTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.loadbalancer.endpoint.v3.grpc;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import com.google.protobuf.Empty;
import com.netflix.titus.api.loadbalancer.service.LoadBalancerService;
import com.netflix.titus.grpc.protogen.AddLoadBalancerRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersResult;
import com.netflix.titus.grpc.protogen.GetJobLoadBalancersResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.LoadBalancerId;
import com.netflix.titus.grpc.protogen.RemoveLoadBalancerRequest;
import com.netflix.titus.master.loadbalancer.endpoint.grpc.DefaultLoadBalancerServiceGrpc;
import com.netflix.titus.master.loadbalancer.service.LoadBalancerTests;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.master.loadbalancer.service.LoadBalancerTests.buildPageSupplier;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
public class DefaultLoadBalancerGrpcTest {

    // Fixed: logger made final per SLF4J convention.
    private static final Logger logger = LoggerFactory.getLogger(DefaultLoadBalancerGrpcTest.class);

    private DefaultLoadBalancerServiceGrpc serviceGrpc;

    // Adapters bridging the GRPC endpoint methods to the BiConsumer shape expected by the
    // LoadBalancerTests helpers. These must stay lambdas rather than method references:
    // serviceGrpc is only assigned in setUp(), after field initialization, and a bound
    // method reference would capture the (null) receiver eagerly.
    private final BiConsumer<AddLoadBalancerRequest, TestStreamObserver<Empty>> putLoadBalancerWithJobId =
            (request, addResponse) -> serviceGrpc.addLoadBalancer(request, addResponse);
    private final BiConsumer<JobId, TestStreamObserver<GetJobLoadBalancersResult>> getJobLoadBalancers =
            (request, getResponse) -> serviceGrpc.getJobLoadBalancers(request, getResponse);
    private final BiConsumer<GetAllLoadBalancersRequest, TestStreamObserver<GetAllLoadBalancersResult>> getAllLoadBalancers =
            (request, getResponse) -> serviceGrpc.getAllLoadBalancers(request, getResponse);
    private final BiConsumer<RemoveLoadBalancerRequest, TestStreamObserver<Empty>> removeLoadBalancers =
            (request, removeResponse) -> serviceGrpc.removeLoadBalancer(request, removeResponse);

    @Before
    public void setUp() throws Exception {
        LoadBalancerService loadBalancerService = LoadBalancerTests.getMockLoadBalancerService();
        serviceGrpc = new DefaultLoadBalancerServiceGrpc(loadBalancerService);
    }

    @After
    public void tearDown() throws Exception {
        // Nothing to clean up; kept as an explicit extension point.
    }

    /** A job with no associations yields an empty load balancer set. */
    @Test
    public void testGetLoadBalancers() throws Exception {
        String jobIdStr = "Titus-123";
        Set<LoadBalancerId> loadBalancerIds = LoadBalancerTests.getLoadBalancersForJob(jobIdStr, getJobLoadBalancers);
        assertThat(loadBalancerIds.size()).isEqualTo(0);
    }

    /** Round-trips associations: add load balancers, read them back, remove them, verify empty. */
    @Test
    public void testSetAndGetAndRmLoadBalancers() throws Exception {
        int numJobs = 5;
        int numLoadBalancers = 10;
        Map<String, Set<LoadBalancerId>> jobIdToLoadBalancersMap =
                LoadBalancerTests.putLoadBalancersPerJob(numJobs, numLoadBalancers, putLoadBalancerWithJobId);

        // For each job, query the load balancers and check that they match.
        jobIdToLoadBalancersMap.forEach((jobId, loadBalancerIdSet) -> {
            Set<LoadBalancerId> getIdSet = LoadBalancerTests.getLoadBalancersForJob(jobId, getJobLoadBalancers);
            logger.info("Checking that Job {} LB IDs {} match expected IDs {}", jobId, getIdSet, loadBalancerIdSet);
            // Fixed: isEqualTo reports the set difference on failure, unlike asserting equals() is true.
            assertThat(getIdSet).isEqualTo(loadBalancerIdSet);
        });

        // Remove the load balancers for each job
        jobIdToLoadBalancersMap.forEach((jobId, loadBalancerIdSet) -> {
            loadBalancerIdSet.forEach(loadBalancerId -> {
                logger.info("Removing load balancer {} from Job {}", loadBalancerId.getId(), jobId);
                LoadBalancerTests.removeLoadBalancerFromJob(jobId, loadBalancerId, removeLoadBalancers);
            });
        });

        // Check that there are no load balancers left
        jobIdToLoadBalancersMap.forEach((jobId, loadBalancerIdSet) -> {
            assertThat(LoadBalancerTests.getLoadBalancersForJob(jobId, getJobLoadBalancers).size()).isEqualTo(0);
            logger.info("Job {} has no more load balancers", jobId);
        });
    }

    /** With nothing stored, the first page of the get-all query is empty. */
    @Test
    public void testEmptyGetAllLoadBalancers() throws Exception {
        int pageSize = 5;
        int currentPageNum = 0;
        assertThat(LoadBalancerTests.getAllLoadBalancers(buildPageSupplier(currentPageNum, pageSize), getAllLoadBalancers)
                .getJobLoadBalancersCount()).isEqualTo(0);
    }
}
| 9,976 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer/service/DefaultLoadBalancerServiceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.loadbalancer.service;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.netflix.titus.api.connector.cloud.LoadBalancer;
import com.netflix.titus.api.connector.cloud.LoadBalancerConnector;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.event.TaskUpdateEvent;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancerState;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTargetState;
import com.netflix.titus.api.loadbalancer.model.sanitizer.DefaultLoadBalancerJobValidator;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerJobValidator;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerValidationConfiguration;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStore;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.rx.batch.Batch;
import com.netflix.titus.runtime.store.v3.memory.InMemoryLoadBalancerStore;
import org.junit.Before;
import org.junit.Test;
import rx.Completable;
import rx.Single;
import rx.observers.AssertableSubscriber;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import rx.subjects.PublishSubject;
import static com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget.State.DEREGISTERED;
import static com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget.State.REGISTERED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class DefaultLoadBalancerServiceTest {
private static final int MIN_TIME_IN_QUEUE_MS = 1_000;
private static final int FLUSH_WAIT_TIME_MS = 2 * MIN_TIME_IN_QUEUE_MS;
private TitusRuntime runtime;
private LoadBalancerConnector client;
private V3JobOperations v3JobOperations;
private LoadBalancerJobOperations loadBalancerJobOperations;
private LoadBalancerStore loadBalancerStore;
private LoadBalancerReconciler reconciler;
private PublishSubject<TargetStateBatchable> reconcilerEvents;
private LoadBalancerJobValidator validator;
private TestScheduler testScheduler;
private CallMetadata callMetadata = CallMetadata.newBuilder().withCallerId("Load Balancer test").build();
/**
 * Installs happy-path stubs: register/deregister calls complete successfully, the job
 * event stream stays open and silent, and every queried load balancer resolves as
 * ACTIVE with no pre-registered targets.
 */
private void defaultStubs() {
    when(v3JobOperations.observeJobs()).thenReturn(PublishSubject.create());
    when(client.registerAll(any(), any())).thenReturn(Completable.complete());
    when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
    when(client.getLoadBalancer(any())).thenAnswer(invocation -> {
        String loadBalancerId = invocation.getArgument(0);
        return Single.just(new LoadBalancer(loadBalancerId, LoadBalancer.State.ACTIVE, Collections.emptySet()));
    });
}
@Before
public void setUp() throws Exception {
    // Runtime and virtual-time scheduler shared by every test.
    runtime = TitusRuntimes.internal();
    testScheduler = Schedulers.test();

    // Connector mock plus an in-memory store backing the service under test.
    client = mock(LoadBalancerConnector.class);
    loadBalancerStore = new InMemoryLoadBalancerStore();

    // Reconciler mock wired to a controllable event stream.
    reconcilerEvents = PublishSubject.create();
    reconciler = mock(LoadBalancerReconciler.class);
    when(reconciler.events()).thenReturn(reconcilerEvents.toSerialized());

    // Job-operations mock and the helpers built on top of it.
    v3JobOperations = mock(V3JobOperations.class);
    loadBalancerJobOperations = new LoadBalancerJobOperations(v3JobOperations);
    LoadBalancerValidationConfiguration validationConfig = LoadBalancerTests.mockValidationConfig(30);
    validator = new DefaultLoadBalancerJobValidator(v3JobOperations, loadBalancerStore, validationConfig);
}
/**
 * Associating a load balancer with a job registers exactly the job's Started tasks
 * with the connector, records them as REGISTERED in the store, and marks their IPs
 * to be ignored by the reconciler (see verifyReconcilerIgnore) while the update is
 * in flight. Tasks in any other state must not be registered.
 */
@Test
public void addLoadBalancerRegistersTasks() {
    String jobId = UUID.randomUUID().toString();
    String loadBalancerId = "lb-" + UUID.randomUUID().toString();
    defaultStubs();
    LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
    // The 5 Started tasks are the only ones expected to be registered.
    List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, jobId);
    Collection<LoadBalancerTargetState> expectedTargets = tasks.stream()
            .map(task -> new LoadBalancerTargetState(
                    new LoadBalancerTarget(loadBalancerId, task.getId(), task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP)),
                    REGISTERED
            ))
            .collect(Collectors.toList());
    // Mix in tasks in non-registrable states; they must all be filtered out.
    when(v3JobOperations.getTasks(jobId)).thenReturn(CollectionsExt.merge(
            tasks,
            LoadBalancerTests.buildTasks(2, jobId, TaskState.StartInitiated),
            LoadBalancerTests.buildTasks(2, jobId, TaskState.KillInitiated),
            LoadBalancerTests.buildTasks(3, jobId, TaskState.Finished),
            LoadBalancerTests.buildTasks(1, jobId, TaskState.Disconnected)
    ));
    LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
    DefaultLoadBalancerService service = new DefaultLoadBalancerService(
            runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
    AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
    assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
    verify(v3JobOperations).getTasks(jobId);
    // Advance virtual time past the batching window so buffered updates flush as one batch.
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    testSubscriber.assertNoErrors().assertValueCount(1);
    assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
            .containsExactlyInAnyOrderElementsOf(expectedTargets);
    // All 5 Started tasks registered in a single connector call; nothing deregistered.
    verify(client).registerAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == tasks.size()));
    verify(client, never()).deregisterAll(eq(loadBalancerId), any());
    verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
}
/**
 * Target updates are buffered, not flushed immediately: no batch is emitted and no
 * connector call is made until the batching window (MIN_TIME_IN_QUEUE_MS) elapses
 * on the (virtual-time) scheduler.
 */
@Test
public void targetsAreBufferedUpToATimeout() {
    String jobId = UUID.randomUUID().toString();
    String loadBalancerId = "lb-" + UUID.randomUUID().toString();
    defaultStubs();
    LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
    // Only the 3 Started tasks are registrable; the other states must be filtered out.
    List<Task> tasks = LoadBalancerTests.buildTasksStarted(3, jobId);
    when(v3JobOperations.getTasks(jobId)).thenReturn(CollectionsExt.merge(
            tasks,
            LoadBalancerTests.buildTasks(1, jobId, TaskState.StartInitiated),
            LoadBalancerTests.buildTasks(1, jobId, TaskState.KillInitiated),
            LoadBalancerTests.buildTasks(1, jobId, TaskState.Finished),
            LoadBalancerTests.buildTasks(1, jobId, TaskState.Disconnected)
    ));
    LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
    DefaultLoadBalancerService service = new DefaultLoadBalancerService(
            runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
    AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
    assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
    verify(v3JobOperations).getTasks(jobId);
    // Before the timeout: no batch emitted and no connector calls yet.
    testSubscriber.assertNoErrors().assertValueCount(0);
    verify(client, never()).registerAll(any(), any());
    verify(client, never()).deregisterAll(any(), any());
    // targets are ignored before batching happens
    verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    // After the timeout: exactly one batch, registering all Started tasks.
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    testSubscriber.assertNoErrors().assertValueCount(1);
    verify(client).registerAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == tasks.size()));
    verify(client, never()).deregisterAll(eq(loadBalancerId), any());
}
/**
 * When nothing has been enqueued, the batch stream stays silent and no connector
 * calls are made — neither before nor after the batching window elapses.
 */
@Test
public void emptyBatchesAreFilteredOut() {
    defaultStubs();
    LoadBalancerConfiguration config = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
    DefaultLoadBalancerService service = new DefaultLoadBalancerService(
            runtime, config, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
    AssertableSubscriber<Batch<TargetStateBatchable, String>> batchSubscriber = service.events().test();

    // No batch may be emitted before the window elapses ...
    batchSubscriber.assertNoErrors().assertValueCount(0);
    verify(client, never()).registerAll(any(), any());
    verify(client, never()).deregisterAll(any(), any());
    verifyNoReconcilerIgnore();

    // ... nor after, since there is nothing to flush.
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    batchSubscriber.assertNoErrors().assertValueCount(0);
    verify(client, never()).registerAll(any(), any());
    verify(client, never()).deregisterAll(any(), any());
    verifyNoReconcilerIgnore();
}
/**
 * When fetching a job fails, the association is still persisted (so reconciliation
 * can retry later) but no registration work is attempted for it; an independent,
 * healthy job is processed normally afterwards.
 */
@Test
public void addSkipLoadBalancerOperationsOnErrors() {
    String firstJobId = UUID.randomUUID().toString();
    String secondJobId = UUID.randomUUID().toString();
    String loadBalancerId = "lb-" + UUID.randomUUID().toString();
    defaultStubs();
    // first fails, second succeeds
    LoadBalancerTests.applyValidGetJobMock(v3JobOperations, firstJobId).thenThrow(new RuntimeException());
    LoadBalancerTests.applyValidGetJobMock(v3JobOperations, secondJobId);
    List<Task> tasks = LoadBalancerTests.buildTasksStarted(2, secondJobId);
    when(v3JobOperations.getTasks(secondJobId)).thenReturn(tasks);
    LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
    DefaultLoadBalancerService service = new DefaultLoadBalancerService(
            runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
    AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
    // first fails and gets skipped after being saved, so convergence can pick it up later
    assertTrue(service.addLoadBalancer(firstJobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertThat(service.getJobLoadBalancers(firstJobId).toBlocking().first()).isEqualTo(loadBalancerId);
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    // The failed job produced no batch, no task lookups, and no connector calls.
    testSubscriber.assertNoErrors().assertNoValues();
    verify(v3JobOperations, never()).getTasks(firstJobId);
    verify(client, never()).registerAll(any(), any());
    verify(client, never()).deregisterAll(any(), any());
    verifyNoReconcilerIgnore();
    // second succeeds
    assertTrue(service.addLoadBalancer(secondJobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertThat(service.getJobLoadBalancers(secondJobId).toBlocking().first()).isEqualTo(loadBalancerId);
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    // The healthy job flushes one batch registering both of its Started tasks.
    testSubscriber.assertNoErrors().assertValueCount(1);
    verify(v3JobOperations).getTasks(secondJobId);
    verify(client).registerAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == tasks.size()));
    verify(client, never()).deregisterAll(eq(loadBalancerId), any());
    verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
}
/**
 * A job may be associated with several load balancers at once. Both associations
 * must be registered with the connector and recorded in the store, and when new
 * tasks of the job transition to Started later (via the job event stream), both
 * load balancers must receive the new targets as well.
 */
@Test
public void multipleLoadBalancersPerJob() {
    PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
    String jobId = UUID.randomUUID().toString();
    String firstLoadBalancerId = "lb-" + UUID.randomUUID().toString();
    String secondLoadBalancerId = "lb-" + UUID.randomUUID().toString();
    int numberOfStartedTasks = 5;
    // Stubs set up manually (not defaultStubs()) so the test controls the job event stream.
    when(client.registerAll(any(), any())).thenReturn(Completable.complete());
    when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
    when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
    LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
    List<Task> tasks = LoadBalancerTests.buildTasksStarted(numberOfStartedTasks, jobId);
    // Expected REGISTERED store entries, one set per load balancer.
    Collection<LoadBalancerTargetState> firstExpectedTargets = tasks.stream()
            .map(task -> new LoadBalancerTargetState(
                    new LoadBalancerTarget(firstLoadBalancerId, task.getId(), task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP)),
                    REGISTERED
            ))
            .collect(Collectors.toList());
    Collection<LoadBalancerTargetState> secondExpectedTargets = tasks.stream()
            .map(task -> new LoadBalancerTargetState(
                    new LoadBalancerTarget(secondLoadBalancerId, task.getId(), task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP)),
                    REGISTERED
            ))
            .collect(Collectors.toList());
    // Tasks in non-registrable states must be ignored by both associations.
    when(v3JobOperations.getTasks(jobId)).thenReturn(CollectionsExt.merge(
            tasks,
            LoadBalancerTests.buildTasks(2, jobId, TaskState.StartInitiated),
            LoadBalancerTests.buildTasks(2, jobId, TaskState.KillInitiated),
            LoadBalancerTests.buildTasks(3, jobId, TaskState.Finished),
            LoadBalancerTests.buildTasks(1, jobId, TaskState.Disconnected)
    ));
    LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
    DefaultLoadBalancerService service = new DefaultLoadBalancerService(
            runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
    AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
    // associate two load balancers to the same job
    assertTrue(service.addLoadBalancer(jobId, firstLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertTrue(service.addLoadBalancer(jobId, secondLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
    assertThat(service.getJobLoadBalancers(jobId).toList().toBlocking().single())
            .containsOnly(firstLoadBalancerId, secondLoadBalancerId);
    verify(v3JobOperations, times(2)).getTasks(jobId);
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    // 1 batch per loadbalancer
    testSubscriber.assertNoErrors().assertValueCount(2);
    assertThat(loadBalancerStore.getLoadBalancerTargets(firstLoadBalancerId).collectList().block())
            .containsExactlyInAnyOrderElementsOf(firstExpectedTargets);
    assertThat(loadBalancerStore.getLoadBalancerTargets(secondLoadBalancerId).collectList().block())
            .containsExactlyInAnyOrderElementsOf(secondExpectedTargets);
    verify(client).registerAll(eq(firstLoadBalancerId), argThat(targets -> targets != null && targets.size() == numberOfStartedTasks));
    verify(client).registerAll(eq(secondLoadBalancerId), argThat(targets -> targets != null && targets.size() == numberOfStartedTasks));
    verify(client, never()).deregisterAll(eq(firstLoadBalancerId), any());
    verify(client, never()).deregisterAll(eq(secondLoadBalancerId), any());
    verifyReconcilerIgnore(firstLoadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    verifyReconcilerIgnore(secondLoadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    // now some more tasks are added to the job, check if both load balancers get updated
    List<Task> newTasks = new ArrayList<>();
    for (int i = 1; i <= numberOfStartedTasks; i++) {
        String taskId = UUID.randomUUID().toString();
        // Emit a StartInitiated -> Started transition for each new task on the event stream.
        Task startingWithIp = ServiceJobTask.newBuilder()
                .withJobId(jobId)
                .withId(taskId)
                .withStatus(TaskStatus.newBuilder().withState(TaskState.StartInitiated).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, String.format("%1$d.%1$d.%1$d.%1$d", i + numberOfStartedTasks)
                )).build();
        Task started = startingWithIp.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .build();
        newTasks.add(started);
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, started, startingWithIp, callMetadata));
    }
    testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    // 2 more batches (one per load balancer)
    testSubscriber.assertNoErrors().assertValueCount(4);
    verify(client, times(2)).registerAll(eq(firstLoadBalancerId), argThat(targets -> targets != null && targets.size() == numberOfStartedTasks));
    verify(client, times(2)).registerAll(eq(secondLoadBalancerId), argThat(targets -> targets != null && targets.size() == numberOfStartedTasks));
    verify(client, never()).deregisterAll(eq(firstLoadBalancerId), any());
    verify(client, never()).deregisterAll(eq(secondLoadBalancerId), any());
    verifyReconcilerIgnore(firstLoadBalancerId, LoadBalancerTests.ipAddresses(newTasks));
    verifyReconcilerIgnore(secondLoadBalancerId, LoadBalancerTests.ipAddresses(newTasks));
    assertThat(loadBalancerStore.getLoadBalancerTargets(firstLoadBalancerId).collectList().block())
            .hasSize(firstExpectedTargets.size() + numberOfStartedTasks);
    assertThat(loadBalancerStore.getLoadBalancerTargets(secondLoadBalancerId).collectList().block())
            .hasSize(secondExpectedTargets.size() + numberOfStartedTasks);
}
@Test
public void targetsAreBufferedInBatches() {
String jobId = UUID.randomUUID().toString();
String loadBalancerId = "lb-" + UUID.randomUUID().toString();
ThreadLocalRandom random = ThreadLocalRandom.current();
int batchSize = random.nextInt(5, 20);
defaultStubs();
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
List<Task> tasks = LoadBalancerTests.buildTasksStarted(batchSize, jobId);
when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
DefaultLoadBalancerService service = new DefaultLoadBalancerService(
runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors().assertValueCount(1);
verify(client).registerAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == batchSize));
verify(client, never()).deregisterAll(eq(loadBalancerId), any());
verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
}
    /**
     * When the connector fails to apply one batch, that batch is dropped but
     * batches for other load balancers still go through, and the events stream
     * itself does not error out.
     */
    @Test
    public void batchesWithErrorsAreSkipped() {
        String jobId = UUID.randomUUID().toString();
        String firstLoadBalancerId = "lb-" + UUID.randomUUID().toString();
        String secondLoadBalancerId = "lb-" + UUID.randomUUID().toString();
        int batchSize = 1024;
        // the first load balancer fails on registration, the second succeeds
        when(client.registerAll(eq(firstLoadBalancerId), any())).thenReturn(Completable.error(new RuntimeException()));
        when(client.registerAll(eq(secondLoadBalancerId), any())).thenReturn(Completable.complete());
        when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
        when(v3JobOperations.observeJobs()).thenReturn(PublishSubject.create());
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
        List<Task> tasks = LoadBalancerTests.buildTasksStarted(batchSize, jobId);
        when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        // associate both load balancers with the same job
        assertTrue(service.addLoadBalancer(jobId, firstLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertTrue(service.addLoadBalancer(jobId, secondLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(jobId).toBlocking().toIterable())
                .containsExactlyInAnyOrder(firstLoadBalancerId, secondLoadBalancerId);
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        // first errored and got skipped
        testSubscriber.awaitValueCount(1, 2 * FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS)
                .assertNoErrors()
                .assertValueCount(1);
        // both registrations were attempted, even though only one succeeded
        verify(client).registerAll(eq(firstLoadBalancerId), argThat(targets -> targets != null && targets.size() == batchSize));
        verify(client).registerAll(eq(secondLoadBalancerId), argThat(targets -> targets != null && targets.size() == batchSize));
        verify(client, never()).deregisterAll(any(), any());
        // we still ignore reconciliation because the failure happens later in the connector
        verifyReconcilerIgnore(firstLoadBalancerId, LoadBalancerTests.ipAddresses(tasks));
        verifyReconcilerIgnore(secondLoadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    }
    /**
     * Errors emitted by the reconciler stream must not terminate the service's
     * events pipeline: batches queued before and after reconciler errors are
     * still flushed.
     */
    @Test
    public void reconciliationErrorsDontHaltOthers() {
        String jobId = UUID.randomUUID().toString();
        String loadBalancerId = "lb-" + UUID.randomUUID().toString();
        ThreadLocalRandom random = ThreadLocalRandom.current();
        int batchSize = random.nextInt(3, 10);
        defaultStubs();
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
        List<Task> tasks = LoadBalancerTests.buildTasksStarted(batchSize, jobId);
        when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
        // an error before the flush window elapses must not kill the pipeline
        reconcilerEvents.onError(new RuntimeException("first error"));
        testScheduler.triggerActions();
        testSubscriber.assertNoErrors();
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        // nor must an error after the batch was flushed
        reconcilerEvents.onError(new RuntimeException("second error"));
        testSubscriber.assertNoErrors().assertValueCount(1);
        verify(client).registerAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == batchSize));
        verify(client, never()).deregisterAll(any(), any());
        verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    }
    /**
     * Removing a load balancer association marks it DISSOCIATED, deregisters
     * all currently-running (Started) tasks from the load balancer, and records
     * their final DEREGISTERED state in the store. Tasks in non-Started states
     * are ignored.
     */
    @Test
    public void removeLoadBalancerDeregisterKnownTargets() {
        String jobId = UUID.randomUUID().toString();
        String loadBalancerId = "lb-" + UUID.randomUUID().toString();
        JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
        defaultStubs();
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
        List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, jobId);
        Collection<LoadBalancerTargetState> expectedTargets = tasks.stream()
                .map(task -> new LoadBalancerTargetState(
                        new LoadBalancerTarget(loadBalancerId, task.getId(), task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP)),
                        DEREGISTERED // final state expected after updates are applied
                ))
                .collect(Collectors.toList());
        // mix in tasks in other states; only the 5 Started tasks should be deregistered
        when(v3JobOperations.getTasks(jobId)).thenReturn(CollectionsExt.merge(
                tasks,
                LoadBalancerTests.buildTasks(2, jobId, TaskState.StartInitiated),
                LoadBalancerTests.buildTasks(2, jobId, TaskState.KillInitiated),
                LoadBalancerTests.buildTasks(3, jobId, TaskState.Finished),
                LoadBalancerTests.buildTasks(1, jobId, TaskState.Disconnected)
        ));
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        // seed the store with an existing association, then remove it via the service
        assertTrue(loadBalancerStore.addOrUpdateLoadBalancer(jobLoadBalancer, JobLoadBalancer.State.ASSOCIATED)
                .await(100, TimeUnit.MILLISECONDS));
        assertTrue(service.removeLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
        List<JobLoadBalancerState> jobLoadBalancers = loadBalancerStore.getAssociations().stream()
                .filter(association -> jobId.equals(association.getJobId()))
                .collect(Collectors.toList());
        assertThat(jobLoadBalancers).isNotEmpty();
        assertThat(jobLoadBalancers).hasSize(1);
        JobLoadBalancerState jobLoadBalancerState = jobLoadBalancers.iterator().next();
        assertEquals(loadBalancerId, jobLoadBalancerState.getLoadBalancerId());
        // the association is kept in the store, but flipped to DISSOCIATED
        assertEquals(JobLoadBalancer.State.DISSOCIATED, jobLoadBalancerState.getState());
        assertFalse(service.getJobLoadBalancers(jobId).toBlocking().getIterator().hasNext());
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(1);
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .containsExactlyInAnyOrderElementsOf(expectedTargets);
        verify(client, never()).registerAll(eq(loadBalancerId), any());
        verify(client).deregisterAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == tasks.size()));
        verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
    }
@Test
public void removeSkipLoadBalancerOperationsOnErrors() {
String firstJobId = UUID.randomUUID().toString();
String secondJobId = UUID.randomUUID().toString();
String loadBalancerId = "lb-" + UUID.randomUUID().toString();
JobLoadBalancer firstLoadBalancer = new JobLoadBalancer(firstJobId, loadBalancerId);
JobLoadBalancer secondLoadBalancer = new JobLoadBalancer(secondJobId, loadBalancerId);
defaultStubs();
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, firstJobId);
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, secondJobId);
when(v3JobOperations.getTasks(firstJobId)).thenThrow(new RuntimeException());
List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, secondJobId);
when(v3JobOperations.getTasks(secondJobId)).thenReturn(tasks);
assertTrue(loadBalancerStore.addOrUpdateLoadBalancer(firstLoadBalancer, JobLoadBalancer.State.ASSOCIATED)
.await(100, TimeUnit.MILLISECONDS));
assertTrue(loadBalancerStore.addOrUpdateLoadBalancer(secondLoadBalancer, JobLoadBalancer.State.ASSOCIATED)
.await(100, TimeUnit.MILLISECONDS));
LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
DefaultLoadBalancerService service = new DefaultLoadBalancerService(
runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
// first fails
assertTrue(service.removeLoadBalancer(firstJobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertFalse(service.getJobLoadBalancers(firstJobId).toBlocking().getIterator().hasNext());
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors().assertValueCount(0);
verify(client, never()).deregisterAll(any(), any());
verify(client, never()).registerAll(any(), any());
verifyNoReconcilerIgnore();
// second succeeds
assertTrue(service.removeLoadBalancer(secondJobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertFalse(service.getJobLoadBalancers(firstJobId).toBlocking().getIterator().hasNext());
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors().assertValueCount(1);
verify(client, never()).registerAll(eq(loadBalancerId), any());
verify(client).deregisterAll(eq(loadBalancerId), argThat(targets -> targets != null && targets.size() == tasks.size()));
verifyReconcilerIgnore(loadBalancerId, LoadBalancerTests.ipAddresses(tasks));
}
@Test
public void goneJobsAreSkipped() {
String jobId = UUID.randomUUID().toString();
String loadBalancerId = "lb-" + UUID.randomUUID().toString();
defaultStubs();
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
// job is gone somewhere in the middle after its pipeline starts
when(v3JobOperations.getTasks(jobId)).thenThrow(JobManagerException.jobNotFound(jobId));
LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
DefaultLoadBalancerService service = new DefaultLoadBalancerService(
runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
// job errored and got skipped
testSubscriber.assertNoErrors().assertValueCount(0);
verify(client, never()).registerAll(any(), any());
verify(client, never()).deregisterAll(any(), any());
verifyNoReconcilerIgnore();
}
    /**
     * Tasks get registered on their job's load balancer only when they
     * transition into the Started state: new-task events and transitions into
     * non-Started states are ignored.
     */
    @Test
    public void newTasksGetRegistered() {
        String jobId = UUID.randomUUID().toString();
        String taskId = UUID.randomUUID().toString();
        String loadBalancerId = "lb-" + UUID.randomUUID().toString();
        PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
        when(client.registerAll(any(), any())).thenReturn(Completable.complete());
        when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
        when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
        // no pre-existing tasks: registrations can only come from update events
        when(v3JobOperations.getTasks(jobId)).thenReturn(Collections.emptyList());
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        // nothing flushed yet since the job has no tasks
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        // three snapshots of the same task as it walks Launched -> StartInitiated -> Started
        Task launched = ServiceJobTask.newBuilder()
                .withJobId(jobId)
                .withId(taskId)
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Launched).build())
                .build();
        Task startingWithIp = launched.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.StartInitiated).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.2.3.4"
                )).build();
        Task started = startingWithIp.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .build();
        LoadBalancerTargetState expectedTarget = new LoadBalancerTargetState(
                new LoadBalancerTarget(loadBalancerId, started.getId(), "1.2.3.4"),
                REGISTERED
        );
        // events with no state transition gets ignored
        taskEvents.onNext(TaskUpdateEvent.newTask(null, launched, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        // events to !Started states get ignored
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, startingWithIp, launched, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        // finally detect the task is UP and gets registered
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, started, startingWithIp, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(1);
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .contains(expectedTarget);
        verify(client).registerAll(eq(loadBalancerId), argThat(set -> set.contains("1.2.3.4")));
        verify(client, never()).deregisterAll(eq(loadBalancerId), any());
        verifyReconcilerIgnore(loadBalancerId, "1.2.3.4");
    }
    /**
     * Tasks transitioning into terminal-ish states (Finished, KillInitiated,
     * Disconnected) get deregistered from their job's load balancer, provided
     * they had a container IP; tasks killed before getting an IP are ignored.
     */
    @Test
    public void finishedTasksGetDeregistered() {
        String jobId = UUID.randomUUID().toString();
        String loadBalancerId = "lb-" + UUID.randomUUID().toString();
        PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
        when(client.registerAll(any(), any())).thenReturn(Completable.complete());
        when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
        when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
        // no pre-existing tasks: all activity is driven by update events below
        when(v3JobOperations.getTasks(jobId)).thenReturn(Collections.emptyList());
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, jobId);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        assertTrue(service.addLoadBalancer(jobId, loadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(jobId).toBlocking().first()).isEqualTo(loadBalancerId);
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        // nothing happens while there are no task events
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        // a task that was prematurely killed before having an IP address associated to it should be ignored
        Task noIp = ServiceJobTask.newBuilder()
                .withJobId(jobId)
                .withId(UUID.randomUUID().toString())
                .withStatus(TaskStatus.newBuilder().withState(TaskState.KillInitiated).build())
                .build();
        Task noIpFinished = noIp.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Finished).build())
                .build();
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, noIpFinished, noIp, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        // 3 state transitions to 3 different terminal events
        // 1) Started -> Finished
        Task first = noIp.toBuilder()
                .withId(UUID.randomUUID().toString())
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.1.1.1"
                )).build();
        Task firstFinished = first.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Finished).build())
                .build();
        LoadBalancerTargetState expectedFirstTarget = new LoadBalancerTargetState(
                new LoadBalancerTarget(loadBalancerId, firstFinished.getId(), "1.1.1.1"),
                DEREGISTERED
        );
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .doesNotContain(expectedFirstTarget);
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, firstFinished, first, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(1);
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .contains(expectedFirstTarget);
        verify(client, never()).registerAll(eq(loadBalancerId), any());
        verify(client).deregisterAll(eq(loadBalancerId), argThat(set -> set.contains("1.1.1.1")));
        verifyReconcilerIgnore(loadBalancerId, "1.1.1.1");
        // 2) Started -> KillInitiated
        Task second = first.toBuilder()
                .withId(UUID.randomUUID().toString())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "2.2.2.2"
                )).build();
        Task secondKilling = second.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.KillInitiated).build())
                .build();
        LoadBalancerTargetState expectedSecondTarget = new LoadBalancerTargetState(
                new LoadBalancerTarget(loadBalancerId, secondKilling.getId(), "2.2.2.2"),
                DEREGISTERED
        );
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .doesNotContain(expectedSecondTarget);
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, secondKilling, second, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(2);
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .contains(expectedSecondTarget);
        verify(client, never()).registerAll(eq(loadBalancerId), any());
        verify(client).deregisterAll(eq(loadBalancerId), argThat(set -> set.contains("2.2.2.2")));
        verifyReconcilerIgnore(loadBalancerId, "2.2.2.2");
        // 3) Started -> Disconnected
        Task third = first.toBuilder()
                .withId(UUID.randomUUID().toString())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "3.3.3.3"
                )).build();
        Task thirdDisconnected = third.toBuilder()
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Disconnected).build())
                .build();
        LoadBalancerTargetState expectedThirdTarget = new LoadBalancerTargetState(
                new LoadBalancerTarget(loadBalancerId, thirdDisconnected.getId(), "3.3.3.3"),
                DEREGISTERED
        );
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .doesNotContain(expectedThirdTarget);
        taskEvents.onNext(TaskUpdateEvent.taskChange(null, thirdDisconnected, third, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(3);
        assertThat(loadBalancerStore.getLoadBalancerTargets(loadBalancerId).collectList().block())
                .contains(expectedThirdTarget);
        verify(client, never()).registerAll(eq(loadBalancerId), any());
        verify(client).deregisterAll(eq(loadBalancerId), argThat(set -> set.contains("3.3.3.3")));
        verifyReconcilerIgnore(loadBalancerId, "3.3.3.3");
    }
@Test
public void movedTasks() {
String taskId = UUID.randomUUID().toString();
String sourceJobId = UUID.randomUUID().toString();
String targetJobId = UUID.randomUUID().toString();
String sourceLoadBalancerId = "lb-" + UUID.randomUUID().toString();
String targetLoadBalancerId = "lb-" + UUID.randomUUID().toString();
String commonLoadBalancerId = "lb-" + UUID.randomUUID().toString();
PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
when(client.registerAll(any(), any())).thenReturn(Completable.complete());
when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, sourceJobId);
LoadBalancerTests.applyValidGetJobMock(v3JobOperations, targetJobId);
LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
DefaultLoadBalancerService service = new DefaultLoadBalancerService(
runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
assertTrue(service.addLoadBalancer(sourceJobId, sourceLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertTrue(service.addLoadBalancer(sourceJobId, commonLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertThat(service.getJobLoadBalancers(sourceJobId).toBlocking().toIterable())
.containsExactlyInAnyOrder(sourceLoadBalancerId, commonLoadBalancerId);
assertTrue(service.addLoadBalancer(targetJobId, targetLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertTrue(service.addLoadBalancer(targetJobId, commonLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
assertThat(service.getJobLoadBalancers(targetJobId).toBlocking().toIterable())
.containsExactlyInAnyOrder(targetLoadBalancerId, commonLoadBalancerId);
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors().assertValueCount(0);
verify(client, never()).registerAll(any(), any());
verify(client, never()).deregisterAll(any(), any());
verifyNoReconcilerIgnore();
Task moved = ServiceJobTask.newBuilder()
.withJobId(targetJobId)
.withId(taskId)
.withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
.withTaskContext(CollectionsExt.asMap(
TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.2.3.4",
TaskAttributes.TASK_ATTRIBUTES_MOVED_FROM_JOB, sourceJobId
)).build();
// detect the task is moved, gets deregistered from the source and registered on the target
taskEvents.onNext(TaskUpdateEvent.newTaskFromAnotherJob(null, moved, callMetadata));
testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors().assertValueCount(2);
verify(client).registerAll(eq(targetLoadBalancerId), argThat(set -> set.contains("1.2.3.4")));
verify(client).deregisterAll(eq(sourceLoadBalancerId), argThat(set -> set.contains("1.2.3.4")));
verifyReconcilerIgnore(targetLoadBalancerId, "1.2.3.4");
verifyReconcilerIgnore(sourceLoadBalancerId, "1.2.3.4");
// load balancers associated with both source and target jobs are not changed
verify(client, never()).registerAll(eq(commonLoadBalancerId), any());
verify(client, never()).deregisterAll(eq(commonLoadBalancerId), any());
verifyNoReconcilerIgnore(commonLoadBalancerId);
verifyNoReconcilerIgnore(commonLoadBalancerId);
}
    /**
     * When only the target job of a moved task has a load balancer, the task
     * is registered there and nothing is deregistered (the source job has no
     * associations).
     */
    @Test
    public void movedTaskOnlyTargetAssociatedWithLoadBalancer() {
        String taskId = UUID.randomUUID().toString();
        String sourceJobId = UUID.randomUUID().toString();
        String targetJobId = UUID.randomUUID().toString();
        String targetLoadBalancerId = "lb-" + UUID.randomUUID().toString();
        PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
        when(client.registerAll(any(), any())).thenReturn(Completable.complete());
        when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
        when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, sourceJobId);
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, targetJobId);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        // only the target job is associated with a load balancer
        assertThat(service.getJobLoadBalancers(sourceJobId).toBlocking().toIterable()).isEmpty();
        assertTrue(service.addLoadBalancer(targetJobId, targetLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(targetJobId).toBlocking().toIterable())
                .containsExactlyInAnyOrder(targetLoadBalancerId);
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        Task moved = ServiceJobTask.newBuilder()
                .withJobId(targetJobId)
                .withId(taskId)
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.2.3.4",
                        TaskAttributes.TASK_ATTRIBUTES_MOVED_FROM_JOB, sourceJobId
                )).build();
        // detect the task is moved and gets registered on the target
        taskEvents.onNext(TaskUpdateEvent.newTaskFromAnotherJob(null, moved, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(1);
        verify(client).registerAll(eq(targetLoadBalancerId), argThat(set -> set.contains("1.2.3.4")));
        verify(client, never()).deregisterAll(any(), any());
        verifyReconcilerIgnore(targetLoadBalancerId, "1.2.3.4");
    }
    /**
     * When only the source job of a moved task has a load balancer, the task
     * is deregistered from it and nothing is registered (the target job has
     * no associations).
     */
    @Test
    public void movedTaskOnlySourceAssociatedWithLoadBalancer() {
        String taskId = UUID.randomUUID().toString();
        String sourceJobId = UUID.randomUUID().toString();
        String targetJobId = UUID.randomUUID().toString();
        String sourceLoadBalancerId = "lb-" + UUID.randomUUID().toString();
        PublishSubject<JobManagerEvent<?>> taskEvents = PublishSubject.create();
        when(client.registerAll(any(), any())).thenReturn(Completable.complete());
        when(client.deregisterAll(any(), any())).thenReturn(Completable.complete());
        when(v3JobOperations.observeJobs()).thenReturn(taskEvents);
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, sourceJobId);
        LoadBalancerTests.applyValidGetJobMock(v3JobOperations, targetJobId);
        LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
        DefaultLoadBalancerService service = new DefaultLoadBalancerService(
                runtime, configuration, client, loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
        AssertableSubscriber<Batch<TargetStateBatchable, String>> testSubscriber = service.events().test();
        // only the source job is associated with a load balancer
        assertTrue(service.addLoadBalancer(sourceJobId, sourceLoadBalancerId).await(100, TimeUnit.MILLISECONDS));
        assertThat(service.getJobLoadBalancers(sourceJobId).toBlocking().toIterable())
                .containsExactlyInAnyOrder(sourceLoadBalancerId);
        assertThat(service.getJobLoadBalancers(targetJobId).toBlocking().toIterable()).isEmpty();
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(0);
        verify(client, never()).registerAll(any(), any());
        verify(client, never()).deregisterAll(any(), any());
        verifyNoReconcilerIgnore();
        Task moved = ServiceJobTask.newBuilder()
                .withJobId(targetJobId)
                .withId(taskId)
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, "1.2.3.4",
                        TaskAttributes.TASK_ATTRIBUTES_MOVED_FROM_JOB, sourceJobId
                )).build();
        // detect the task is moved and gets deregistered from the source
        taskEvents.onNext(TaskUpdateEvent.newTaskFromAnotherJob(null, moved, callMetadata));
        testScheduler.advanceTimeBy(FLUSH_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors().assertValueCount(1);
        verify(client).deregisterAll(eq(sourceLoadBalancerId), argThat(set -> set.contains("1.2.3.4")));
        verify(client, never()).registerAll(any(), any());
        verifyReconcilerIgnore(sourceLoadBalancerId, "1.2.3.4");
    }
// Verifies backfillTargetsToStore(): when enabled, the service snapshots the connector's
// current targets into the store for each tracked association. ASSOCIATED and DISSOCIATED
// load balancers both get their targets persisted as REGISTERED with the placeholder task
// id "BACKFILLED"; a load balancer the cloud reports as REMOVED yields no target records.
@Test(timeout = 30_000)
public void backfillsCurrentTargetsToStore() {
String jobId = UUID.randomUUID().toString();
String associatedId = "lb-" + UUID.randomUUID().toString();
String dissociatedId = "lb-" + UUID.randomUUID().toString();
String removedId = "lb-" + UUID.randomUUID().toString();
LoadBalancerConfiguration configuration = LoadBalancerTests.mockConfiguration(MIN_TIME_IN_QUEUE_MS);
// enable the backfill feature with a bounded concurrency and timeout
when(configuration.isTargetsToStoreBackfillEnabled()).thenReturn(true);
when(configuration.getStoreBackfillConcurrencyLimit()).thenReturn(10);
when(configuration.getStoreBackfillTimeoutMs()).thenReturn(5000L);
// current load balancer state (targets)
when(client.getLoadBalancer(associatedId)).thenReturn(Single.just(
new LoadBalancer(associatedId, LoadBalancer.State.ACTIVE, CollectionsExt.asSet(
"1.1.1.1", "2.2.2.2", "3.3.3.3"
))
));
when(client.getLoadBalancer(dissociatedId)).thenReturn(Single.just(
new LoadBalancer(dissociatedId, LoadBalancer.State.ACTIVE, CollectionsExt.asSet(
"4.4.4.4", "5.5.5.5", "6.6.6.6"
))
));
when(client.getLoadBalancer(removedId)).thenReturn(Single.just(
new LoadBalancer(removedId, LoadBalancer.State.REMOVED, Collections.emptySet())
));
// current load balancers we are managing
loadBalancerStore.addOrUpdateLoadBalancer(
new JobLoadBalancer(jobId, associatedId), JobLoadBalancer.State.ASSOCIATED
).await();
loadBalancerStore.addOrUpdateLoadBalancer(
new JobLoadBalancer(jobId, dissociatedId), JobLoadBalancer.State.DISSOCIATED
).await();
loadBalancerStore.addOrUpdateLoadBalancer(
new JobLoadBalancer(jobId, removedId), JobLoadBalancer.State.ASSOCIATED
).await();
// build the service only after stubbing/seeding, so backfill sees the state above
DefaultLoadBalancerService service = new DefaultLoadBalancerService(runtime, configuration, client,
loadBalancerStore, loadBalancerJobOperations, reconciler, validator, testScheduler);
service.backfillTargetsToStore();
assertThat(loadBalancerStore.getLoadBalancerTargets(associatedId).collectList().block())
.containsExactlyInAnyOrder(
new LoadBalancerTargetState(
new LoadBalancerTarget(associatedId, "BACKFILLED", "1.1.1.1"), REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(associatedId, "BACKFILLED", "2.2.2.2"), REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(associatedId, "BACKFILLED", "3.3.3.3"), REGISTERED
)
);
assertThat(loadBalancerStore.getLoadBalancerTargets(dissociatedId).collectList().block())
.containsExactlyInAnyOrder(
new LoadBalancerTargetState(
new LoadBalancerTarget(dissociatedId, "BACKFILLED", "4.4.4.4"), REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(dissociatedId, "BACKFILLED", "5.5.5.5"), REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(dissociatedId, "BACKFILLED", "6.6.6.6"), REGISTERED
)
);
// REMOVED load balancers are skipped by the backfill
assertThat(loadBalancerStore.getLoadBalancerTargets(removedId).collectList().block()).isEmpty();
}
/**
 * Asserts the reconciler cooldown was activated exactly once per ip address, and only ever
 * for targets that belong to the given load balancer and one of the given ips.
 */
private void verifyReconcilerIgnore(String loadBalancerId, String... ipAddresses) {
    int expectedActivations = ipAddresses.length;
    Set<String> expectedIps = CollectionsExt.asSet(ipAddresses);
    verify(reconciler, times(expectedActivations)).activateCooldownFor(
            argThat(target -> expectedIps.contains(target.getIpAddress())
                    && loadBalancerId.equals(target.getLoadBalancerId())),
            anyLong(),
            any()
    );
}
// Asserts that no reconciliation cooldown was activated for any target at all.
private void verifyNoReconcilerIgnore() {
verify(reconciler, never()).activateCooldownFor(any(), anyLong(), any());
}
// Asserts that no reconciliation cooldown was activated for targets of the given load balancer
// (cooldowns for other load balancers are still allowed).
private void verifyNoReconcilerIgnore(String loadBalancerId) {
verify(reconciler, never()).activateCooldownFor(
argThat(target -> loadBalancerId.equals(target.getLoadBalancerId())),
anyLong(),
any()
);
}
}
| 9,977 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer/service/LoadBalancerTests.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.loadbalancer.service;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.google.protobuf.Empty;
import com.netflix.titus.api.connector.cloud.LoadBalancerConnector;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Container;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.JobStatus;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerJobValidator;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerValidationConfiguration;
import com.netflix.titus.api.loadbalancer.model.sanitizer.NoOpLoadBalancerJobValidator;
import com.netflix.titus.api.loadbalancer.service.LoadBalancerService;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStore;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.AddLoadBalancerRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersResult;
import com.netflix.titus.grpc.protogen.GetJobLoadBalancersResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.LoadBalancerId;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.RemoveLoadBalancerRequest;
import com.netflix.titus.runtime.store.v3.memory.InMemoryLoadBalancerStore;
import com.netflix.titus.testkit.grpc.TestStreamObserver;
import org.apache.commons.lang3.RandomStringUtils;
import org.mockito.stubbing.OngoingStubbing;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import rx.subjects.PublishSubject;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Shared fixtures and helpers for load balancer tests: mock service wiring, task builders,
 * mock configurations, and common gRPC request/assert helpers. All members are static.
 */
public class LoadBalancerTests {
    private static final Logger logger = LoggerFactory.getLogger(LoadBalancerTests.class);

    /** Maximum time helpers wait for an async gRPC response before failing the test. */
    private static final long TIMEOUT_MS = 30_000;

    /**
     * Builds a {@link DefaultLoadBalancerService} backed by mocks: inert job/reconciler event
     * streams, an in-memory store, a no-op validator, and a test scheduler whose virtual time
     * never advances unless the caller drives it.
     */
    public static LoadBalancerService getMockLoadBalancerService() {
        final TitusRuntime runtime = TitusRuntimes.internal();
        final LoadBalancerConfiguration loadBalancerConfig = mockConfiguration(5_000);
        final LoadBalancerConnector connector = mock(LoadBalancerConnector.class);
        final V3JobOperations v3JobOperations = mock(V3JobOperations.class);
        when(v3JobOperations.observeJobs()).thenReturn(PublishSubject.create());
        final LoadBalancerJobOperations loadBalancerJobOperations = new LoadBalancerJobOperations(v3JobOperations);
        final LoadBalancerReconciler reconciler = mock(LoadBalancerReconciler.class);
        when(reconciler.events()).thenReturn(PublishSubject.create());
        final LoadBalancerStore loadBalancerStore = new InMemoryLoadBalancerStore();
        final LoadBalancerJobValidator validator = new NoOpLoadBalancerJobValidator();
        final TestScheduler testScheduler = Schedulers.test();
        return new DefaultLoadBalancerService(runtime, loadBalancerConfig, connector, loadBalancerStore,
                loadBalancerJobOperations, reconciler, validator, testScheduler);
    }

    /**
     * Builds {@code count} service tasks in the Started state for {@code jobId}. Started tasks
     * have container ips assigned; each gets a unique, deterministic ip ("1.1.1.1", "2.2.2.2", ...).
     */
    static List<Task> buildTasksStarted(int count, String jobId) {
        return IntStream.range(0, count).mapToObj(i -> ServiceJobTask.newBuilder()
                .withJobId(jobId)
                .withId(UUID.randomUUID().toString())
                .withStatus(TaskStatus.newBuilder().withState(TaskState.Started).build())
                .withTaskContext(CollectionsExt.asMap(
                        TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP, String.format("%1$d.%1$d.%1$d.%1$d", i + 1)
                ))
                .build()
        ).collect(Collectors.toList());
    }

    /** Builds {@code count} service tasks for {@code jobId} in the given state, without ips. */
    static List<Task> buildTasks(int count, String jobId, TaskState state) {
        return IntStream.range(0, count).mapToObj(i -> ServiceJobTask.newBuilder()
                .withJobId(jobId)
                .withId(UUID.randomUUID().toString())
                .withStatus(TaskStatus.newBuilder().withState(state).build())
                .build()
        ).collect(Collectors.toList());
    }

    /**
     * Mocks a {@link LoadBalancerConfiguration} with effectively-unlimited rate limits and the
     * given minimum batching time.
     */
    static LoadBalancerConfiguration mockConfiguration(long minTimeInQueueMs) {
        final LoadBalancerConfiguration configuration = mock(LoadBalancerConfiguration.class);
        // numbers close to Long.MAX_VALUE will trigger integer overflow bugs in the DefaultTokenBucket impl
        when(configuration.getRateLimitBurst()).thenReturn(Long.MAX_VALUE / 100);
        when(configuration.getRateLimitRefillPerSec()).thenReturn(Long.MAX_VALUE / 100);
        when(configuration.getCooldownPeriodMs()).thenReturn(120_000L);
        when(configuration.getReconciliationDelayMs()).thenReturn(30_000L);
        when(configuration.getMaxTimeMs()).thenReturn(Long.MAX_VALUE);
        when(configuration.getMinTimeMs()).thenReturn(minTimeInQueueMs);
        when(configuration.getBucketSizeMs()).thenReturn(minTimeInQueueMs);
        return configuration;
    }

    /** Mocks a validation configuration capped at {@code maxLbsPerJob} load balancers per job. */
    static LoadBalancerValidationConfiguration mockValidationConfig(int maxLbsPerJob) {
        final LoadBalancerValidationConfiguration config = mock(LoadBalancerValidationConfiguration.class);
        when(config.getMaxLoadBalancersPerJob()).thenReturn(maxLbsPerJob);
        return config;
    }

    /**
     * Common testing helper that gets load balancers for a job, ensures the gRPC request was
     * successful, and returns the load balancer ids as a set.
     *
     * @throws AssertionError when no response arrives within {@link #TIMEOUT_MS}
     */
    public static Set<LoadBalancerId> getLoadBalancersForJob(String jobIdStr,
                                                             BiConsumer<JobId, TestStreamObserver<GetJobLoadBalancersResult>> getJobLoadBalancers) {
        JobId jobId = JobId.newBuilder().setId(jobIdStr).build();
        TestStreamObserver<GetJobLoadBalancersResult> getResponse = new TestStreamObserver<>();
        getJobLoadBalancers.accept(jobId, getResponse);
        GetJobLoadBalancersResult result;
        try {
            result = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            logger.error("Exception in getLoadBalancersForJob", e);
            // The previous `assert false` was a no-op when -ea is not set and masked the real
            // failure with an NPE on `result` below; fail loudly and preserve the cause instead.
            throw new AssertionError("getJobLoadBalancers did not complete in time", e);
        }
        assertThat(getResponse.hasError()).isFalse();
        return new HashSet<>(result.getLoadBalancersList());
    }

    /**
     * Common testing helper that gets all load balancers for a given page range, ensures the gRPC
     * request was successful, and returns the page result.
     *
     * @throws AssertionError when no response arrives within {@link #TIMEOUT_MS}
     */
    public static GetAllLoadBalancersResult getAllLoadBalancers(Supplier<Page> pageSupplier,
                                                                BiConsumer<GetAllLoadBalancersRequest, TestStreamObserver<GetAllLoadBalancersResult>> getAllLoadBalancers) {
        GetAllLoadBalancersRequest request = GetAllLoadBalancersRequest.newBuilder()
                .setPage(pageSupplier.get())
                .build();
        TestStreamObserver<GetAllLoadBalancersResult> getResponse = new TestStreamObserver<>();
        getAllLoadBalancers.accept(request, getResponse);
        GetAllLoadBalancersResult result;
        try {
            result = getResponse.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            logger.error("Exception in getAllLoadBalancers", e);
            // See getLoadBalancersForJob: rethrow instead of the old no-op `assert false`.
            throw new AssertionError("getAllLoadBalancers did not complete in time", e);
        }
        assertThat(getResponse.hasError()).isFalse();
        return result;
    }

    /**
     * Common testing helper that adds a specified number of random load balancer ids to
     * a specified number of jobs ("Titus-1".."Titus-N"). The helper ensures each gRPC request
     * was successful and returns the job ids and load balancer ids as a map.
     */
    public static Map<String, Set<LoadBalancerId>> putLoadBalancersPerJob(int numJobs, int numLoadBalancersPerJob,
                                                                          BiConsumer<AddLoadBalancerRequest, TestStreamObserver<Empty>> putLoadBalancer) {
        // Create job entries
        Map<String, Set<LoadBalancerId>> jobIdToLoadBalancersMap = new ConcurrentHashMap<>();
        for (int i = 1; i <= numJobs; i++) {
            jobIdToLoadBalancersMap.put("Titus-" + i, new HashSet<>());
        }
        // For each job, insert load balancers
        jobIdToLoadBalancersMap.forEach((jobId, loadBalancerSet) -> {
            for (int i = 0; i < numLoadBalancersPerJob; i++) {
                LoadBalancerId loadBalancerId = LoadBalancerId.newBuilder()
                        .setId(RandomStringUtils.randomAlphanumeric(10))
                        .build();
                AddLoadBalancerRequest request = AddLoadBalancerRequest.newBuilder()
                        .setJobId(jobId)
                        .setLoadBalancerId(loadBalancerId)
                        .build();
                TestStreamObserver<Empty> addResponse = new TestStreamObserver<>();
                putLoadBalancer.accept(request, addResponse);
                assertThatCode(addResponse::awaitDone).doesNotThrowAnyException();
                assertThat(addResponse.hasError()).isFalse();
                loadBalancerSet.add(loadBalancerId);
            }
        });
        return jobIdToLoadBalancersMap;
    }

    /**
     * Common testing helper that removes a load balancer id from a job. The helper ensures the
     * gRPC request was successful.
     */
    public static void removeLoadBalancerFromJob(String jobId, LoadBalancerId loadBalancerId,
                                                 BiConsumer<RemoveLoadBalancerRequest, TestStreamObserver<Empty>> removeLoadBalancers) {
        RemoveLoadBalancerRequest request = RemoveLoadBalancerRequest.newBuilder()
                .setJobId(jobId)
                .setLoadBalancerId(loadBalancerId)
                .build();
        TestStreamObserver<Empty> removeResponse = new TestStreamObserver<>();
        removeLoadBalancers.accept(request, removeResponse);
        assertThatCode(removeResponse::awaitDone).doesNotThrowAnyException();
        assertThat(removeResponse.hasError()).isFalse();
    }

    /**
     * Configures a V3 mock to return a job from getJob() that passes validation
     * (service job, Accepted state, IP allocation enabled).
     */
    static OngoingStubbing<?> applyValidGetJobMock(V3JobOperations mockedV3Ops, String jobId) {
        return when(mockedV3Ops.getJob(jobId)).thenReturn(Optional.of(Job.<ServiceJobExt>newBuilder()
                .withId(jobId)
                .withStatus(JobStatus.newBuilder()
                        .withState(JobState.Accepted)
                        .build())
                .withJobDescriptor(JobDescriptor.<ServiceJobExt>newBuilder()
                        .withExtensions(ServiceJobExt.newBuilder().build())
                        .withContainer(Container.newBuilder()
                                .withImage(Image.newBuilder().build())
                                .withContainerResources(ContainerResources.newBuilder()
                                        .withAllocateIP(true)
                                        .build())
                                .build())
                        .build())
                .build()));
    }

    /** Extracts the container ip attribute from each task, preserving order. */
    static String[] ipAddresses(List<Task> tasks) {
        return tasks.stream()
                .map(t -> t.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP))
                .toArray(String[]::new);
    }

    /** Supplies a page request addressed by page number. */
    public static Supplier<Page> buildPageSupplier(int pageNumber, int pageSize) {
        return () -> Page.newBuilder().setPageNumber(pageNumber).setPageSize(pageSize).build();
    }

    /** Supplies a page request addressed by cursor. */
    public static Supplier<Page> buildPageSupplier(String cursor, int pageSize) {
        return () -> Page.newBuilder().setCursor(cursor).setPageSize(pageSize).build();
    }
}
| 9,978 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/loadbalancer/service/DefaultLoadBalancerReconcilerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.loadbalancer.service;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.titus.api.connector.cloud.LoadBalancer;
import com.netflix.titus.api.connector.cloud.LoadBalancerConnector;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.service.JobManagerException;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer.State;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancerState;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTargetState;
import com.netflix.titus.api.loadbalancer.store.LoadBalancerStore;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.rx.batch.Priority;
import com.netflix.titus.runtime.store.v3.memory.InMemoryLoadBalancerStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.stubbing.OngoingStubbing;
import rx.Completable;
import rx.Single;
import rx.observers.AssertableSubscriber;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static com.jayway.awaitility.Awaitility.await;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hamcrest.Matchers.greaterThan;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.when;
public class DefaultLoadBalancerReconcilerTest {
// NOTE(review): 10_0000 == 100_000 ms (100s); the unusual digit grouping suggests 10_000 ms
// may have been intended — confirm before relying on this timeout.
private static final long TEST_TIMEOUT_MS = 10_0000;
// Fresh random ids per test so state in the store never collides across tests.
private String loadBalancerId;
private String jobId;
// Reconciliation delay (virtual time); awaitReconciliationRuns() advances the clock by this much.
private long delayMs;
private LoadBalancerStore store;
private LoadBalancerConnector connector;
private V3JobOperations v3JobOperations;
// Virtual-time scheduler: reconciliation only runs when tests advance it explicitly.
private TestScheduler testScheduler;
private LoadBalancerReconciler reconciler;
// Incremented by the callback passed to the reconciler in setUp(); used to await pass completion.
private AtomicLong reconciliationCount;
// Collects TargetStateBatchable updates emitted by reconciler.events().
private AssertableSubscriber<TargetStateBatchable> subscriber;
// Wires a real DefaultLoadBalancerReconciler against an in-memory store, a mocked cloud
// connector (defaulting to an ACTIVE, empty load balancer) and mocked job operations, all
// driven by a virtual-time TestScheduler. The events() subscription must happen here so no
// reconciliation output is missed by the tests.
@Before
public void setUp() throws Exception {
loadBalancerId = UUID.randomUUID().toString();
jobId = UUID.randomUUID().toString();
delayMs = 60_000L;/* 1 min */
store = new InMemoryLoadBalancerStore();
connector = mock(LoadBalancerConnector.class);
when(connector.getLoadBalancer(loadBalancerId)).thenReturn(Single.just(
new LoadBalancer(loadBalancerId, LoadBalancer.State.ACTIVE, Collections.emptySet())
));
v3JobOperations = mock(V3JobOperations.class);
testScheduler = Schedulers.test();
reconciliationCount = new AtomicLong(0);
// the counter callback fires on each reconciliation pass; awaitReconciliationRuns() polls it
reconciler = new DefaultLoadBalancerReconciler(mockConfigWithDelay(delayMs), store, connector,
new LoadBalancerJobOperations(v3JobOperations), () -> reconciliationCount.incrementAndGet(),
new NoopRegistry(), testScheduler);
subscriber = reconciler.events().test();
}
// Tears down the event subscription first, then stops the reconciler's internal loop.
@After
public void shutdown() {
subscriber.unsubscribe();
reconciler.shutdown();
}
/**
 * Advances the virtual clock by one reconciliation delay, {@code n} times, blocking after
 * each advance until the reconciliation counter has observably moved forward.
 */
private void awaitReconciliationRuns(int n) {
    int remaining = n;
    while (remaining-- > 0) {
        long countBefore = reconciliationCount.get();
        testScheduler.advanceTimeBy(delayMs, TimeUnit.MILLISECONDS);
        // the events stream must stay alive across reconciliation iterations
        subscriber.assertNoTerminalEvent();
        await().atMost(2, TimeUnit.SECONDS).untilAtomic(reconciliationCount, greaterThan(countBefore));
    }
}
/**
 * A newly ASSOCIATED job whose running tasks are absent from the load balancer must have
 * every task ip emitted as a REGISTERED update on the first reconciliation pass.
 */
@Test(timeout = TEST_TIMEOUT_MS)
public void registerMissingTargets() {
    List<Task> runningTasks = LoadBalancerTests.buildTasksStarted(5, jobId);
    JobLoadBalancerState association = new JobLoadBalancerState(
            new JobLoadBalancer(jobId, loadBalancerId), State.ASSOCIATED);
    when(v3JobOperations.getTasks(jobId)).thenReturn(runningTasks);
    store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState()).await();

    // nothing is emitted before the first reconciliation pass
    testScheduler.triggerActions();
    subscriber.assertNotCompleted().assertNoValues();

    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertValueCount(5);
    for (TargetStateBatchable update : subscriber.getOnNextEvents()) {
        assertThat(update.getState()).isEqualTo(LoadBalancerTarget.State.REGISTERED);
        // reconciliation always generates Priority.Low events that can be replaced by higher priority reactive updates
        assertThat(update.getPriority()).isEqualTo(Priority.LOW);
        assertThat(update.getLoadBalancerId()).isEqualTo(loadBalancerId);
    }
}
// Targets present in the cloud load balancer whose tasks are gone are deregistered only when
// our store shows we registered them; ip addresses we never touched are left alone.
@Test(timeout = TEST_TIMEOUT_MS)
public void deregisterExtraTargetsPreviouslyRegisteredByUs() {
List<Task> tasks = LoadBalancerTests.buildTasksStarted(3, jobId);
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
JobLoadBalancerState association = new JobLoadBalancerState(jobLoadBalancer, State.ASSOCIATED);
when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
// replace the default stub from setUp(): the cloud reports 6 registered ips
reset(connector);
when(connector.getLoadBalancer(loadBalancerId)).thenReturn(Single.just(new LoadBalancer(
loadBalancerId,
LoadBalancer.State.ACTIVE,
CollectionsExt.asSet("1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6")
)));
store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState()).await();
store.addOrUpdateTargets(
// 3 running tasks were previously registered by us and are in the load balancer
new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, tasks.get(0).getId(), "1.1.1.1"),
LoadBalancerTarget.State.REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, tasks.get(1).getId(), "2.2.2.2"),
LoadBalancerTarget.State.REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, tasks.get(2).getId(), "3.3.3.3"),
LoadBalancerTarget.State.REGISTERED
),
// Next two ips were previously registered by us, but their tasks do not exist anymore
new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, "some-dead-task", "4.4.4.4"),
LoadBalancerTarget.State.REGISTERED
),
new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, "another-dead-task", "5.5.5.5"),
LoadBalancerTarget.State.DEREGISTERED
)
// no record for 6.6.6.6, that ip address was not registered by us, and won't be touched
).block();
testScheduler.triggerActions();
subscriber.assertNotCompleted().assertNoValues();
awaitReconciliationRuns(1);
// only the two dead-task ips are deregistered
subscriber.assertNotCompleted().assertValueCount(2);
subscriber.getOnNextEvents().forEach(update -> {
assertThat(update.getState()).isEqualTo(LoadBalancerTarget.State.DEREGISTERED);
assertThat(update.getPriority()).isEqualTo(Priority.LOW);
assertThat(update.getLoadBalancerId()).isEqualTo(loadBalancerId);
assertThat(update.getIdentifier().getTaskId()).isIn("some-dead-task", "another-dead-task");
assertThat(update.getIdentifier().getIpAddress()).isIn("4.4.4.4", "5.5.5.5");
});
}
// Updates for targets under an active cooldown window are suppressed; once the window
// (5 reconciliation delays here) expires, the pending registrations are emitted and then
// re-emitted on later passes because the mocked connector never reflects them.
@Test(timeout = TEST_TIMEOUT_MS)
public void updatesAreIgnoredWhileCooldownIsActive() {
long cooldownPeriodMs = 5 * delayMs;
List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, jobId);
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
JobLoadBalancerState association = new JobLoadBalancerState(jobLoadBalancer, State.ASSOCIATED);
when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState()).await();
// put every task's target under cooldown before any reconciliation runs
for (Task task : tasks) {
String ipAddress = task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP);
LoadBalancerTarget target = new LoadBalancerTarget(loadBalancerId, task.getId(), ipAddress);
reconciler.activateCooldownFor(target, cooldownPeriodMs, TimeUnit.MILLISECONDS);
}
testScheduler.triggerActions();
subscriber.assertNotCompleted().assertNoValues();
// no updates while cooldown is active in the first iteration
awaitReconciliationRuns(1);
subscriber.assertNotCompleted().assertNoValues();
awaitReconciliationRuns(4);
subscriber.assertNotCompleted().assertValueCount(5);
subscriber.getOnNextEvents().forEach(update -> {
assertThat(update.getState()).isEqualTo(LoadBalancerTarget.State.REGISTERED);
assertThat(update.getPriority()).isEqualTo(Priority.LOW);
assertThat(update.getLoadBalancerId()).isEqualTo(loadBalancerId);
});
// try again since it still can't see updates applied on the connector
awaitReconciliationRuns(1);
subscriber.assertNotCompleted().assertValueCount(10);
}
/**
 * A transient error fetching a job's tasks causes the job to be skipped for that pass only;
 * the next reconciliation pass picks it up and registers its targets normally.
 */
@Test(timeout = TEST_TIMEOUT_MS)
public void jobsWithErrorsAreIgnored() {
    List<Task> runningTasks = LoadBalancerTests.buildTasksStarted(5, jobId);
    JobLoadBalancerState association = new JobLoadBalancerState(
            new JobLoadBalancer(jobId, loadBalancerId), State.ASSOCIATED);
    when(v3JobOperations.getTasks(jobId))
            .thenThrow(JobManagerException.class) // first fails
            .thenReturn(runningTasks);
    store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState()).await();

    testScheduler.triggerActions();
    subscriber.assertNotCompleted().assertNoValues();

    // first pass: getTasks throws, so nothing is emitted
    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertNoValues();

    // second pass succeeds and registers all 5 tasks
    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertValueCount(5);
    for (TargetStateBatchable update : subscriber.getOnNextEvents()) {
        assertThat(update.getState()).isEqualTo(LoadBalancerTarget.State.REGISTERED);
        assertThat(update.getPriority()).isEqualTo(Priority.LOW);
        assertThat(update.getLoadBalancerId()).isEqualTo(loadBalancerId);
    }
}
// An error from the cloud connector for one load balancer must not prevent reconciliation
// of the remaining load balancers in the same pass.
@Test(timeout = TEST_TIMEOUT_MS)
public void connectorErrorsDoNotHaltReconciliation() {
String failingLoadBalancerId = UUID.randomUUID().toString();
List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, jobId);
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
JobLoadBalancerState association = new JobLoadBalancerState(jobLoadBalancer, State.ASSOCIATED);
JobLoadBalancer failingJobLoadBalancer = new JobLoadBalancer(jobId, failingLoadBalancerId);
JobLoadBalancerState failingAssociation = new JobLoadBalancerState(failingJobLoadBalancer, State.ASSOCIATED);
when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
// one of the two load balancers always fails on the connector
when(connector.getLoadBalancer(failingLoadBalancerId)).thenReturn(Single.error(new RuntimeException("rate limit")));
Completable.merge(
store.addOrUpdateLoadBalancer(failingAssociation.getJobLoadBalancer(), failingAssociation.getState()),
store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState())
).await();
testScheduler.triggerActions();
subscriber.assertNoErrors().assertNotCompleted().assertNoValues();
awaitReconciliationRuns(1);
// failingLoadBalancerId gets ignored
subscriber.assertNoErrors().assertNotCompleted().assertValueCount(5);
subscriber.getOnNextEvents().forEach(update -> {
assertThat(update.getState()).isEqualTo(LoadBalancerTarget.State.REGISTERED);
assertThat(update.getPriority()).isEqualTo(Priority.LOW);
assertThat(update.getLoadBalancerId()).isEqualTo(loadBalancerId);
});
}
// Associations whose job no longer exists are marked as orphans, transitioned to
// Dissociated, and eventually swept from the store over successive reconciliation passes.
@Test
public void orphanJobAssociationsAreSetAsDissociatedAndRemoved() {
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
// the job is gone from job management
when(v3JobOperations.getTasks(jobId)).thenThrow(JobManagerException.jobNotFound(jobId));
when(v3JobOperations.getJob(jobId)).thenReturn(Optional.empty());
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, State.ASSOCIATED)
.await(5, TimeUnit.SECONDS)).isTrue();
testScheduler.triggerActions();
subscriber.assertNotCompleted().assertNoValues();
// let some reconciliation iterations run for:
// 1. mark as orphan
// 2. ensure no more targets are stored
// 3. sweep
awaitReconciliationRuns(3);
assertThat(store.getAssociations()).isEmpty();
assertThat(store.getAssociatedLoadBalancersSetForJob(jobId)).isEmpty();
}
// A load balancer removed outside of Titus: targets we registered get DEREGISTERED updates,
// the association transitions to Dissociated, then both targets and the association are
// swept from the store over successive reconciliation passes.
@Test
public void orphanLoadBalancerAssociationsAreSetAsDissociatedAndRemoved() {
List<Task> tasks = LoadBalancerTests.buildTasksStarted(5, jobId);
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
// replace the default stub so the stubbing can be extended below
reset(connector);
OngoingStubbing<Single<LoadBalancer>> ongoingStubbing = when(connector.getLoadBalancer(loadBalancerId))
.thenReturn(Single.just(new LoadBalancer(
loadBalancerId,
LoadBalancer.State.ACTIVE,
CollectionsExt.asSet("1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5")
)));
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, State.ASSOCIATED)
.await(5, TimeUnit.SECONDS)).isTrue();
// all targets were previously registered by us
store.addOrUpdateTargets(tasks.stream()
.map(task -> new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, task.getId(),
task.getTaskContext().get(TaskAttributes.TASK_ATTRIBUTES_CONTAINER_IP)),
LoadBalancerTarget.State.REGISTERED))
.collect(Collectors.toList())
).block();
testScheduler.triggerActions();
subscriber.assertNotCompleted().assertNoValues();
// load balancer was removed outside of Titus
ongoingStubbing.thenReturn(Single.just(
new LoadBalancer(loadBalancerId, LoadBalancer.State.REMOVED, Collections.emptySet())
));
// Let a few iterations run so all phases can be executed:
// 1. mark as orphan
// 2. update targets as DEREGISTERED
awaitReconciliationRuns(2);
subscriber.awaitValueCount(5, TEST_TIMEOUT_MS / 2, TimeUnit.MILLISECONDS)
.assertNoErrors();
assertThat(subscriber.getOnNextEvents()).allMatch(update -> update.getState().equals(LoadBalancerTarget.State.DEREGISTERED));
// simulate all targets got DEREGISTERED
List<LoadBalancerTargetState> currentTargets = store.getLoadBalancerTargets(loadBalancerId).collectList().block();
assertThat(currentTargets).isNotNull();
store.addOrUpdateTargets(currentTargets.stream()
.map(targetState -> targetState.getLoadBalancerTarget().withState(LoadBalancerTarget.State.DEREGISTERED))
.collect(Collectors.toList())
).block();
// 3. update orphan as Dissociated
awaitReconciliationRuns(1);
assertThat(store.getAssociations()).containsOnly(new JobLoadBalancerState(jobLoadBalancer, State.DISSOCIATED));
// 4. sweep all targets
// 5. sweep all Dissociated
awaitReconciliationRuns(2);
assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block()).isEmpty();
assertThat(store.getAssociations()).isEmpty();
assertThat(store.getAssociatedLoadBalancersSetForJob(jobId)).isEmpty();
}
// A Dissociated association is kept until every target we track has been deregistered from
// the cloud and its record removed; only then is the association itself cleaned up.
@Test
public void dissociatedJobsAreNotRemovedUntilAllTargetsAreDeregisteredAndRemoved() throws InterruptedException {
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
// the job is gone, but one ip is still registered in the cloud load balancer
when(v3JobOperations.getTasks(jobId)).thenThrow(JobManagerException.jobNotFound(jobId));
when(v3JobOperations.getJob(jobId)).thenReturn(Optional.empty());
reset(connector);
when(connector.getLoadBalancer(loadBalancerId)).thenReturn(Single.just(
new LoadBalancer(loadBalancerId, LoadBalancer.State.ACTIVE, Collections.singleton("1.2.3.4"))
));
store.addOrUpdateTargets(new LoadBalancerTargetState(
new LoadBalancerTarget(loadBalancerId, "some-task", "1.2.3.4"),
LoadBalancerTarget.State.DEREGISTERED
)).block();
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, State.DISSOCIATED)
.await(5, TimeUnit.SECONDS)).isTrue();
testScheduler.triggerActions();
subscriber.assertNotCompleted().assertNoValues();
// 1. deregister
awaitReconciliationRuns(1);
subscriber.assertNoTerminalEvent().assertValueCount(1);
// association is retained while the target is still in the cloud
assertThat(store.getAssociations()).isNotEmpty().hasSize(1);
// the cloud now reports the target as gone
when(connector.getLoadBalancer(loadBalancerId)).thenReturn(Single.just(
new LoadBalancer(loadBalancerId, LoadBalancer.State.ACTIVE, Collections.emptySet())
));
// Let a few iterations run so the remaining phases have a chance to complete:
// 2. clean up target state
// 3. clean up association
awaitReconciliationRuns(3);
assertThat(store.getAssociations()).isEmpty();
assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block()).isEmpty();
}
@Test(timeout = TEST_TIMEOUT_MS)
public void deregisteredTargetsAreCleanedUp() {
    // Scenario: one running task whose IP is correctly registered, plus stale target records
    // that the reconciliation loop must fix up and eventually remove from the store.
    List<Task> tasks = LoadBalancerTests.buildTasksStarted(1, jobId);
    JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, loadBalancerId);
    JobLoadBalancerState association = new JobLoadBalancerState(jobLoadBalancer, State.ASSOCIATED);
    when(v3JobOperations.getTasks(jobId)).thenReturn(tasks);
    // Drop any stubbing installed by the fixture so the connector reports exactly two registered IPs.
    reset(connector);
    when(connector.getLoadBalancer(loadBalancerId)).thenReturn(Single.just(new LoadBalancer(
            loadBalancerId,
            LoadBalancer.State.ACTIVE,
            CollectionsExt.asSet("1.1.1.1", "10.10.10.10")
    )));
    store.addOrUpdateLoadBalancer(association.getJobLoadBalancer(), association.getState()).await();
    store.addOrUpdateTargets(
            // running task was previously registered by us and is in the load balancer
            new LoadBalancerTargetState(
                    new LoadBalancerTarget(loadBalancerId, tasks.get(0).getId(), "1.1.1.1"),
                    LoadBalancerTarget.State.REGISTERED
            ),
            // Next two ips were previously registered by us, but their tasks do not exist anymore and are not in the load balancer anymore
            new LoadBalancerTargetState(
                    // stored as REGISTERED but its task is gone: an inconsistent record that needs a DEREGISTER update
                    new LoadBalancerTarget(loadBalancerId, "target-inconsistent", "2.2.2.2"),
                    LoadBalancerTarget.State.REGISTERED
            ),
            new LoadBalancerTargetState(
                    // already DEREGISTERED and not in the load balancer: eligible for immediate cleanup
                    new LoadBalancerTarget(loadBalancerId, "target-not-in-lb", "3.3.3.3"),
                    LoadBalancerTarget.State.DEREGISTERED
            )
            // no record for 10.10.10.10, that ip address was not registered by us, and won't be touched
    ).block();
    // no reconciliation ran yet
    testScheduler.triggerActions();
    subscriber.assertNotCompleted().assertNoValues();
    assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block()).hasSize(3);
    // first pass, the one stored as DEREGISTERED is cleaned up, the other in an inconsistent state is fixed
    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertValueCount(1);
    TargetStateBatchable inconsistencyFix = subscriber.getOnNextEvents().get(0);
    assertThat(inconsistencyFix.getState()).isEqualTo(LoadBalancerTarget.State.DEREGISTERED);
    assertThat(inconsistencyFix.getLoadBalancerId()).isEqualTo(loadBalancerId);
    assertThat(inconsistencyFix.getIpAddress()).isEqualTo("2.2.2.2");
    List<LoadBalancerTargetState> storedTargets = store.getLoadBalancerTargets(loadBalancerId).collectList().block();
    assertThat(storedTargets).hasSize(2);
    // 3.3.3.3 (DEREGISTERED, absent from the load balancer) must be gone from the store
    assertThat(storedTargets).doesNotContain(new LoadBalancerTargetState(
            new LoadBalancerTarget(loadBalancerId, "target-not-in-lb", "3.3.3.3"),
            LoadBalancerTarget.State.DEREGISTERED
    ));
    // update with fix not applied yet, keep trying: a second identical DEREGISTER update is emitted
    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertValueCount(2);
    TargetStateBatchable update2 = subscriber.getOnNextEvents().get(0);
    assertThat(update2.getState()).isEqualTo(LoadBalancerTarget.State.DEREGISTERED);
    assertThat(update2.getLoadBalancerId()).isEqualTo(loadBalancerId);
    assertThat(update2.getIpAddress()).isEqualTo("2.2.2.2");
    assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block()).hasSize(2);
    // simulate the update with the fix above being applied
    store.addOrUpdateTargets(new LoadBalancerTargetState(
            new LoadBalancerTarget(loadBalancerId, "target-inconsistent", "2.2.2.2"),
            LoadBalancerTarget.State.DEREGISTERED
    )).block();
    // finally, corrected record is now cleaned up
    awaitReconciliationRuns(1);
    subscriber.assertNotCompleted().assertValueCount(2); // no changes needed
    // only the healthy, genuinely registered target survives
    assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block())
            .containsOnly(new LoadBalancerTargetState(
                    new LoadBalancerTarget(loadBalancerId, tasks.get(0).getId(), "1.1.1.1"),
                    LoadBalancerTarget.State.REGISTERED
            ));
}
/**
 * Builds a mock {@link LoadBalancerConfiguration} whose reconciliation delay is
 * {@code delayMs} and whose reconciliation timeout is ten times that delay.
 */
private LoadBalancerConfiguration mockConfigWithDelay(long delayMs) {
    final LoadBalancerConfiguration config = mock(LoadBalancerConfiguration.class);
    when(config.getReconciliationDelayMs()).thenReturn(delayMs);
    when(config.getReconciliationTimeoutMs()).thenReturn(10 * delayMs);
    return config;
}
}
| 9,979 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/audit | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/audit/service/AuditEventDiskWriterTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.audit.service;
import java.io.File;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.audit.model.AuditLogEvent;
import com.netflix.titus.api.audit.model.AuditLogEvent.Type;
import com.netflix.titus.api.audit.service.AuditLogService;
import com.netflix.titus.api.model.event.UserRequestEvent;
import com.netflix.titus.common.util.IOExt;
import com.netflix.titus.common.util.rx.eventbus.RxEventBus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import rx.subjects.PublishSubject;
import static com.netflix.titus.master.audit.service.AuditEventDiskWriter.LOG_FILE_NAME;
import static com.netflix.titus.master.audit.service.AuditEventDiskWriter.WRITE_INTERVAL_MS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AuditEventDiskWriterTest {
private static final String LOG_FOLDER = "build/auditLogs";
private static final File LOG_FILE = new File(LOG_FOLDER, LOG_FILE_NAME);
private TestScheduler testScheduler = Schedulers.test();
private final RxEventBus rxEventBus = mock(RxEventBus.class);
private final AuditLogConfiguration config = mock(AuditLogConfiguration.class);
private final AuditLogService auditLogService = mock(AuditLogService.class);
private AuditEventDiskWriter auditEventDiskWriter;
private PublishSubject<AuditLogEvent> eventSubject = PublishSubject.create();
private PublishSubject<UserRequestEvent> rxEventSubject = PublishSubject.create();
@Before
public void setUp() throws Exception {
LOG_FILE.delete();
assertThat(LOG_FILE.exists()).isFalse();
when(config.getAuditLogFolder()).thenReturn(LOG_FOLDER);
when(auditLogService.auditLogEvents()).thenReturn(eventSubject);
when(rxEventBus.listen(AuditEventDiskWriter.class.getSimpleName(), UserRequestEvent.class)).thenReturn(rxEventSubject);
auditEventDiskWriter = new AuditEventDiskWriter(config, auditLogService, rxEventBus, testScheduler);
}
@After
public void tearDown() throws Exception {
auditEventDiskWriter.shutdown();
}
@Test
public void testLogWrite() throws Exception {
eventSubject.onNext(createEvent());
rxEventSubject.onNext(createHttpEvent());
testScheduler.advanceTimeBy(WRITE_INTERVAL_MS, TimeUnit.MILLISECONDS);
List<String> lines = IOExt.readLines(LOG_FILE);
assertThat(lines).hasSize(2);
}
private AuditLogEvent createEvent() {
return new AuditLogEvent(Type.JOB_SUBMIT, "operand", "data", System.currentTimeMillis());
}
private UserRequestEvent createHttpEvent() {
return new UserRequestEvent("POST /api/v2/jobs", "userX", "jobId=123", System.currentTimeMillis());
}
} | 9,980 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/audit | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/audit/service/DefaultAuditLogServiceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.audit.service;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.audit.model.AuditLogEvent;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class DefaultAuditLogServiceTest {
private static final int TIMEOUT_MS = 5000;
private final DefaultAuditLogService auditLogService = new DefaultAuditLogService();
@Test
public void testEventPropagation() throws Exception {
ExtTestSubscriber<AuditLogEvent> testSubscriber = new ExtTestSubscriber<>();
auditLogService.auditLogEvents().subscribe(testSubscriber);
auditLogService.submit(createEvent());
assertThat(testSubscriber.takeNext(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isNotNull();
}
private AuditLogEvent createEvent() {
return new AuditLogEvent(AuditLogEvent.Type.JOB_SUBMIT, "operand", "data", System.currentTimeMillis());
}
} | 9,981 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2/rest/LeaderResourceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.endpoint.v2.rest;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import com.netflix.titus.api.endpoint.v2.rest.representation.LeaderRepresentation;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import com.netflix.titus.api.supervisor.service.MasterMonitor;
import com.netflix.titus.runtime.endpoint.common.rest.JsonMessageReaderWriter;
import com.netflix.titus.runtime.endpoint.common.rest.TitusExceptionMapper;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.jaxrs.JaxRsServerResource;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.springframework.test.web.reactive.server.WebTestClient;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Category(IntegrationTest.class)
public class LeaderResourceTest {
private static final MasterDescription LATEST_MASTER = new MasterDescription(
"masterHost", "127.0.0.1", 7001, "/api/status", System.currentTimeMillis()
);
private static MasterMonitor masterMonitor = mock(MasterMonitor.class);
private static final LeaderResource restService = new LeaderResource(masterMonitor);
@ClassRule
public static final JaxRsServerResource<LeaderResource> jaxRsServer = JaxRsServerResource.newBuilder(restService)
.withProviders(new JsonMessageReaderWriter(), new TitusExceptionMapper())
.build();
private static WebTestClient testClient;
@BeforeClass
public static void setUpClass() {
testClient = WebTestClient.bindToServer()
.baseUrl(jaxRsServer.getBaseURI())
.defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON)
.build();
}
@Test(timeout = 30_000)
public void testLeaderReply() {
when(masterMonitor.getLatestLeader()).thenReturn(LATEST_MASTER);
testClient.get().uri("/api/v2/leader").exchange()
.expectBody(LeaderRepresentation.class)
.value(result -> {
assertThat(result.getHostname()).isEqualTo("masterHost");
assertThat(result.getHostIP()).isEqualTo("127.0.0.1");
assertThat(result.getApiPort()).isEqualTo(7001);
assertThat(result.getApiStatusUri()).isEqualTo("/api/status");
});
}
} | 9,982 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2/rest/ReservationUsageCalculatorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.endpoint.v2.rest;
import java.util.Arrays;
import java.util.Map;
import com.netflix.titus.api.endpoint.v2.rest.representation.ReservationUsage;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.common.data.generator.DataGenerator;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.oneTaskBatchJobDescriptor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ReservationUsageCalculatorTest {
private static final JobDescriptor<BatchJobExt> JOB_DESCRIPTOR = oneTaskBatchJobDescriptor().but(JobFunctions.ofBatchSize(2));
private static final ContainerResources CONTAINER_RESOURCES = JOB_DESCRIPTOR.getContainer().getContainerResources();
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
private final JobComponentStub jobComponentStub = new JobComponentStub(titusRuntime);
private ApplicationSlaManagementService capacityManagementService = mock(ApplicationSlaManagementService.class);
private final ReservationUsageCalculator calculator = new ReservationUsageCalculator(
jobComponentStub.getJobOperations(),
capacityManagementService
);
@Before
public void setUp() throws Exception {
when(capacityManagementService.getApplicationSLAs()).thenReturn(Arrays.asList(
ApplicationSLA.newBuilder().withAppName("cg1").build(),
ApplicationSLA.newBuilder().withAppName("cg2").build()
));
when(capacityManagementService.getApplicationSLA("cg1")).thenReturn(
ApplicationSLA.newBuilder().withAppName("cg1").build()
);
addJobsInCapacityGroup("cg1", 2);
addJobsInCapacityGroup("cg2", 4);
}
@Test
public void testBuildUsage() {
Map<String, ReservationUsage> usage = calculator.buildUsage();
ReservationUsage cg1Usage = usage.get("cg1");
ReservationUsage cg2Usage = usage.get("cg2");
assertThat(cg1Usage).isNotNull();
assertThat(cg2Usage).isNotNull();
assertThat(cg1Usage.getCpu()).isEqualTo(CONTAINER_RESOURCES.getCpu() * 4);
assertThat(cg1Usage.getCpu() * 2).isEqualTo(cg2Usage.getCpu());
assertThat(cg1Usage.getMemoryMB()).isEqualTo(CONTAINER_RESOURCES.getMemoryMB() * 4);
assertThat(cg1Usage.getMemoryMB() * 2).isEqualTo(cg2Usage.getMemoryMB());
assertThat(cg1Usage.getDiskMB()).isEqualTo(CONTAINER_RESOURCES.getDiskMB() * 4);
assertThat(cg1Usage.getDiskMB() * 2).isEqualTo(cg2Usage.getDiskMB());
assertThat(cg1Usage.getNetworkMbs()).isEqualTo(CONTAINER_RESOURCES.getNetworkMbps() * 4);
assertThat(cg1Usage.getNetworkMbs() * 2).isEqualTo(cg2Usage.getNetworkMbs());
}
@Test
public void testBuildCapacityGroupUsage() {
ReservationUsage cg1Usage = calculator.buildCapacityGroupUsage("cg1");
assertThat(cg1Usage).isNotNull();
assertThat(cg1Usage.getCpu()).isEqualTo(CONTAINER_RESOURCES.getCpu() * 4);
assertThat(cg1Usage.getMemoryMB()).isEqualTo(CONTAINER_RESOURCES.getMemoryMB() * 4);
assertThat(cg1Usage.getDiskMB()).isEqualTo(CONTAINER_RESOURCES.getDiskMB() * 4);
assertThat(cg1Usage.getNetworkMbs()).isEqualTo(CONTAINER_RESOURCES.getNetworkMbps() * 4);
}
private void addJobsInCapacityGroup(String capacityGroup, int jobCount) {
DataGenerator<Job<BatchJobExt>> jobGenerator = JobGenerator.batchJobs(JOB_DESCRIPTOR
.but(jd -> jd.toBuilder().withCapacityGroup(capacityGroup))
);
jobGenerator.getValues(jobCount).forEach(job -> {
jobComponentStub.createJob(job);
jobComponentStub.createDesiredTasks(job);
jobComponentStub.getJobOperations().getTasks(job.getId()).forEach(task -> jobComponentStub.moveTaskToState(task, TaskState.Started));
});
}
} | 9,983 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2/rest/ServerStatusResourceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.endpoint.v2.rest;
import java.util.Collections;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import com.netflix.titus.api.endpoint.v2.rest.representation.ServerStatusRepresentation;
import com.netflix.titus.common.util.guice.ActivationLifecycle;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.config.CellInfoResolver;
import com.netflix.titus.master.health.service.DefaultHealthService;
import com.netflix.titus.api.supervisor.service.LeaderActivator;
import com.netflix.titus.runtime.endpoint.common.rest.JsonMessageReaderWriter;
import com.netflix.titus.runtime.endpoint.common.rest.TitusExceptionMapper;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.jaxrs.JaxRsServerResource;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.springframework.test.web.reactive.server.WebTestClient;
import static com.netflix.titus.master.endpoint.v2.rest.ServerStatusResource.NOT_APPLICABLE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.when;
@Category(IntegrationTest.class)
public class ServerStatusResourceTest {
private static final CellInfoResolver cellInfoResolver = mock(CellInfoResolver.class);
private static final ActivationLifecycle activationLifecycle = mock(ActivationLifecycle.class);
private static final LeaderActivator leaderActivator = mock(LeaderActivator.class);
private static final ServerStatusResource restService = new ServerStatusResource(
new DefaultHealthService(cellInfoResolver, activationLifecycle, leaderActivator)
);
@ClassRule
public static final JaxRsServerResource<ServerStatusResource> jaxRsServer = JaxRsServerResource.newBuilder(restService)
.withProviders(new JsonMessageReaderWriter(), new TitusExceptionMapper())
.build();
private static WebTestClient testClient;
@BeforeClass
public static void setUpClass() {
testClient = WebTestClient.bindToServer()
.baseUrl(jaxRsServer.getBaseURI())
.defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON)
.build();
}
@Before
public void setUp() throws Exception {
reset(cellInfoResolver, activationLifecycle, leaderActivator);
when(cellInfoResolver.getCellName()).thenReturn("cellN");
}
@Test
public void testActivatedServer() {
when(leaderActivator.isLeader()).thenReturn(true);
when(leaderActivator.isActivated()).thenReturn(true);
when(activationLifecycle.getServiceActionTimesMs()).thenReturn(Collections.singletonList(Pair.of("myService", 90L)));
testClient.get().uri(ServerStatusResource.PATH_API_V2_STATUS).exchange()
.expectBody(ServerStatusRepresentation.class)
.value(result -> {
assertThat(result.isLeader()).isTrue();
assertThat(result.isActive()).isTrue();
assertThat(result.getServiceActivationTimes()).hasSize(1);
assertThat(result.getServiceActivationTimes().get(0).getDuration()).isEqualTo("90ms");
assertThat(result.getServiceActivationOrder()).hasSize(1);
});
}
@Test
public void tesNotLeaderServer() {
testClient.get().uri(ServerStatusResource.PATH_API_V2_STATUS).exchange()
.expectBody(ServerStatusRepresentation.class)
.value(result -> {
assertThat(result.isLeader()).isFalse();
assertThat(result.isActive()).isFalse();
assertThat(result.getActivationTime()).isEqualTo(NOT_APPLICABLE);
assertThat(result.getServiceActivationTimes()).hasSize(0);
assertThat(result.getServiceActivationTimes()).hasSize(0);
assertThat(result.getServiceActivationOrder()).hasSize(0);
});
}
@Test
public void testUnactivatedServer() {
when(leaderActivator.isLeader()).thenReturn(true);
when(activationLifecycle.getActivationTimeMs()).thenReturn(-1L);
testClient.get().uri(ServerStatusResource.PATH_API_V2_STATUS).exchange()
.expectBody(ServerStatusRepresentation.class)
.value(result -> {
assertThat(result.isLeader()).isTrue();
assertThat(result.isActive()).isFalse();
assertThat(result.getActivationTime()).isEqualTo(NOT_APPLICABLE);
assertThat(result.getServiceActivationTimes()).hasSize(0);
assertThat(result.getServiceActivationTimes()).hasSize(0);
assertThat(result.getServiceActivationOrder()).hasSize(0);
});
}
} | 9,984 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/endpoint/v2/rest/ApplicationSlaManagementResourceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.endpoint.v2.rest;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import com.netflix.titus.api.endpoint.v2.rest.representation.ApplicationSlaRepresentation;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.master.config.MasterConfiguration;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.runtime.endpoint.common.rest.JsonMessageReaderWriter;
import com.netflix.titus.runtime.endpoint.common.rest.TitusExceptionMapper;
import com.netflix.titus.runtime.endpoint.rest.ErrorResponse;
import com.netflix.titus.testkit.data.core.ApplicationSlaSample;
import com.netflix.titus.testkit.junit.category.IntegrationTest;
import com.netflix.titus.testkit.junit.jaxrs.JaxRsServerResource;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.HttpStatus;
import org.springframework.test.web.reactive.server.WebTestClient;
import org.springframework.web.reactive.function.BodyInserters;
import rx.Observable;
import static com.netflix.titus.api.model.SchedulerConstants.SCHEDULER_NAME_KUBE_SCHEDULER;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Runs tests in the embedded jetty jaxRsServer, as we want to verify that providers and annotations are applied
* as expected.
*/
@Category(IntegrationTest.class)
public class ApplicationSlaManagementResourceTest {
private static final ParameterizedTypeReference<List<ApplicationSlaRepresentation>> APPLICATION_SLA_REP_LIST_TR =
new ParameterizedTypeReference<List<ApplicationSlaRepresentation>>() {
};
private static final ParameterizedTypeReference<List<ApplicationSlaRepresentation>> APPLICATION_SLA_KUBE_SCHEDULER_REP_LIST_TR =
new ParameterizedTypeReference<List<ApplicationSlaRepresentation>>() {
};
private static final ApplicationSLA SAMPLE_SLA = ApplicationSlaSample.CriticalSmall.build();
private static final ApplicationSLA SAMPLE_SLA_MANAGED_BY_KUBESCHEDULER = ApplicationSlaSample.CriticalSmallKubeScheduler.build();
private static final ApplicationSlaRepresentation SAMPLE_SLA_REPRESENTATION = Representation2ModelConvertions.asRepresentation(SAMPLE_SLA);
private static final ApplicationSlaManagementService capacityManagementService = mock(ApplicationSlaManagementService.class);
private static ReadOnlyJobOperations jobOperations = mock(ReadOnlyJobOperations.class);
private static final ApplicationSlaManagementResource restService = new ApplicationSlaManagementResource(
Archaius2Ext.newConfiguration(MasterConfiguration.class),
capacityManagementService,
new ReservationUsageCalculator(jobOperations, capacityManagementService)
);
@ClassRule
public static final JaxRsServerResource<ApplicationSlaManagementResource> jaxRsServer = JaxRsServerResource.newBuilder(restService)
.withProviders(new JsonMessageReaderWriter(), new TitusExceptionMapper())
.build();
private static String baseURI;
private static WebTestClient testClient;
@BeforeClass
public static void setUpClass() {
when(jobOperations.getJobsAndTasks()).thenReturn(Collections.emptyList());
baseURI = jaxRsServer.getBaseURI() + ApplicationSlaManagementEndpoint.PATH_API_V2_MANAGEMENT_APPLICATIONS + '/';
testClient = WebTestClient.bindToServer()
.baseUrl(baseURI)
.defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON)
.build();
}
@Before
public void setUp() throws Exception {
Mockito.reset(capacityManagementService);
}
@Test
public void addApplication() {
when(capacityManagementService.getApplicationSLA(any())).thenReturn(null);
when(capacityManagementService.addApplicationSLA(any())).thenReturn(Observable.empty());
testClient.post().body(BodyInserters.fromObject(SAMPLE_SLA_REPRESENTATION)).exchange()
.expectStatus().is2xxSuccessful()
.expectHeader().valueEquals(HttpHeaders.LOCATION, baseURI + SAMPLE_SLA.getAppName())
.expectBody().isEmpty();
verify(capacityManagementService, times(1)).getApplicationSLA(any());
verify(capacityManagementService, times(1)).addApplicationSLA(any());
}
@Test
public void addExistingApplicationFails() {
when(capacityManagementService.getApplicationSLA(any())).thenReturn(SAMPLE_SLA);
testClient.post().body(BodyInserters.fromObject(SAMPLE_SLA_REPRESENTATION)).exchange()
.expectStatus().isEqualTo(HttpStatus.CONFLICT);
verify(capacityManagementService, times(1)).getApplicationSLA(any());
verify(capacityManagementService, times(0)).addApplicationSLA(any());
}
@Test
public void getAllApplications() {
when(capacityManagementService.getApplicationSLAs()).thenReturn(Arrays.asList(
ApplicationSlaSample.CriticalSmall.build(), ApplicationSlaSample.CriticalLarge.build()
));
testClient.get().exchange()
.expectStatus().isOk()
.expectBody(APPLICATION_SLA_REP_LIST_TR).value(result -> assertThat(result).hasSize(2));
verify(capacityManagementService, times(1)).getApplicationSLAs();
}
@Test
public void getApplicationSlaBySchedulerName() {
when(capacityManagementService.getApplicationSLAsForScheduler(SCHEDULER_NAME_KUBE_SCHEDULER)).thenReturn(Arrays.asList(SAMPLE_SLA_MANAGED_BY_KUBESCHEDULER));
testClient.get()
.uri(uriBuilder -> uriBuilder.queryParam("schedulerName", SCHEDULER_NAME_KUBE_SCHEDULER).build())
.exchange()
.expectStatus().isOk()
.expectBody(APPLICATION_SLA_KUBE_SCHEDULER_REP_LIST_TR).value(result -> assertThat(result).hasSize(1));
verify(capacityManagementService, times(1)).getApplicationSLAsForScheduler(SCHEDULER_NAME_KUBE_SCHEDULER);
}
@Test
public void getApplicationByName() {
String targetName = SAMPLE_SLA.getAppName();
when(capacityManagementService.getApplicationSLA(targetName)).thenReturn(SAMPLE_SLA);
testClient.get().uri(targetName).exchange()
.expectStatus().isOk()
.expectBody(ApplicationSlaRepresentation.class).value(result -> assertThat(result.getAppName()).isEqualTo(targetName));
verify(capacityManagementService, times(1)).getApplicationSLA(targetName);
}
@Test
public void getNonExistingApplicationByNameFails() {
String myMissingApp = "myMissingApp";
when(capacityManagementService.getApplicationSLA(any())).thenReturn(null);
testClient.get().uri(myMissingApp).exchange()
.expectStatus().isNotFound();
verify(capacityManagementService, times(1)).getApplicationSLA(myMissingApp);
}
@Test
public void updateApplication() {
String targetName = SAMPLE_SLA.getAppName();
when(capacityManagementService.getApplicationSLA(targetName)).thenReturn(SAMPLE_SLA);
when(capacityManagementService.addApplicationSLA(any())).thenReturn(Observable.empty());
testClient.put().uri(targetName).body(BodyInserters.fromObject(SAMPLE_SLA_REPRESENTATION)).exchange()
.expectStatus().isNoContent()
.expectBody().isEmpty();
verify(capacityManagementService, times(1)).getApplicationSLA(targetName);
}
@Test
public void updateNonExistingApplicationFails() {
String targetAppName = SAMPLE_SLA.getAppName();
when(capacityManagementService.getApplicationSLA(targetAppName)).thenReturn(null);
testClient.put().uri(targetAppName).body(BodyInserters.fromObject(SAMPLE_SLA_REPRESENTATION)).exchange()
.expectStatus().isNotFound()
.expectBody(ErrorResponse.class).value(error -> assertThat(error.getMessage()).contains("SLA not defined for"));
verify(capacityManagementService, times(1)).getApplicationSLA(targetAppName);
verify(capacityManagementService, never()).addApplicationSLA(any());
}
@Test
public void removeApplication() {
String targetAppName = SAMPLE_SLA.getAppName();
when(capacityManagementService.getApplicationSLA(targetAppName)).thenReturn(SAMPLE_SLA);
when(capacityManagementService.removeApplicationSLA(targetAppName)).thenReturn(Observable.empty());
testClient.delete().uri(targetAppName).exchange()
.expectStatus().isNoContent();
verify(capacityManagementService, times(1)).removeApplicationSLA(targetAppName);
}
@Test
public void removeNonExistingApplicationFails() {
when(capacityManagementService.getApplicationSLA(anyString())).thenReturn(null);
testClient.delete().uri("/myMissingApp").exchange()
.expectStatus().isNotFound();
verify(capacityManagementService, times(1)).getApplicationSLA(any());
verify(capacityManagementService, never()).removeApplicationSLA(any());
}
} | 9,985 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/model/ResourceDimensionsTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.model;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.testkit.data.core.ResourceDimensionSample;
import org.junit.Test;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
public class ResourceDimensionsTest {
@Test
public void testResourceDimensionAddition() throws Exception {
ResourceDimension small = ResourceDimensionSample.SmallWithGpuAndOpportunistic.build();
ResourceDimension expected = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
assertThat(ResourceDimensions.add(small, small)).isEqualTo(expected);
}
@Test
public void testResourceDimensionSubtraction() throws Exception {
ResourceDimension large = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
ResourceDimension small = ResourceDimensionSample.SmallWithGpuAndOpportunistic.build();
assertThat(ResourceDimensions.subtractPositive(large, small)).isEqualTo(small);
}
@Test
public void testResourceDimensionMultiplication() throws Exception {
ResourceDimension small = ResourceDimensionSample.SmallWithGpuAndOpportunistic.build();
ResourceDimension expected = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
assertThat(ResourceDimensions.multiply(small, 2)).isEqualTo(expected);
}
@Test
public void testResourceDimensionDivide() throws Exception {
ResourceDimension large = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
ResourceDimension small = ResourceDimensionSample.SmallWithGpuAndOpportunistic.build();
Pair<Long, ResourceDimension> result = ResourceDimensions.divide(large, small);
assertThat(result.getLeft()).isEqualTo(2);
assertThat(result.getRight()).isEqualTo(ResourceDimension.empty());
}
@Test
public void testResourceDimensionDivideAndRoundUp() throws Exception {
ResourceDimension large = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
ResourceDimension largePlus = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.builder().withCpus(large.getCpu() + 1).build();
ResourceDimension small = ResourceDimensionSample.SmallWithGpuAndOpportunistic.build();
assertThat(ResourceDimensions.divideAndRoundUp(large, small)).isEqualTo(2);
assertThat(ResourceDimensions.divideAndRoundUp(largePlus, small)).isEqualTo(3);
}
@Test
public void testAligningUpToHigherCPU() throws Exception {
ResourceDimension small2X = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
ResourceDimension original = ResourceDimensionSample.SmallWithGpuAndOpportunistic.builder().withCpus(small2X.getCpu()).build();
assertThat(ResourceDimensions.alignUp(original, small2X)).isEqualTo(small2X);
}
@Test
public void testAligningUpToHigherMemory() throws Exception {
ResourceDimension small2X = ResourceDimensionSample.SmallWithGpuAndOpportunisticX2.build();
ResourceDimension original = ResourceDimensionSample.SmallWithGpuAndOpportunistic.builder().withMemoryMB(small2X.getMemoryMB()).build();
assertThat(ResourceDimensions.alignUp(original, small2X)).isEqualTo(small2X);
}
} | 9,986 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management/internal/ResourceConsumptionEvaluatorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.service.management.internal;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.service.V3JobOperations;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.api.model.Tier;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.model.ResourceDimensions;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.master.service.management.CompositeResourceConsumption;
import com.netflix.titus.master.service.management.ResourceConsumption;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.master.service.management.ResourceConsumptions.findConsumption;
import static com.netflix.titus.master.service.management.internal.ResourceConsumptionEvaluator.perTaskResourceDimension;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code ResourceConsumptionEvaluator}: verifies that per-capacity-group
 * consumption is aggregated correctly across service and batch jobs, that jobs with a
 * missing application name or an undefined capacity group fall back to the default SLA,
 * and that unused capacity groups report empty consumption.
 */
public class ResourceConsumptionEvaluatorTest {

    // Fixed per-task resource footprint applied to every generated job template.
    private static final ContainerResources CONTAINER_RESOURCES = ContainerResources.newBuilder()
            .withCpu(1)
            .withMemoryMB(1024)
            .withDiskMB(512)
            .withNetworkMbps(128)
            .build();

    private final TitusRuntime titusRuntime = TitusRuntimes.test();

    private final ApplicationSlaManagementService applicationSlaManagementService = mock(ApplicationSlaManagementService.class);

    private final V3JobOperations v3JobOperations = mock(V3JobOperations.class);

    // Stub that backs the mocked V3JobOperations with real in-memory jobs/tasks.
    private final JobComponentStub jobComponentStub = new JobComponentStub(titusRuntime);

    private final V3JobOperations jobOperations = jobComponentStub.getJobOperations();

    @Before
    public void setUp() throws Exception {
        // Delegate the mocked getJobsAndTasks() to the stub so the evaluator sees the
        // jobs created by the test helpers below.
        when(v3JobOperations.getJobsAndTasks()).then(invocation -> jobOperations.getJobsAndTasks());
    }

    @Test
    public void testEvaluation() {
        when(applicationSlaManagementService.getApplicationSLAs()).thenReturn(asList(ConsumptionModelGenerator.DEFAULT_SLA, ConsumptionModelGenerator.CRITICAL_SLA_1, ConsumptionModelGenerator.NOT_USED_SLA));

        // Job with defined capacity group SLA
        Pair<Job, List<Task>> goodCapacity = newServiceJob(
                "goodCapacityJob",
                jd -> jd.toBuilder().withCapacityGroup(ConsumptionModelGenerator.CRITICAL_SLA_1.getAppName()).build()
        );
        Job goodCapacityJob = goodCapacity.getLeft();

        // Job without appName defined; should be accounted against the default SLA.
        Pair<Job, List<Task>> noAppName = newServiceJob(
                "badCapacityJob",
                jd -> jd.toBuilder()
                        .withApplicationName("")
                        .withCapacityGroup(ConsumptionModelGenerator.DEFAULT_SLA.getAppName())
                        .build()
        );
        Job noAppNameJob = noAppName.getLeft();

        // Job with capacity group for which SLA is not defined; should fall back to the
        // default SLA and be reported as an undefined capacity group.
        Pair<Job, List<Task>> badCapacity = newServiceJob(
                "goodCapacityJob",
                jd -> jd.toBuilder().withCapacityGroup("missingCapacityGroup").build()
        );
        Job badCapacityJob = badCapacity.getLeft();

        // Evaluate
        ResourceConsumptionEvaluator evaluator = new ResourceConsumptionEvaluator(applicationSlaManagementService, v3JobOperations);

        Set<String> undefined = evaluator.getUndefinedCapacityGroups();
        assertThat(undefined).contains("missingCapacityGroup");

        CompositeResourceConsumption systemConsumption = evaluator.getSystemConsumption();
        Map<String, ResourceConsumption> tierConsumptions = systemConsumption.getContributors();
        assertThat(tierConsumptions).containsKeys(Tier.Critical.name(), Tier.Flex.name());

        // Critical capacity group
        CompositeResourceConsumption criticalConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Critical.name(), ConsumptionModelGenerator.CRITICAL_SLA_1.getAppName()
        ).get();
        assertThat(criticalConsumption.getCurrentConsumption()).isEqualTo(perTaskResourceDimension(goodCapacityJob)); // We have single worker in Started state
        assertThat(criticalConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.CRITICAL_SLA_1));
        assertThat(criticalConsumption.isAboveLimit()).isTrue();

        // Default capacity group: accumulates both the no-app-name job and the job whose
        // capacity group has no SLA.
        CompositeResourceConsumption defaultConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Flex.name(), ConsumptionModelGenerator.DEFAULT_SLA.getAppName()
        ).get();
        assertThat(defaultConsumption.getCurrentConsumption()).isEqualTo(ResourceDimensions.add(
                perTaskResourceDimension(noAppNameJob),
                perTaskResourceDimension(badCapacityJob)
        ));
        assertThat(defaultConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.DEFAULT_SLA));
        assertThat(defaultConsumption.isAboveLimit()).isFalse();

        // Not used capacity group: defined in the SLA service but no jobs reference it.
        CompositeResourceConsumption notUsedConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Critical.name(), ConsumptionModelGenerator.NOT_USED_SLA.getAppName()
        ).get();
        assertThat(notUsedConsumption.getCurrentConsumption()).isEqualTo(ResourceDimension.empty());
        assertThat(notUsedConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.NOT_USED_SLA));
        assertThat(notUsedConsumption.isAboveLimit()).isFalse();
    }

    @SuppressWarnings("unchecked")
    @Test
    public void batchJobWithMultipleTasks() {
        when(applicationSlaManagementService.getApplicationSLAs()).thenReturn(asList(ConsumptionModelGenerator.DEFAULT_SLA, ConsumptionModelGenerator.CRITICAL_SLA_1, ConsumptionModelGenerator.NOT_USED_SLA));

        // Job with defined capacity group SLA; batch size 2 so consumption doubles.
        Job<BatchJobExt> goodCapacityJob = newBatchJob(
                "goodCapacityJob",
                jd -> jd.toBuilder()
                        .withExtensions(jd.getExtensions().toBuilder().withSize(2).build())
                        .withCapacityGroup(ConsumptionModelGenerator.CRITICAL_SLA_1.getAppName())
                        .build()
        ).getLeft();
        List<Task> goodCapacityTasks = jobComponentStub.getJobOperations().getTasks(goodCapacityJob.getId());

        // Job without appName defined
        Job<BatchJobExt> noAppNameJob = newBatchJob(
                "badCapacityJob",
                jd -> jd.toBuilder()
                        .withApplicationName("")
                        .withExtensions(jd.getExtensions().toBuilder().withSize(2).build())
                        .withCapacityGroup(ConsumptionModelGenerator.DEFAULT_SLA.getAppName())
                        .build()
        ).getLeft();
        List<Task> noAppNameTasks = jobComponentStub.getJobOperations().getTasks(noAppNameJob.getId());

        // Job with capacity group for which SLA is not defined
        Job<BatchJobExt> badCapacityJob = newBatchJob(
                "badCapacityJob",
                jd -> jd.toBuilder()
                        .withExtensions(jd.getExtensions().toBuilder().withSize(2).build())
                        .withCapacityGroup("missingCapacityGroup")
                        .build()
        ).getLeft();
        List<Task> badCapacityTasks = jobComponentStub.getJobOperations().getTasks(badCapacityJob.getId());

        // Evaluate
        ResourceConsumptionEvaluator evaluator = new ResourceConsumptionEvaluator(applicationSlaManagementService, v3JobOperations);

        Set<String> undefined = evaluator.getUndefinedCapacityGroups();
        assertThat(undefined).contains("missingCapacityGroup");

        CompositeResourceConsumption systemConsumption = evaluator.getSystemConsumption();
        Map<String, ResourceConsumption> tierConsumptions = systemConsumption.getContributors();
        assertThat(tierConsumptions).containsKeys(Tier.Critical.name(), Tier.Flex.name());

        // Critical capacity group
        CompositeResourceConsumption criticalConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Critical.name(), ConsumptionModelGenerator.CRITICAL_SLA_1.getAppName()
        ).get();
        assertThat(criticalConsumption.getCurrentConsumption()).isEqualTo(expectedCurrentConsumptionForBatchJob(goodCapacityJob, goodCapacityTasks));
        assertThat(criticalConsumption.getMaxConsumption()).isEqualTo(expectedMaxConsumptionForBatchJob(goodCapacityJob));
        assertThat(criticalConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.CRITICAL_SLA_1));
        assertThat(criticalConsumption.isAboveLimit()).isTrue();

        // Default capacity group
        CompositeResourceConsumption defaultConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Flex.name(), ConsumptionModelGenerator.DEFAULT_SLA.getAppName()
        ).get();
        assertThat(defaultConsumption.getCurrentConsumption()).isEqualTo(ResourceDimensions.add(
                expectedCurrentConsumptionForBatchJob(noAppNameJob, noAppNameTasks),
                expectedCurrentConsumptionForBatchJob(badCapacityJob, badCapacityTasks)
        ));
        assertThat(defaultConsumption.getMaxConsumption()).isEqualTo(ResourceDimensions.add(
                expectedMaxConsumptionForBatchJob(noAppNameJob),
                expectedMaxConsumptionForBatchJob(badCapacityJob)
        ));
        assertThat(defaultConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.DEFAULT_SLA));
        assertThat(defaultConsumption.isAboveLimit()).isFalse();

        // Not used capacity group
        CompositeResourceConsumption notUsedConsumption = (CompositeResourceConsumption) findConsumption(
                systemConsumption, Tier.Critical.name(), ConsumptionModelGenerator.NOT_USED_SLA.getAppName()
        ).get();
        assertThat(notUsedConsumption.getCurrentConsumption()).isEqualTo(ResourceDimension.empty());
        assertThat(notUsedConsumption.getAllowedConsumption()).isEqualTo(ConsumptionModelGenerator.capacityGroupLimit(ConsumptionModelGenerator.NOT_USED_SLA));
        assertThat(notUsedConsumption.isAboveLimit()).isFalse();
    }

    /**
     * Creates a service job from the given template transformer and moves its first task
     * to the Started state.
     */
    private Pair<Job, List<Task>> newServiceJob(String name, Function<JobDescriptor, JobDescriptor> transformer) {
        jobComponentStub.addJobTemplate(name, JobDescriptorGenerator.serviceJobDescriptors()
                .map(jd -> jd.but(self -> self.getContainer().but(c -> CONTAINER_RESOURCES)))
                .map(transformer::apply)
        );
        return jobComponentStub.createJobAndTasks(
                name,
                (job, tasks) -> jobComponentStub.moveTaskToState(tasks.get(0), TaskState.Started)
        );
    }

    /**
     * Creates a batch job from the given template transformer and moves all of its tasks
     * to the Started state.
     */
    private Pair<Job, List<Task>> newBatchJob(String name, Function<JobDescriptor<BatchJobExt>, JobDescriptor<BatchJobExt>> transformer) {
        jobComponentStub.addJobTemplate(name, JobDescriptorGenerator.batchJobDescriptors()
                .map(jd -> jd.but(self -> self.getContainer().but(c -> CONTAINER_RESOURCES)))
                .map(transformer::apply)
        );
        return jobComponentStub.createJobAndTasks(
                name,
                (job, tasks) -> tasks.forEach(task -> jobComponentStub.moveTaskToState(task, TaskState.Started))
        );
    }

    // NOTE(review): the 'tasks' parameter is unused; expectation is derived from the
    // declared batch size rather than the live task list. This holds here because every
    // task is moved to Started, but consider deriving from tasks.size() — TODO confirm.
    private static ResourceDimension expectedCurrentConsumptionForBatchJob(Job<BatchJobExt> job, List<Task> tasks) {
        return ResourceDimensions.multiply(perTaskResourceDimension(job), job.getJobDescriptor().getExtensions().getSize());
    }

    // Max consumption for a batch job is per-task footprint times the declared job size.
    private static ResourceDimension expectedMaxConsumptionForBatchJob(Job<BatchJobExt> job) {
        return ResourceDimensions.multiply(perTaskResourceDimension(job), job.getJobDescriptor().getExtensions().getSize());
    }
}
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management/internal/ConsumptionModelGenerator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.service.management.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.api.model.Tier;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.master.model.ResourceDimensions;
import com.netflix.titus.master.service.management.CompositeResourceConsumption;
import com.netflix.titus.master.service.management.ResourceConsumption;
import com.netflix.titus.master.service.management.ResourceConsumption.ConsumptionLevel;
import com.netflix.titus.master.service.management.ResourceConsumptionEvents.ResourceConsumptionEvent;
import com.netflix.titus.master.service.management.ResourceConsumptions;
import com.netflix.titus.testkit.data.core.ApplicationSlaSample;
/**
 * A helper class to generate test data for capacity groups and resource consumption.
 * Consumption is registered per (capacity group, application) pair via
 * {@link #addConsumption(String, String, ResourceDimension, ResourceDimension)} and the
 * expected evaluation result is derived by {@link #getEvaluation()}.
 */
class ConsumptionModelGenerator {

    static final ApplicationSLA CRITICAL_SLA_1 = ApplicationSlaSample.CriticalSmall.build();
    static final ApplicationSLA NOT_USED_SLA = ApplicationSlaSample.CriticalLarge.build();
    static final ApplicationSLA DEFAULT_SLA = ApplicationSlaSample.DefaultFlex.build();

    private final Map<String, ApplicationSLA> capacityGroupMap = new HashMap<>();

    // capacityGroup -> appName -> aggregated consumption
    private final Map<String, Map<String, ResourceDimension>> actualConsumptionByGroupAndApp = new HashMap<>();
    private final Map<String, Map<String, ResourceDimension>> maxConsumptionByApp = new HashMap<>();

    public ConsumptionModelGenerator() {
        capacityGroupMap.put(CRITICAL_SLA_1.getAppName(), CRITICAL_SLA_1);
        capacityGroupMap.put(NOT_USED_SLA.getAppName(), NOT_USED_SLA);
        capacityGroupMap.put(DEFAULT_SLA.getAppName(), DEFAULT_SLA);
    }

    /**
     * Registers actual and max consumption for an application within a capacity group.
     * Repeated calls for the same pair accumulate.
     */
    void addConsumption(String capacityGroup, String appName, ResourceDimension actualConsumption, ResourceDimension maxConsumption) {
        addConsumption(capacityGroup, appName, actualConsumption, actualConsumptionByGroupAndApp);
        addConsumption(capacityGroup, appName, maxConsumption, maxConsumptionByApp);
    }

    private static void addConsumption(String capacityGroup, String appName, ResourceDimension consumption, Map<String, Map<String, ResourceDimension>> output) {
        // Map.merge expresses the insert-or-accumulate logic directly.
        output.computeIfAbsent(capacityGroup, k -> new HashMap<>())
                .merge(appName, consumption, ResourceDimensions::add);
    }

    /** Returns a defensive copy of the capacity group -> SLA mapping. */
    Map<String, ApplicationSLA> getCapacityGroupMap() {
        return new HashMap<>(capacityGroupMap);
    }

    Set<String> getDefinedCapacityGroupNames() {
        return capacityGroupMap.keySet();
    }

    /** Removes a capacity group that must already be defined. */
    void removeCapacityGroup(String capacityGroup) {
        Preconditions.checkState(capacityGroupMap.containsKey(capacityGroup));
        capacityGroupMap.remove(capacityGroup);
    }

    /**
     * Builds the expected evaluation result from the consumption registered so far:
     * per-capacity-group aggregates, empty entries for unused groups, the set of
     * undefined groups, and tier/system level roll-ups.
     */
    DefaultResourceConsumptionService.ConsumptionEvaluationResult getEvaluation() {
        Map<String, CompositeResourceConsumption> groupConsumptionMap = new HashMap<>();

        Set<String> definedCapacityGroups = new HashSet<>(capacityGroupMap.keySet());

        // Used capacity groups
        Set<String> capacityGroupNames = actualConsumptionByGroupAndApp.keySet();
        for (String capacityGroupName : capacityGroupNames) {
            List<ResourceConsumption> appConsumptions = buildApplicationConsumptions(capacityGroupName);
            CompositeResourceConsumption groupConsumption = ResourceConsumptions.aggregate(
                    capacityGroupName,
                    ConsumptionLevel.CapacityGroup,
                    appConsumptions,
                    // Groups without an SLA fall back to the default SLA limit.
                    capacityGroupLimit(capacityGroupMap.getOrDefault(capacityGroupName, DEFAULT_SLA))
            );
            groupConsumptionMap.put(capacityGroupName, groupConsumption);
        }

        // Unused capacity groups: defined SLAs with no registered consumption get an
        // empty consumption entry with their configured limit.
        CollectionsExt.copyAndRemove(definedCapacityGroups, capacityGroupNames).forEach(capacityGroup -> {
            ApplicationSLA sla = capacityGroupMap.getOrDefault(capacityGroup, DEFAULT_SLA);
            ResourceDimension limit = capacityGroupLimit(sla);
            groupConsumptionMap.put(capacityGroup, new CompositeResourceConsumption(
                    capacityGroup,
                    ConsumptionLevel.CapacityGroup,
                    ResourceDimension.empty(),
                    ResourceDimension.empty(),
                    limit,
                    Collections.emptyMap(),
                    Collections.emptyMap(),
                    false
            ));
        });

        // Undefined capacity groups: consumption registered against a group with no SLA.
        Set<String> undefinedCapacityGroups = CollectionsExt.copyAndRemove(capacityGroupNames, definedCapacityGroups);

        // Tier consumption: group capacity groups by the tier of their SLA (default SLA
        // tier for undefined groups).
        Map<Tier, List<CompositeResourceConsumption>> tierCapacityGroups = groupConsumptionMap.values().stream()
                .collect(Collectors.groupingBy(rc -> {
                    ApplicationSLA sla = capacityGroupMap.get(rc.getConsumerName());
                    if (sla == null) {
                        sla = capacityGroupMap.get(DEFAULT_SLA.getAppName());
                    }
                    return sla.getTier();
                }));

        Map<String, CompositeResourceConsumption> tierConsumptions = new HashMap<>();
        tierCapacityGroups.forEach((tier, consumptions) ->
                tierConsumptions.put(
                        tier.name(),
                        ResourceConsumptions.aggregate(
                                tier.name(),
                                ConsumptionLevel.Tier,
                                consumptions
                        )
                ));

        // System consumption: single roll-up of all tiers.
        CompositeResourceConsumption systemConsumption = ResourceConsumptions.aggregate(
                ResourceConsumption.SYSTEM_CONSUMER,
                ConsumptionLevel.System,
                tierConsumptions.values()
        );

        return new DefaultResourceConsumptionService.ConsumptionEvaluationResult(
                definedCapacityGroups,
                undefinedCapacityGroups,
                systemConsumption
        );
    }

    /**
     * Builds the per-application consumption entries for one capacity group, each with a
     * single synthetic instance-type contributor.
     */
    private List<ResourceConsumption> buildApplicationConsumptions(String capacityGroupName) {
        Map<String, ResourceDimension> actual = actualConsumptionByGroupAndApp.get(capacityGroupName);
        Map<String, ResourceDimension> max = maxConsumptionByApp.get(capacityGroupName);

        List<ResourceConsumption> appConsumptions = new ArrayList<>();
        for (String appName : actual.keySet()) {
            ResourceConsumption byInstanceType = new ResourceConsumption(
                    appName,
                    ConsumptionLevel.InstanceType,
                    actual.get(appName),
                    max.get(appName),
                    Collections.emptyMap()
            );
            appConsumptions.add(
                    new CompositeResourceConsumption(
                            appName,
                            ConsumptionLevel.Application,
                            actual.get(appName),
                            max.get(appName),
                            max.get(appName),
                            Collections.emptyMap(),
                            Collections.singletonMap("itype.test", byInstanceType),
                            // Above limit when actual is not smaller than max.
                            !ResourceDimensions.isBigger(max.get(appName), actual.get(appName))
                    )
            );
        }
        return appConsumptions;
    }

    /** Total limit of a capacity group: per-instance dimension times instance count. */
    static ResourceDimension capacityGroupLimit(ApplicationSLA sla) {
        return ResourceDimensions.multiply(sla.getResourceDimension(), sla.getInstanceCount());
    }

    /**
     * Finds the first event of the given type for the given capacity group, if any.
     */
    @SuppressWarnings("unchecked") // isAssignableFrom check above guarantees the cast is safe
    static <E extends ResourceConsumptionEvent> Optional<E> findEvent(List<ResourceConsumptionEvent> events, Class<E> eventClass, String capacityGroup) {
        for (ResourceConsumptionEvent event : events) {
            if (eventClass.isAssignableFrom(event.getClass()) && event.getCapacityGroup().equals(capacityGroup)) {
                return Optional.of((E) event);
            }
        }
        return Optional.empty();
    }
}
| 9,988 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/service/management/internal/ResourceConsumptionLogTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.service.management.internal;
import java.util.Collections;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.master.service.management.CompositeResourceConsumption;
import com.netflix.titus.master.service.management.ResourceConsumption;
import com.netflix.titus.master.service.management.ResourceConsumptionEvents.CapacityGroupAllocationEvent;
import com.netflix.titus.master.service.management.ResourceConsumptionEvents.CapacityGroupRemovedEvent;
import com.netflix.titus.master.service.management.ResourceConsumptionEvents.CapacityGroupUndefinedEvent;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class ResourceConsumptionLogTest {
@Test
public void testAllocationEventLogging() throws Exception {
CompositeResourceConsumption consumption = new CompositeResourceConsumption(
"myCapacityGroup",
ResourceConsumption.ConsumptionLevel.CapacityGroup,
new ResourceDimension(1, 0, 1, 10, 10, 0), // actual
new ResourceDimension(2, 0, 2, 20, 20, 0), // max
new ResourceDimension(3, 0, 3, 30, 30, 0), // limit
Collections.singletonMap("attrKey", "attrValue"),
Collections.emptyMap(),
false
);
CapacityGroupAllocationEvent event = new CapacityGroupAllocationEvent(
"myCapacityGroup",
System.currentTimeMillis(),
consumption
);
String result = ResourceConsumptionLog.doLog(event);
String expected = "Resource consumption change: group=myCapacityGroup [below limit] actual=[cpu=1.0, memoryMB=1, diskMB=10, networkMbs=10, gpu=0, opportunisticCpu=0], max=[cpu=2.0, memoryMB=2, diskMB=20, networkMbs=20, gpu=0, opportunisticCpu=0], limit=[cpu=3.0, memoryMB=3, diskMB=30, networkMbs=30, gpu=0, opportunisticCpu=0], attrs={attrKey=attrValue}";
assertThat(result).isEqualTo(expected);
}
@Test
public void testGroupUndefinedEvent() throws Exception {
CapacityGroupUndefinedEvent event = new CapacityGroupUndefinedEvent("myCapacityGroup", System.currentTimeMillis());
String result = ResourceConsumptionLog.doLog(event);
String expected = "Capacity group not defined: group=myCapacityGroup";
assertThat(result).isEqualTo(expected);
}
@Test
public void testGroupRemovedEvent() throws Exception {
CapacityGroupRemovedEvent event = new CapacityGroupRemovedEvent("myCapacityGroup", System.currentTimeMillis());
String result = ResourceConsumptionLog.doLog(event);
String expected = "Capacity group no longer defined: group=myCapacityGroup";
assertThat(result).isEqualTo(expected);
}
} | 9,989 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/KubeUtilTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes;
import java.time.OffsetDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import io.kubernetes.client.openapi.models.V1ContainerState;
import io.kubernetes.client.openapi.models.V1ContainerStateRunning;
import io.kubernetes.client.openapi.models.V1ContainerStateTerminated;
import io.kubernetes.client.openapi.models.V1ContainerStatus;
import io.kubernetes.client.openapi.models.V1Node;
import io.kubernetes.client.openapi.models.V1NodeSpec;
import io.kubernetes.client.openapi.models.V1ObjectMeta;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1PodStatus;
import org.junit.Test;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
public class KubeUtilTest {
private static final String FARZONE_A = "farzoneA";
private static final String FARZONE_B = "farzoneB";
private static final String NOT_FARZONE = "notFarzone";
private static final List<String> FARZONES = asList(FARZONE_A, FARZONE_B);
private static final V1Node NODE_WITHOUT_ZONE = new V1Node().metadata(new V1ObjectMeta().labels(Collections.emptyMap()));
@Test
public void testIsFarzone() {
assertThat(KubeUtil.isFarzoneNode(FARZONES, newNodeInZone(FARZONE_A))).isTrue();
assertThat(KubeUtil.isFarzoneNode(asList(FARZONE_A, "farzoneB"), newNodeInZone(NOT_FARZONE))).isFalse();
assertThat(KubeUtil.isFarzoneNode(asList(FARZONE_A, "farzoneB"), NODE_WITHOUT_ZONE)).isFalse();
}
@Test
public void testEstimatePodSize() {
assertThat(KubeUtil.estimatePodSize(new V1Pod())).isGreaterThan(0);
}
@Test
public void testFindFinishedTimestamp() {
// Test running pod
V1Pod pod = new V1Pod().status(new V1PodStatus().containerStatuses(new ArrayList<>()));
pod.getStatus().getContainerStatuses().add(new V1ContainerStatus()
.state(new V1ContainerState().running(new V1ContainerStateRunning()))
);
assertThat(KubeUtil.findFinishedTimestamp(pod)).isEmpty();
// Test finished pod
pod.getStatus().getContainerStatuses().add(new V1ContainerStatus()
.state(new V1ContainerState().terminated(new V1ContainerStateTerminated()))
);
assertThat(KubeUtil.findFinishedTimestamp(pod)).isEmpty();
OffsetDateTime now = OffsetDateTime.now();
pod.getStatus().getContainerStatuses().get(1).getState().getTerminated().finishedAt(now);
assertThat(KubeUtil.findFinishedTimestamp(pod)).contains(now.toInstant().toEpochMilli());
}
private V1Node newNodeInZone(String zoneId) {
return new V1Node()
.metadata(new V1ObjectMeta().labels(Collections.singletonMap(KubeConstants.NODE_LABEL_ZONE, zoneId)))
.spec(new V1NodeSpec().taints(new ArrayList<>()));
}
} | 9,990 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/KubeObjectFormatterTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes;
import io.kubernetes.client.openapi.models.V1Node;
import io.kubernetes.client.openapi.models.V1Pod;
import org.junit.Test;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andIpAddress;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andNodeAllocatableResources;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andNodeLabels;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andNodePhase;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.andTaint;
import static com.netflix.titus.master.kubernetes.NodeDataGenerator.newNode;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andLabel;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andNodeName;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andPhase;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andReason;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.newPod;
import static org.assertj.core.api.Assertions.assertThat;
public class KubeObjectFormatterTest {
@Test
public void testFormatPodEssentials() {
V1Pod pod = newPod("testPod",
andLabel("labelA", "valueA"),
andPhase("RUNNING"), andReason("reason started"),
andNodeName("someNode")
);
String result = KubeObjectFormatter.formatPodEssentials(pod);
assertThat(result).isEqualTo("{name=testPod, labels={labelA=valueA}, nodeName=someNode, phase=RUNNING, reason=reason started}");
}
@Test
public void testFormatNodeEssentials() {
V1Node node = newNode("testNode",
andNodeAllocatableResources(64, 8192, 16384, 512),
andNodePhase("READY"),
andNodeLabels("labelA", "valueA"),
andIpAddress("nodeIpAddress"),
andTaint("taintKey", "taintValue", "NoExecute")
);
String result = KubeObjectFormatter.formatNodeEssentials(node);
assertThat(result).isEqualTo("{name=testNode, labels={labelA=valueA}, taints=[{key=taintKey, value=taintValue, effect=NoExecute], phase=READY, allocatableResources={disk=16384000000, memory=8192000000, cpu=64, network=536870912}}");
}
} | 9,991 |
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes;
import java.time.Duration;
import java.util.Optional;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.common.util.tuple.Either;
import com.netflix.titus.master.kubernetes.client.model.PodWrapper;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.kubernetes.client.openapi.models.V1Node;
import io.kubernetes.client.openapi.models.V1ObjectMeta;
import io.kubernetes.client.openapi.models.V1Pod;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_LOCAL_SYSTEM_ERROR;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_NORMAL;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_TASK_KILLED;
import static com.netflix.titus.api.jobmanager.model.job.TaskStatus.REASON_TASK_LOST;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andDeletionTimestamp;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andMessage;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andPhase;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andReason;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andRunning;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andScheduled;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andTerminated;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.andWaiting;
import static com.netflix.titus.master.kubernetes.PodDataGenerator.newPod;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@link PodToTaskMapper}: how pod update and delete notifications from Kubernetes
 * translate into Titus task status transitions. {@code updateMapper(...)} builds a mapper for a
 * pod update event, {@code deleteMapper(...)} for a pod deletion event; both return either the
 * new {@link TaskStatus} or an explanatory error string via {@code getNewTaskStatus()}.
 */
public class PodToTaskMapperTest {
    // Node shared by all mappers; only its name is relevant to the mapping logic here.
    private static final V1Node NODE = new V1Node().metadata(new V1ObjectMeta().name(NodeDataGenerator.NODE_NAME));
    private final TitusRuntime titusRuntime = TitusRuntimes.test();
    // Test clock backing the runtime; advanced manually in testNodeLost().
    private final TestClock clock = (TestClock) titusRuntime.getClock();
    private final ContainerResultCodeResolver containerResultCodeResolver = mock(ContainerResultCodeResolver.class);
    private final KubernetesConfiguration configuration = Archaius2Ext.newConfiguration(KubernetesConfiguration.class);
    @Before
    public void setUp() throws Exception {
        // Stubbed resolver: any pod whose message contains "system" resolves to a local system
        // error reason code; everything else stays unresolved (Optional.empty()).
        when(containerResultCodeResolver.resolve(any(), any(), any())).thenAnswer(invocation -> {
            PodWrapper podWrapper = invocation.getArgument(2);
            String message = podWrapper.getMessage();
            if (message.contains("system")) {
                return Optional.of(TaskStatus.REASON_LOCAL_SYSTEM_ERROR);
            }
            return Optional.empty();
        });
    }
    // A task already in Finished state ignores any further pod updates.
    @Test
    public void testUpdatesIgnoredWhenTaskFinished() {
        Task task = newTask(TaskState.Finished);
        V1Pod pod = newPod(andPhase("Pending"));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertErrorMessage(result, "task already marked as finished");
    }
    // A freshly created, unscheduled pod does not move an Accepted task.
    @Test
    public void testPodCreated() {
        Task task = newTask(TaskState.Accepted);
        V1Pod pod = newPod(andPhase("Pending"));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertErrorMessage(result, "pod notification does not change task state");
    }
    // Scheduling alone (no waiting container state) is enough to move Accepted -> Launched.
    @Test
    public void testPodPendingAndScheduledButNotInWaitingState() {
        Task task = newTask(TaskState.Accepted);
        V1Pod pod = newPod(andPhase("Pending"), andMessage("junit"), andScheduled());
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Launched, TaskStatus.REASON_POD_SCHEDULED);
    }
    // Scheduled pod with a waiting container also maps Accepted -> Launched.
    @Test
    public void testPodScheduledAndLaunched() {
        Task task = newTask(TaskState.Accepted);
        V1Pod pod = newPod(andPhase("Pending"), andMessage("junit"), andWaiting(), andScheduled());
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Launched, TaskStatus.REASON_POD_SCHEDULED);
    }
    // A TASK_STARTING reason on a scheduled pod maps Launched -> StartInitiated.
    @Test
    public void testPodScheduledAndStartInitiated() {
        Task task = newTask(TaskState.Launched);
        V1Pod pod = newPod(andPhase("Pending"), andMessage("junit"), andWaiting(), andScheduled(), andReason(PodToTaskMapper.TASK_STARTING));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.StartInitiated, PodToTaskMapper.TASK_STARTING);
    }
    // The task is already past the pod's Pending phase -> the update is rejected as inconsistent.
    @Test
    public void testTaskStateAheadOfPodInPendingState() {
        Task task = newTask(TaskState.KillInitiated);
        V1Pod pod = newPod(andPhase("Pending"), andScheduled(), andWaiting(), andReason(PodToTaskMapper.TASK_STARTING));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertErrorMessage(result, "pod in state not consistent with the task state");
    }
    // Running pod with a running container maps StartInitiated -> Started.
    @Test
    public void testPodRunning() {
        Task task = newTask(TaskState.StartInitiated);
        V1Pod pod = newPod(andPhase("Running"), andMessage("junit"), andScheduled(), andRunning(), andReason(TaskStatus.REASON_NORMAL));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Started, TaskStatus.REASON_NORMAL);
    }
    // The task is already past the pod's Running phase -> the update is rejected as inconsistent.
    @Test
    public void testTaskStateAheadOfPodInRunningState() {
        Task task = newTask(TaskState.KillInitiated);
        V1Pod pod = newPod(andPhase("Running"), andScheduled(), andRunning(), andReason(TaskStatus.REASON_NORMAL));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertErrorMessage(result, "pod state (Running) not consistent with the task state");
    }
    // Succeeded pod maps Started -> Finished with a normal reason.
    @Test
    public void testPodSucceeded() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Succeeded"), andMessage("junit"), andScheduled(), andTerminated(), andReason(TaskStatus.REASON_NORMAL));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, TaskStatus.REASON_NORMAL);
    }
    // Failed pod with an unrecognized reason maps to Finished/REASON_FAILED.
    @Test
    public void testPodFailed() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Failed"), andMessage("junit"), andScheduled(), andTerminated(), andReason("exit -1"));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, TaskStatus.REASON_FAILED);
    }
    // Failed pod that carries an explicit kill reason (and a deletion timestamp) keeps that reason.
    @Test
    public void testPodFailedWithKillReason() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Failed"), andMessage("junit"), andScheduled(), andTerminated(), andReason(REASON_TASK_KILLED), andDeletionTimestamp());
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, REASON_TASK_KILLED);
    }
    // A task stuck in KillInitiated is reclassified as a transient system error when the pod ends.
    @Test
    public void testPodStuckInStateClassifiedAsTransientSystemError() {
        Task task = JobFunctions.changeTaskStatus(
                newTask(TaskState.KillInitiated),
                TaskStatus.newBuilder().withState(TaskState.KillInitiated).withReasonCode(TaskStatus.REASON_STUCK_IN_STATE).build()
        );
        V1Pod pod = newPod(andPhase("Succeeded"), andMessage("junit"), andScheduled(), andTerminated(), andReason("terminated"));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, TaskStatus.REASON_TRANSIENT_SYSTEM_ERROR);
    }
    // Pod deletion caused by a lost node surfaces a host-termination message.
    @Test
    public void testPodDeletedWhenNodeLost() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Running"), andReason(KubeConstants.NODE_LOST));
        Either<TaskStatus, String> result = deleteMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, REASON_TASK_KILLED, "The host running the container was unexpectedly terminated");
    }
    // Out-of-band pod deletion (e.g. kubectl) is reported as a kill outside the Titus API.
    @Test
    public void testPodDeletedWhenUnexpectedlyTerminated() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Running"), andReason("kubectl_terminate"));
        Either<TaskStatus, String> result = deleteMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, REASON_TASK_KILLED, "Container was terminated without going through the Titus API");
    }
    // Stuck-in-state deletion handling is identical across the pod phases below.
    @Test
    public void testPodPendingDeletedWhenTaskStuckInState() {
        testPodDeletedWhenTaskStuckInState("Pending");
    }
    @Test
    public void testPodStartedDeletedWhenTaskStuckInState() {
        testPodDeletedWhenTaskStuckInState("Running");
    }
    @Test
    public void testPodSucceededDeletedWhenTaskStuckInState() {
        testPodDeletedWhenTaskStuckInState("Succeeded");
    }
    // The stubbed resolver (see setUp) upgrades "system" messages to REASON_LOCAL_SYSTEM_ERROR on update...
    @Test
    public void testPodUpdateWithSystemErrorResolution() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Failed"), andMessage("system error"));
        Either<TaskStatus, String> result = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, REASON_LOCAL_SYSTEM_ERROR, "system error");
    }
    // ...and on delete, where the reason message is the generic out-of-band termination text.
    @Test
    public void testPodDeleteWithSystemErrorResolution() {
        Task task = newTask(TaskState.Started);
        V1Pod pod = newPod(andPhase("Running"), andMessage("system error"));
        Either<TaskStatus, String> result = deleteMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, REASON_LOCAL_SYSTEM_ERROR, "Container was terminated without going through the Titus API");
    }
    // Node-lost handling: grace period first, then the task is marked lost after the timeout.
    @Test
    public void testNodeLost() {
        Task task = newTask(TaskState.Accepted);
        V1Pod pod = newPod(
                andPhase("Running"),
                andReason("NodeLost"),
                andMessage("Node i-lostnode which was running pod lostpod is unresponsive")
        );
        // Check that the task is moved out of Accepted state first
        Either<TaskStatus, String> result1 = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result1, TaskState.Launched, REASON_NORMAL,
                "The pod is scheduled but the communication with its node is lost. If not recovered in 10min, the task will be marked as failed"
        );
        // Now check that there is no change before the node lost deadline is reached.
        task = task.toBuilder().withStatus(result1.getValue()).build();
        Either<TaskStatus, String> result2 = updateMapper(task, pod).getNewTaskStatus();
        assertThat(result2.hasValue()).isTrue();
        assertThat(result2.getValue()).isEqualTo(result1.getValue());
        // Move time past the deadline
        clock.advanceTime(Duration.ofMillis(configuration.getNodeLostTimeoutMs()));
        Either<TaskStatus, String> result3 = updateMapper(task, pod).getNewTaskStatus();
        assertValue(result3, TaskState.Finished, REASON_TASK_LOST, "The node where task was scheduled is lost");
    }
    // Shared body for the stuck-in-state deletion tests: any pod phase ends as a transient system error.
    private void testPodDeletedWhenTaskStuckInState(String podPhase) {
        Task task = JobFunctions.changeTaskStatus(
                newTask(TaskState.KillInitiated),
                TaskStatus.newBuilder().withState(TaskState.KillInitiated).withReasonCode(TaskStatus.REASON_STUCK_IN_STATE).build()
        );
        V1Pod pod = newPod(andPhase(podPhase), andMessage("junit"), andScheduled());
        Either<TaskStatus, String> result = deleteMapper(task, pod).getNewTaskStatus();
        assertValue(result, TaskState.Finished, TaskStatus.REASON_TRANSIENT_SYSTEM_ERROR, "junit");
    }
    // Builds a batch task fixed in the given state.
    private Task newTask(TaskState taskState) {
        return JobFunctions.changeTaskStatus(JobGenerator.oneBatchTask(), TaskStatus.newBuilder().withState(taskState).build());
    }
    // Mapper for a pod update notification (podDeleted=false).
    private PodToTaskMapper updateMapper(Task task, V1Pod v1Pod) {
        return new PodToTaskMapper(configuration, new PodWrapper(v1Pod), Optional.of(NODE), task, false, containerResultCodeResolver, titusRuntime);
    }
    // Mapper for a pod deletion notification (podDeleted=true).
    private PodToTaskMapper deleteMapper(Task task, V1Pod v1Pod) {
        return new PodToTaskMapper(configuration, new PodWrapper(v1Pod), Optional.of(NODE), task, true, containerResultCodeResolver, titusRuntime);
    }
    // Asserts a successful mapping with the default "junit" reason message used by most fixtures.
    private void assertValue(Either<TaskStatus, String> result, TaskState expectedState, String expectedReason) {
        assertValue(result, expectedState, expectedReason, "junit");
    }
    private void assertValue(Either<TaskStatus, String> result, TaskState expectedState, String expectedReason, String expectedReasonMessage) {
        assertThat(result.hasValue()).isTrue();
        assertThat(result.getValue().getState()).isEqualTo(expectedState);
        assertThat(result.getValue().getReasonCode()).isEqualTo(expectedReason);
        assertThat(result.getValue().getReasonMessage()).isEqualTo(expectedReasonMessage);
    }
    // Asserts a rejected mapping whose error text contains the expected fragment.
    private void assertErrorMessage(Either<TaskStatus, String> result, String expected) {
        assertThat(result.hasError()).isTrue();
        assertThat(result.getError()).contains(expected);
    }
}
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import com.netflix.titus.common.util.CollectionsExt;
import io.kubernetes.client.custom.Quantity;
import io.kubernetes.client.openapi.models.V1Node;
import io.kubernetes.client.openapi.models.V1NodeAddress;
import io.kubernetes.client.openapi.models.V1NodeSpec;
import io.kubernetes.client.openapi.models.V1NodeStatus;
import io.kubernetes.client.openapi.models.V1ObjectMeta;
import io.kubernetes.client.openapi.models.V1Taint;
/**
 * Test data builder for {@link V1Node} objects. Nodes are created with {@link #newNode} and
 * customized through composable transformer functions (the {@code and*} methods), mirroring the
 * style of {@link PodDataGenerator}.
 */
public class NodeDataGenerator {

    /** Default node name used when callers do not supply one. */
    public static final String NODE_NAME = "node1";

    /**
     * Creates a node with the given name, empty metadata/status, and applies the transformers in order.
     */
    @SafeVarargs
    public static V1Node newNode(String nodeName, Function<V1Node, V1Node>... transformers) {
        V1Node node = new V1Node()
                .metadata(new V1ObjectMeta().name(nodeName))
                .status(new V1NodeStatus());
        return transform(node, transformers);
    }

    /** Creates a node named {@link #NODE_NAME} and applies the transformers in order. */
    @SafeVarargs
    public static V1Node newNode(Function<V1Node, V1Node>... transformers) {
        return newNode(NODE_NAME, transformers);
    }

    /**
     * Applies the transformers to the node in order and returns the (mutated) node.
     * Returning the node makes this helper consistent with {@code PodDataGenerator.transform}
     * and allows fluent use; existing callers that ignore the result are unaffected.
     */
    @SafeVarargs
    public static V1Node transform(V1Node node, Function<V1Node, V1Node>... transformers) {
        for (Function<V1Node, V1Node> transformer : transformers) {
            transformer.apply(node);
        }
        return node;
    }

    /** Sets the node status phase, creating the status object if absent. */
    public static Function<V1Node, V1Node> andNodePhase(String phase) {
        return node -> {
            if (node.getStatus() == null) {
                node.status(new V1NodeStatus());
            }
            node.getStatus().phase(phase);
            return node;
        };
    }

    /**
     * Sets the node's allocatable resources. Memory/disk are expressed in decimal megabytes
     * ("M" suffix) and network in mebi units ("Mi" suffix), matching the original fixtures.
     */
    public static Function<V1Node, V1Node> andNodeAllocatableResources(int cpu, int memoryMB, int diskMB, int networkMbps) {
        return node -> {
            if (node.getStatus() == null) {
                node.status(new V1NodeStatus());
            }
            Map<String, Quantity> allocatable = new HashMap<>();
            allocatable.put("cpu", new Quantity(cpu + ""));
            allocatable.put("memory", new Quantity(memoryMB + "M"));
            allocatable.put("disk", new Quantity(diskMB + "M"));
            allocatable.put("network", new Quantity(networkMbps + "Mi"));
            node.getStatus().allocatable(allocatable);
            return node;
        };
    }

    /** Convenience overload applying {@link #andIpAddress(String)} directly to a node. */
    public static V1Node andIpAddress(String ipAddress, V1Node node) {
        return andIpAddress(ipAddress).apply(node);
    }

    /**
     * Sets the node's internal IP address. Note: this REPLACES the whole address list with a
     * single internal-IP entry.
     */
    public static Function<V1Node, V1Node> andIpAddress(String ipAddress) {
        return node -> {
            if (node.getStatus() == null) {
                node.status(new V1NodeStatus());
            }
            node.getStatus().addresses(
                    Collections.singletonList(new V1NodeAddress().address(ipAddress).type(KubeUtil.TYPE_INTERNAL_IP))
            );
            return node;
        };
    }

    /** Convenience overload applying {@link #andNodeAnnotations(String...)} directly to a node. */
    public static V1Node andNodeAnnotations(V1Node node, String... keyValuePairs) {
        return andNodeAnnotations(keyValuePairs).apply(node);
    }

    /** Adds the given key/value pairs to the node labels, preserving existing labels. */
    public static Function<V1Node, V1Node> andNodeLabels(String... keyValuePairs) {
        return node -> {
            Map<String, String> labels = CollectionsExt.copyAndAdd(
                    CollectionsExt.nonNull(node.getMetadata().getLabels()),
                    CollectionsExt.asMap(keyValuePairs)
            );
            node.getMetadata().labels(labels);
            return node;
        };
    }

    /** Adds the given key/value pairs to the node annotations, preserving existing annotations. */
    public static Function<V1Node, V1Node> andNodeAnnotations(String... keyValuePairs) {
        return node -> {
            Map<String, String> annotations = CollectionsExt.copyAndAdd(
                    CollectionsExt.nonNull(node.getMetadata().getAnnotations()),
                    CollectionsExt.asMap(keyValuePairs)
            );
            node.getMetadata().annotations(annotations);
            return node;
        };
    }

    /** Appends a taint to the node spec, creating spec and taint list if absent. */
    public static Function<V1Node, V1Node> andTaint(String key, String value, String effect) {
        return node -> {
            if (node.getSpec() == null) {
                node.spec(new V1NodeSpec());
            }
            List<V1Taint> taints = node.getSpec().getTaints();
            if (taints == null) {
                taints = new ArrayList<>();
                node.getSpec().taints(taints);
            }
            taints.add(new V1Taint().key(key).value(value).effect(effect));
            return node;
        };
    }
}
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes;
import java.time.OffsetDateTime;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.DateTimeExt;
import io.kubernetes.client.openapi.models.V1ContainerState;
import io.kubernetes.client.openapi.models.V1ContainerStateRunning;
import io.kubernetes.client.openapi.models.V1ContainerStateTerminated;
import io.kubernetes.client.openapi.models.V1ContainerStateWaiting;
import io.kubernetes.client.openapi.models.V1ContainerStatus;
import io.kubernetes.client.openapi.models.V1ObjectMeta;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1PodSpec;
import io.kubernetes.client.openapi.models.V1PodStatus;
/**
 * Test data builder for {@link V1Pod} objects. Pods are created with {@link #newPod} and
 * customized through composable transformer functions (the {@code and*} methods). Most
 * transformers exist in two forms: a direct {@code (arg, pod)} overload and a curried
 * {@code Function<V1Pod, V1Pod>} form for use with {@link #newPod}/{@link #transform}.
 */
public class PodDataGenerator {
    /**
     * Creates a pod named after the task id, with an empty spec and a status holding one
     * empty container status, then applies the transformers in order.
     */
    @SafeVarargs
    public static V1Pod newPod(String taskId, Function<V1Pod, V1Pod>... transformers) {
        V1Pod pod = new V1Pod()
                .metadata(new V1ObjectMeta()
                        .name(taskId)
                )
                .spec(new V1PodSpec())
                .status(new V1PodStatus()
                        .addContainerStatusesItem(new V1ContainerStatus())
                );
        return transform(pod, transformers);
    }
    /** Creates a pod with the default name "task1" and applies the transformers in order. */
    @SafeVarargs
    public static V1Pod newPod(Function<V1Pod, V1Pod>... transformers) {
        return newPod("task1", transformers);
    }
    /** Applies the transformers to the pod in order and returns the (mutated) pod. */
    @SafeVarargs
    public static V1Pod transform(V1Pod pod, Function<V1Pod, V1Pod>... transformers) {
        for (Function<V1Pod, V1Pod> transformer : transformers) {
            transformer.apply(pod);
        }
        return pod;
    }
    public static V1Pod andPodAnnotations(V1Pod pod, String... keyValuePairs) {
        return andPodAnnotations(keyValuePairs).apply(pod);
    }
    /** Adds the given key/value pairs to the pod annotations, preserving existing annotations. */
    public static Function<V1Pod, V1Pod> andPodAnnotations(String... keyValuePairs) {
        return pod -> {
            Map<String, String> annotations = CollectionsExt.copyAndAdd(
                    CollectionsExt.nonNull(pod.getMetadata().getAnnotations()),
                    CollectionsExt.asMap(keyValuePairs)
            );
            pod.getMetadata().annotations(annotations);
            return pod;
        };
    }
    public static V1Pod andPhase(String phase, V1Pod pod) {
        return andPhase(phase).apply(pod);
    }
    /** Sets the pod status phase (e.g. "Pending", "Running", "Succeeded", "Failed"). */
    public static Function<V1Pod, V1Pod> andPhase(String phase) {
        return pod -> {
            pod.getStatus().phase(phase);
            return pod;
        };
    }
    public static V1Pod andNodeName(String nodeName, V1Pod pod) {
        return andNodeName(nodeName).apply(pod);
    }
    /** Assigns the pod to the named node via the pod spec. */
    public static Function<V1Pod, V1Pod> andNodeName(String nodeName) {
        return pod -> {
            pod.getSpec().setNodeName(nodeName);
            return pod;
        };
    }
    public static V1Pod andPodIp(String podIp, V1Pod pod) {
        return andPodIp(podIp).apply(pod);
    }
    /** Sets the pod IP on the pod status. */
    public static Function<V1Pod, V1Pod> andPodIp(String podIp) {
        return pod -> {
            pod.getStatus().setPodIP(podIp);
            return pod;
        };
    }
    public static V1Pod andScheduled(V1Pod pod) {
        return andScheduled().apply(pod);
    }
    /** Marks the pod as scheduled by assigning it to {@link NodeDataGenerator#NODE_NAME}. */
    public static Function<V1Pod, V1Pod> andScheduled() {
        return pod -> {
            pod.getSpec().nodeName(NodeDataGenerator.NODE_NAME);
            return pod;
        };
    }
    public static V1Pod andWaiting(V1Pod pod) {
        return andWaiting().apply(pod);
    }
    /** Replaces the container statuses with a single container in the "waiting" state. */
    public static Function<V1Pod, V1Pod> andWaiting() {
        return pod -> {
            pod.getStatus().containerStatuses(Collections.singletonList(
                    new V1ContainerStatus().state(new V1ContainerState().waiting(new V1ContainerStateWaiting()))
            ));
            return pod;
        };
    }
    public static V1Pod andStartedAt(long timestamp, V1Pod pod) {
        return andStartedAt(timestamp).apply(pod);
    }
    /** Replaces the container statuses with a single "running" container started at the given epoch millis. */
    public static Function<V1Pod, V1Pod> andStartedAt(long timestamp) {
        return pod -> {
            pod.getStatus().containerStatuses(Collections.singletonList(
                    new V1ContainerStatus().state(
                            new V1ContainerState().running(
                                    new V1ContainerStateRunning().startedAt(DateTimeExt.fromMillis(timestamp))
                            )
                    )
            ));
            return pod;
        };
    }
    public static V1Pod andRunning(V1Pod pod) {
        return andRunning().apply(pod);
    }
    /** Marks the pod's container as running, started at the current wall-clock time. */
    public static Function<V1Pod, V1Pod> andRunning() {
        return pod -> andStartedAt(System.currentTimeMillis(), pod);
    }
    public static V1Pod andDeletionTimestamp(V1Pod pod) {
        return andDeletionTimestamp().apply(pod);
    }
    /** Stamps the pod metadata with a deletion timestamp of "now", creating metadata if absent. */
    public static Function<V1Pod, V1Pod> andDeletionTimestamp() {
        return pod -> {
            if (pod.getMetadata() == null) {
                pod.metadata(new V1ObjectMeta());
            }
            pod.getMetadata().deletionTimestamp(OffsetDateTime.now());
            return pod;
        };
    }
    public static V1Pod andTerminated(V1Pod pod) {
        return andTerminated().apply(pod);
    }
    /** Replaces the container statuses with a single container in the "terminated" state. */
    public static Function<V1Pod, V1Pod> andTerminated() {
        return pod -> {
            pod.getStatus().containerStatuses(Collections.singletonList(
                    new V1ContainerStatus().state(new V1ContainerState().terminated(new V1ContainerStateTerminated()))
            ));
            return pod;
        };
    }
    public static V1Pod andReason(String reason, V1Pod pod) {
        return andReason(reason).apply(pod);
    }
    /** Sets the pod status reason. */
    public static Function<V1Pod, V1Pod> andReason(String reason) {
        return pod -> {
            pod.getStatus().reason(reason);
            return pod;
        };
    }
    public static V1Pod andMessage(String message, V1Pod pod) {
        return andMessage(message).apply(pod);
    }
    /** Sets the pod status message. */
    public static Function<V1Pod, V1Pod> andMessage(String message) {
        return pod -> {
            pod.getStatus().message(message);
            return pod;
        };
    }
    /** Adds the given key/value pairs to the pod labels, preserving existing labels. */
    public static Function<V1Pod, V1Pod> andLabel(String... keyValuePairs) {
        return pod -> {
            Map<String, String> labels = CollectionsExt.copyAndAdd(
                    CollectionsExt.nonNull(pod.getMetadata().getLabels()),
                    CollectionsExt.asMap(keyValuePairs)
            );
            pod.getMetadata().labels(labels);
            return pod;
        };
    }
}
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes.pod;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;

import com.netflix.titus.api.FeatureActivationConfiguration;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.JobConstraints;
import com.netflix.titus.api.jobmanager.model.job.Container;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.vpc.SignedIpAddressAllocation;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.kubernetes.pod.KubePodConfiguration;
import com.netflix.titus.master.kubernetes.pod.affinity.DefaultPodAffinityFactory;
import com.netflix.titus.master.kubernetes.pod.resourcepool.ExplicitJobPodResourcePoolResolver;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobEbsVolumeGenerator;
import com.netflix.titus.testkit.model.job.JobGenerator;
import com.netflix.titus.testkit.model.job.JobIpAllocationGenerator;
import io.kubernetes.client.openapi.models.V1Affinity;
import io.kubernetes.client.openapi.models.V1LabelSelectorRequirement;
import io.kubernetes.client.openapi.models.V1NodeSelector;
import io.kubernetes.client.openapi.models.V1NodeSelectorRequirement;
import io.kubernetes.client.openapi.models.V1PodAffinityTerm;
import io.kubernetes.client.openapi.models.V1WeightedPodAffinityTerm;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
public class DefaultPodAffinityFactoryTest {
// Instance type assigned to GPU jobs that do not request a specific machine type (see setUp()).
private static final String DEFAULT_GPU_INSTANCE_TYPE = "p3.2xlarge";
// Instance type used when a job explicitly pins a GPU machine type.
private static final String SPECIFIC_GPU_INSTANCE_TYPE = "p4.2xlarge";
private final KubePodConfiguration configuration = Mockito.mock(KubePodConfiguration.class);
private final FeatureActivationConfiguration featureConfiguration = Mockito.mock(FeatureActivationConfiguration.class);
// Factory under test, wired with an explicit resource pool resolver and a test runtime.
private final DefaultPodAffinityFactory factory = new DefaultPodAffinityFactory(configuration, featureConfiguration, new ExplicitJobPodResourcePoolResolver(), TitusRuntimes.test());
@Before
public void setUp() throws Exception {
    // Default GPU placement falls back to DEFAULT_GPU_INSTANCE_TYPE; relocation bin-packing is
    // enabled so the disruption-budget test exercises the preferred pod affinity term.
    when(configuration.getDefaultGpuInstanceTypes()).thenReturn(Collections.singletonList(DEFAULT_GPU_INSTANCE_TYPE));
    when(featureConfiguration.isRelocationBinpackingEnabled()).thenReturn(true);
}
@Test
public void testInstanceTypeAffinity() {
    // The constraint key is upper-cased to check case-insensitive matching of the machine-type
    // constraint. Locale.ROOT makes the casing locale-independent: under e.g. the Turkish default
    // locale, toUpperCase() maps 'i' to the dotted capital 'İ' and would corrupt the key.
    Pair<V1Affinity, Map<String, String>> affinityWithAnnotations = factory.buildV1Affinity(
            newJobWithHardConstraint(JobConstraints.MACHINE_TYPE.toUpperCase(Locale.ROOT), "r5.metal"), JobGenerator.oneBatchTask()
    );
    // A hard machine-type constraint must yield exactly one required node selector term.
    V1NodeSelector nodeSelector = affinityWithAnnotations.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(nodeSelector.getNodeSelectorTerms()).hasSize(1);
}
@Test
public void testKubeBackendAffinity() {
    // A hard kube-backend constraint must surface as a required node selector on the backend
    // taint key, carrying the requested backend value.
    // NOTE(review): "kublet" (sic) is used consistently as both constraint value and expectation;
    // presumably deliberate test data rather than a real backend name — confirm.
    Pair<V1Affinity, Map<String, String>> result = factory.buildV1Affinity(
            newJobWithHardConstraint(JobConstraints.KUBE_BACKEND, "kublet"), JobGenerator.oneBatchTask()
    );
    V1NodeSelector required = result.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(required.getNodeSelectorTerms()).hasSize(1);
    V1NodeSelectorRequirement match = required.getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(match.getKey()).isEqualTo(KubeConstants.TAINT_KUBE_BACKEND);
    assertThat(match.getValues()).contains("kublet");
}
@Test
public void testEmptyInstanceTypeIsIgnored() {
    // An empty machine-type constraint value must not produce any node affinity at all.
    Pair<V1Affinity, Map<String, String>> result = factory.buildV1Affinity(
            newJobWithHardConstraint(JobConstraints.MACHINE_TYPE, ""), JobGenerator.oneBatchTask()
    );
    assertThat(result.getLeft().getNodeAffinity()).isNull();
}
@Test
public void testDefaultGpuInstanceAssignment() {
    // A GPU job without an explicit machine type is pinned to the configured default GPU instance type.
    Pair<V1Affinity, Map<String, String>> result = factory.buildV1Affinity(
            newGpuJob(Collections.emptyMap()), JobGenerator.oneBatchTask()
    );
    V1NodeSelector required = result.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(required.getNodeSelectorTerms()).hasSize(1);
    String selectedInstanceType = required.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getValues().get(0);
    assertThat(selectedInstanceType).isEqualTo(DEFAULT_GPU_INSTANCE_TYPE);
}
@Test
public void testSpecificGpuInstanceAssignment() {
    // A GPU job with an explicit machine-type constraint overrides the configured default.
    Map<String, String> hardConstraints = Collections.singletonMap(JobConstraints.MACHINE_TYPE, SPECIFIC_GPU_INSTANCE_TYPE);
    Pair<V1Affinity, Map<String, String>> result = factory.buildV1Affinity(newGpuJob(hardConstraints), JobGenerator.oneBatchTask());
    V1NodeSelector required = result.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(required.getNodeSelectorTerms()).hasSize(1);
    String selectedInstanceType = required.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getValues().get(0);
    assertThat(selectedInstanceType).isEqualTo(SPECIFIC_GPU_INSTANCE_TYPE);
}
@Test
public void testResourcePoolAffinity() {
    // A job attribute selecting the "elastic" resource pool becomes a required node selector
    // on the resource pool node label.
    Job<BatchJobExt> job = JobGenerator.oneBatchJob();
    job = job.toBuilder().withJobDescriptor(JobFunctions.appendJobDescriptorAttributes(
            job.getJobDescriptor(),
            Collections.singletonMap(JobAttributes.JOB_PARAMETER_RESOURCE_POOLS, "elastic"))
    ).build();
    Pair<V1Affinity, Map<String, String>> result = factory.buildV1Affinity(job, JobGenerator.oneBatchTask());
    V1NodeSelector required = result.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(required.getNodeSelectorTerms()).hasSize(1);
    V1NodeSelectorRequirement match = required.getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(match.getKey()).isEqualTo(KubeConstants.NODE_LABEL_RESOURCE_POOL);
    assertThat(match.getValues().get(0)).isEqualTo("elastic");
}
@Test
public void testEbsVolumeAzAffinity() {
    // A task bound to an EBS volume must be pinned to the volume's availability zone.
    Job<BatchJobExt> job = JobGenerator.oneBatchJob();
    List<EbsVolume> ebsVolumes = JobEbsVolumeGenerator.jobEbsVolumes(1).toList();
    Map<String, String> ebsVolumeAttributes = JobEbsVolumeGenerator.jobEbsVolumesToAttributes(ebsVolumes);
    job = job.toBuilder().withJobDescriptor(JobFunctions.jobWithEbsVolumes(job.getJobDescriptor(), ebsVolumes, ebsVolumeAttributes)).build();
    // The task carries the volume id attribute so the factory can look up the volume's AZ.
    Pair<V1Affinity, Map<String, String>> affinityWithAnnotations = factory.buildV1Affinity(job, JobEbsVolumeGenerator.appendEbsVolumeAttribute(JobGenerator.oneBatchTask(), ebsVolumes.get(0).getVolumeId()));
    V1NodeSelector nodeSelector = affinityWithAnnotations.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
    // Exactly one required term: the zone node label must equal the volume's AZ.
    assertThat(nodeSelector.getNodeSelectorTerms()).hasSize(1);
    assertThat(nodeSelector.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getKey()).isEqualTo(KubeConstants.NODE_LABEL_ZONE);
    assertThat(nodeSelector.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getValues().get(0)).isEqualTo(ebsVolumes.get(0).getVolumeAvailabilityZone());
}
    @Test
    public void testIpAllocationAzAffinity() {
        // A task using a static IP allocation must get a required node affinity
        // pinning it to the availability zone of that allocation.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        List<SignedIpAddressAllocation> ipAddressAllocations = JobIpAllocationGenerator.jobIpAllocations(1).toList();
        job = job.toBuilder().withJobDescriptor(JobFunctions.jobWithIpAllocations(job.getJobDescriptor(), ipAddressAllocations)).build();
        // The task carries the allocation id of the first (and only) allocation.
        Pair<V1Affinity, Map<String, String>> affinityWithAnnotations = factory.buildV1Affinity(job, JobIpAllocationGenerator.appendIpAllocationAttribute(JobGenerator.oneBatchTask(), ipAddressAllocations.get(0).getIpAddressAllocation().getAllocationId()));
        V1NodeSelector nodeSelector = affinityWithAnnotations.getLeft().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution();
        // Exactly one term: the node zone label must equal the allocation's AZ.
        assertThat(nodeSelector.getNodeSelectorTerms()).hasSize(1);
        assertThat(nodeSelector.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getKey()).isEqualTo(KubeConstants.NODE_LABEL_ZONE);
        assertThat(nodeSelector.getNodeSelectorTerms().get(0).getMatchExpressions().get(0).getValues().get(0)).isEqualTo(ipAddressAllocations.get(0).getIpAddressAllocation().getIpAddressLocation().getAvailabilityZone());
    }
    @Test
    public void relocationBinPacking() {
        // For a job with a self-managed disruption budget, the factory must emit
        // preferred pod affinity and anti-affinity terms on the relocation-binpack
        // pod label, both keyed on the machine-id topology.
        Job<ServiceJobExt> job = JobGenerator.oneServiceJob();
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().but(
                jd -> jd.getDisruptionBudget().toBuilder()
                        .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder().build())
        )).build();
        ServiceJobTask task = JobGenerator.oneServiceTask();
        V1Affinity affinity = factory.buildV1Affinity(job, task).getLeft();
        // Affinity side: one weighted term selecting pods where the label Exists.
        List<V1WeightedPodAffinityTerm> podAffinityTerms = affinity.getPodAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(podAffinityTerms).hasSize(1);
        V1PodAffinityTerm podAffinityTerm = podAffinityTerms.get(0).getPodAffinityTerm();
        assertThat(podAffinityTerm.getTopologyKey()).isEqualTo(KubeConstants.NODE_LABEL_MACHINE_ID);
        assertThat(podAffinityTerm.getLabelSelector().getMatchExpressions()).hasSize(1);
        V1LabelSelectorRequirement affinityRequirement = podAffinityTerm.getLabelSelector().getMatchExpressions().get(0);
        assertThat(affinityRequirement.getKey()).isEqualTo(KubeConstants.POD_LABEL_RELOCATION_BINPACK);
        assertThat(affinityRequirement.getOperator()).isEqualTo(KubeConstants.SELECTOR_OPERATOR_EXISTS);
        // Anti-affinity side: same label, but with the DoesNotExist operator.
        List<V1WeightedPodAffinityTerm> antiAffinityTerms = affinity.getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(antiAffinityTerms).hasSize(1);
        V1PodAffinityTerm antiAffinityTerm = antiAffinityTerms.get(0).getPodAffinityTerm();
        assertThat(antiAffinityTerm.getTopologyKey()).isEqualTo(KubeConstants.NODE_LABEL_MACHINE_ID);
        assertThat(antiAffinityTerm.getLabelSelector().getMatchExpressions()).hasSize(1);
        V1LabelSelectorRequirement antiAffinityRequirement = antiAffinityTerm.getLabelSelector().getMatchExpressions().get(0);
        assertThat(antiAffinityRequirement.getKey()).isEqualTo(KubeConstants.POD_LABEL_RELOCATION_BINPACK);
        assertThat(antiAffinityRequirement.getOperator()).isEqualTo(KubeConstants.SELECTOR_OPERATOR_DOES_NOT_EXIST);
    }
@Test
public void relocationBinPackingNegative() {
Job<ServiceJobExt> job = JobGenerator.oneServiceJob();
ServiceJobTask task = JobGenerator.oneServiceTask();
V1Affinity affinity = factory.buildV1Affinity(job, task).getLeft();
assertThat(affinity.getPodAffinity()).isNull();
assertThat(affinity.getPodAntiAffinity()).isNull();
}
private Job<BatchJobExt> newJobWithHardConstraint(String name, String value) {
return JobFunctions.appendHardConstraint(JobGenerator.oneBatchJob(), name, value);
}
private Job<BatchJobExt> newGpuJob(Map<String, String> hardConstraints) {
Job<BatchJobExt> template = JobGenerator.oneBatchJob();
JobDescriptor<BatchJobExt> jobDescriptor = template.getJobDescriptor();
Container container = jobDescriptor.getContainer();
return template.toBuilder()
.withJobDescriptor(jobDescriptor.toBuilder()
.withContainer(container.toBuilder()
.withContainerResources(container.getContainerResources().toBuilder().withGpu(1).build())
.withHardConstraints(hardConstraints)
.build()
)
.build()
)
.build();
}
} | 9,995 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/pod/KubePodUtilTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes.pod;
import java.util.Map;

import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;

import static com.google.common.truth.Truth.assertThat;
public class KubePodUtilTest {
@Test
public void testCreatePodAnnotationsFromJobParameters() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
job = JobFunctions.appendContainerAttribute(job, JobAttributes.JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID, "myAccount");
job = JobFunctions.appendContainerAttribute(job, JobAttributes.JOB_CONTAINER_ATTRIBUTE_SUBNETS, "subnet1,subnet2");
assertThat(KubePodUtil.createPodAnnotationsFromJobParameters(job)).containsEntry(KubeConstants.POD_LABEL_ACCOUNT_ID, "myAccount");
assertThat(KubePodUtil.createPodAnnotationsFromJobParameters(job)).containsEntry(KubeConstants.POD_LABEL_SUBNETS, "subnet1,subnet2");
}
@Test
public void testFilterPodAnnotations() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
JobDescriptor<BatchJobExt> jobDescriptor = JobFunctions.appendJobSecurityAttributes(
job.getJobDescriptor(),
CollectionsExt.asMap(JobAttributes.JOB_SECURITY_ATTRIBUTE_METATRON_AUTH_CONTEXT, "someAuthContext")
);
assertThat(KubePodUtil.filterPodJobDescriptor(jobDescriptor).getContainer().getSecurityProfile().getAttributes().containsKey(JobAttributes.JOB_SECURITY_ATTRIBUTE_METATRON_AUTH_CONTEXT))
.isFalse();
}
@Test
public void testSanitizeVolumeName() {
String name = "Ab9bac3e:6ea1:4bc3:a803:e0070ca434c3/";
assertThat(KubePodUtil.sanitizeVolumeName(name)).matches("ab9bac3e-6ea1-4bc3-a803-e0070ca434c3--vol");
}
} | 9,996 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/pod | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/pod/v1/V1SpecPodFactoryTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes.pod.v1;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import com.netflix.titus.api.jobmanager.TaskAttributes;
import com.netflix.titus.api.jobmanager.model.job.BasicContainer;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Container;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.LogStorageInfo;
import com.netflix.titus.api.jobmanager.model.job.NetworkConfiguration;
import com.netflix.titus.api.jobmanager.model.job.PlatformSidecar;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.VolumeMount;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.volume.SharedContainerVolumeSource;
import com.netflix.titus.api.jobmanager.model.job.volume.Volume;
import com.netflix.titus.api.model.ApplicationSLA;
import com.netflix.titus.api.model.EfsMount;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.master.kubernetes.pod.KubePodConfiguration;
import com.netflix.titus.master.kubernetes.pod.affinity.PodAffinityFactory;
import com.netflix.titus.master.kubernetes.pod.legacy.DefaultAggregatingContainerEnvFactory;
import com.netflix.titus.master.kubernetes.pod.legacy.TitusProvidedContainerEnvFactory;
import com.netflix.titus.master.kubernetes.pod.legacy.UserProvidedContainerEnvFactory;
import com.netflix.titus.master.kubernetes.pod.taint.TaintTolerationFactory;
import com.netflix.titus.master.kubernetes.pod.topology.TopologyFactory;
import com.netflix.titus.master.scheduler.SchedulerConfiguration;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.kubernetes.client.openapi.models.V1AWSElasticBlockStoreVolumeSource;
import io.kubernetes.client.openapi.models.V1Affinity;
import io.kubernetes.client.openapi.models.V1Container;
import io.kubernetes.client.openapi.models.V1FlexVolumeSource;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1Volume;
import io.kubernetes.client.openapi.models.V1VolumeMount;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.common.kube.Annotations.AnnotationKeySuffixSidecars;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class V1SpecPodFactoryTest {

    // Mount propagation value shared by the volume-mount tests. It is a constant,
    // so declare it private static final rather than a mutable, package-private
    // instance field.
    private static final String NONE_MOUNT_PROPAGATION = com.netflix.titus.grpc.protogen.VolumeMount.MountPropagation.MountPropagationNone.toString();

    // All collaborators are mocked; only the pod factory under test is real.
    private final KubePodConfiguration configuration = mock(KubePodConfiguration.class);
    private final SchedulerConfiguration schedulerConfiguration = mock(SchedulerConfiguration.class);
    private final ApplicationSlaManagementService capacityGroupManagement = mock(ApplicationSlaManagementService.class);
    private final PodAffinityFactory podAffinityFactory = mock(PodAffinityFactory.class);
    private final TaintTolerationFactory taintTolerationFactory = mock(TaintTolerationFactory.class);
    private final TitusRuntime titusRuntime = TitusRuntimes.internal();
    // Real env factory composed of the user- and Titus-provided factories.
    private final DefaultAggregatingContainerEnvFactory defaultAggregatingContainerEnvFactory =
            new DefaultAggregatingContainerEnvFactory(titusRuntime,
                    UserProvidedContainerEnvFactory.getInstance(),
                    TitusProvidedContainerEnvFactory.getInstance());
    private final TopologyFactory topologyFactory = mock(TopologyFactory.class);
    private final LogStorageInfo<Task> logStorageInfo = mock(LogStorageInfo.class);

    // Subject under test, created fresh for every test in setUp().
    private V1SpecPodFactory podFactory;
    @Before
    public void setUp() throws Exception {
        // Wire the factory under test with the mocked collaborators declared above.
        podFactory = new V1SpecPodFactory(
                configuration,
                capacityGroupManagement,
                podAffinityFactory,
                taintTolerationFactory,
                topologyFactory,
                defaultAggregatingContainerEnvFactory,
                logStorageInfo,
                schedulerConfiguration
        );
    }
    @Test
    public void relocationLabel() {
        // Only jobs with a self-managed disruption budget should be marked with
        // the relocation bin-pack pod label.
        Job<ServiceJobExt> job = JobGenerator.oneServiceJob();
        Job<ServiceJobExt> selfManagedJob = job.toBuilder().withJobDescriptor(job.getJobDescriptor().but(
                jd -> jd.getDisruptionBudget().toBuilder()
                        .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder().build())
        )).build();
        ServiceJobTask task = JobGenerator.oneServiceTask();
        // any() matcher: the same stub serves both pod builds below.
        when(podAffinityFactory.buildV1Affinity(any(), eq(task))).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        // Default job: no relocation label expected.
        V1Pod pod = podFactory.buildV1Pod(job, task);
        assertThat(pod.getMetadata().getLabels()).doesNotContainKey(KubeConstants.POD_LABEL_RELOCATION_BINPACK);
        // Self-managed job: label must be present with the "SelfManaged" value.
        V1Pod selfManagedPod = podFactory.buildV1Pod(selfManagedJob, task);
        assertThat(selfManagedPod.getMetadata().getLabels()).containsEntry(KubeConstants.POD_LABEL_RELOCATION_BINPACK, "SelfManaged");
    }
@Test
public void testCapacityGroupAssignment() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
BatchJobTask task = JobGenerator.oneBatchTask();
job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().withCapacityGroup("myGroup").build()).build();
when(capacityGroupManagement.getApplicationSLA("myGroup")).thenReturn(
ApplicationSLA.newBuilder().withAppName("myGroup").build()
);
when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
V1Pod pod = podFactory.buildV1Pod(job, task);
assertThat(pod.getMetadata().getLabels()).containsEntry(KubeConstants.LABEL_CAPACITY_GROUP, "mygroup");
}
@Test
public void basicMainContainerTranslation() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
BatchJobTask task = JobGenerator.oneBatchTask();
job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().build()).build();
when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
V1Pod pod = podFactory.buildV1Pod(job, task);
V1Container mainContainer = pod.getSpec().getContainers().get(0);
String mainContainerImageTag = pod.getMetadata().getAnnotations().get("pod.titus.netflix.com/image-tag-main");
assertThat(mainContainerImageTag).isEqualTo("latest");
assertThat(mainContainer.getImage()).contains("titusops/alpine@");
}
    @Test
    public void multipleContainers() {
        // Extra containers declared on the job descriptor must each become an
        // additional container in the pod spec, next to the main container.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        Image testImage = Image.newBuilder().withName("testImage").withDigest("123").build();
        List<BasicContainer> extraContainers = Arrays.asList(
                new BasicContainer("extraContainer1", testImage, Collections.emptyList(), Collections.emptyList(), new HashMap<>(), Collections.emptyList()),
                new BasicContainer("extraContainer2", testImage, Collections.emptyList(), Collections.emptyList(), new HashMap<>(), Collections.emptyList())
        );
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().withExtraContainers(extraContainers).build()).build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        List<V1Container> containers = Objects.requireNonNull(pod.getSpec()).getContainers();
        // 3 containers here, 1 from the main container, 2 from the extras
        assertThat(containers.size()).isEqualTo(1 + extraContainers.size());
    }
    @Test
    public void podMainContainerHasVolumeMounts() {
        // Volume mounts declared on the job's container must be propagated to the
        // main container of the pod spec, ahead of the implicit dev-shm mount.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        List<VolumeMount> volumeMounts = Arrays.asList(
                new VolumeMount("volume1", "", NONE_MOUNT_PROPAGATION, false, ""),
                new VolumeMount("volume2", "", NONE_MOUNT_PROPAGATION, false, "")
        );
        Container container = job.getJobDescriptor().getContainer().toBuilder().withVolumeMounts(volumeMounts).build();
        job = job.toBuilder().withJobDescriptor(
                job.getJobDescriptor().toBuilder().withContainer(container).build()
        ).build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        V1Container mainContainer = pod.getSpec().getContainers().get(0);
        List<V1VolumeMount> mounts = mainContainer.getVolumeMounts();
        // dev-shm is third volume
        assertThat(mounts.size()).isEqualTo(3);
        assertThat(mounts.get(0).getName()).isEqualTo("volume1");
        assertThat(mounts.get(1).getName()).isEqualTo("volume2");
    }
    @Test
    public void podGetsSharedFlexVolumes() {
        // Shared-container volumes must be rendered as flex volumes whose options
        // carry the source container and source path.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        List<Volume> volumes = Arrays.asList(
                new Volume("volume1", new SharedContainerVolumeSource("main", "/main-root")),
                new Volume("volume2", new SharedContainerVolumeSource("main", "/main-root"))
        );
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().withVolumes(volumes).build()).build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        List<V1Volume> podVolumes = Objects.requireNonNull(pod.getSpec()).getVolumes();
        // dev-shm is third volume
        assertThat(podVolumes.size()).isEqualTo(3);
        V1Volume mainSharedVolume = podVolumes.get(0);
        assertThat(mainSharedVolume.getName()).isEqualTo("volume1");
        V1FlexVolumeSource flexVolume = mainSharedVolume.getFlexVolume();
        // The flex volume driver name mirrors the source type.
        assertThat(flexVolume.getDriver()).isEqualTo("SharedContainerVolumeSource");
        Map<String, String> flexVolumeOptions = flexVolume.getOptions();
        assertThat(flexVolumeOptions.get("sourceContainer")).isEqualTo("main");
        assertThat(flexVolumeOptions.get("sourcePath")).isEqualTo("/main-root");
    }
@Test
public void podHasSidecarAnnotations() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
BatchJobTask task = JobGenerator.oneBatchTask();
String json_args = "{\"foo\":true,\"bar\":3.0}";
List<PlatformSidecar> platformSidecars = Arrays.asList(
new PlatformSidecar("mysidecar", "stable", json_args)
);
job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().withPlatformSidecars(platformSidecars).build()).build();
when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
V1Pod pod = podFactory.buildV1Pod(job, task);
Map<String, String> annotations = pod.getMetadata().getAnnotations();
String expectedSidecarAnnotation = "mysidecar" + "." + AnnotationKeySuffixSidecars;
assertThat(annotations.get(expectedSidecarAnnotation)).isEqualTo("true");
String expectedChannelAnnotation = "mysidecar" + "." + AnnotationKeySuffixSidecars + "/channel";
assertThat(annotations.get(expectedChannelAnnotation)).isEqualTo("stable");
String expectedArgsAnnotation = "mysidecar" + "." + AnnotationKeySuffixSidecars + "/arguments";
assertThat(annotations.get(expectedArgsAnnotation)).isEqualTo(json_args);
}
    @Test
    public void testNetworkConfigurationRespectsBeingSet() {
        // When the job carries an explicit network configuration, the pod must
        // expose it through the network-mode annotation.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        // NOTE(review): mode value 3 appears to map to Ipv6AndIpv4Fallback —
        // confirm against the NetworkConfiguration enum mapping.
        NetworkConfiguration networkConfiguration = new NetworkConfiguration(3);
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().withNetworkConfiguration(networkConfiguration).build()).build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        String networkModeAnnotationValue = pod.getMetadata().getAnnotations().get("network.netflix.com/network-mode");
        assertThat(networkModeAnnotationValue).isEqualTo("Ipv6AndIpv4Fallback");
    }
@Test
public void testNetworkConfigurationIsNotNullAndSetToUnknownByDefault() {
Job<BatchJobExt> job = JobGenerator.oneBatchJob();
BatchJobTask task = JobGenerator.oneBatchTask();
job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder().build()).build();
when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
V1Pod pod = podFactory.buildV1Pod(job, task);
String networkModeAnnotationValue = pod.getMetadata().getAnnotations().get("network.netflix.com/network-mode");
assertThat(networkModeAnnotationValue).isEqualTo("UnknownNetworkMode");
}
    @Test
    public void testEFSMountsGetTransformedSafely() {
        // An EFS mount must be translated into an NFS pod volume (named from the
        // server IP and relative path) plus a volume mount on the main container.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        EfsMount newEfsMount = new EfsMount("1.2.3.4", "/mountpoint", EfsMount.MountPerm.RO, "/relative/");
        Container newContainer = job.getJobDescriptor().getContainer();
        ContainerResources newContainerResources = newContainer.getContainerResources();
        Container newContainerWithEFS = newContainer.toBuilder().withContainerResources(newContainerResources.newBuilder()
                .withEfsMounts(Collections.singletonList(newEfsMount))
                .build()).build();
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder()
                        .withContainer(newContainerWithEFS).build())
                .build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        // Part 1: the volume section needs to be well-formed
        List<V1Volume> volumes = pod.getSpec().getVolumes();
        assertThat(volumes.size()).isEqualTo(2); // one for nfs, one for shm
        V1Volume v1NFSVolume = volumes.get(0);
        // Trailing '/' in the relative path becomes a '-' in the sanitized name.
        assertThat(v1NFSVolume.getName()).isEqualTo("1-2-3-4-relative--vol");
        assertThat(v1NFSVolume.getNfs().getServer()).isEqualTo("1.2.3.4");
        assertThat(v1NFSVolume.getNfs().getPath()).isEqualTo("/relative/");
        // The volume itself is writable; read-only is enforced at the mount.
        assertThat(v1NFSVolume.getNfs().getReadOnly()).isEqualTo(false);
        // Part 2: the volume mount section needs to applied to the first container in the podspec
        List<V1VolumeMount> vms = pod.getSpec().getContainers().get(0).getVolumeMounts();
        assertThat(vms.size()).isEqualTo(2); // one for nfs, one for shm
        V1VolumeMount v1NFSvm = vms.get(0);
        assertThat(v1NFSvm.getName()).isEqualTo("1-2-3-4-relative--vol");
        assertThat(v1NFSvm.getMountPath()).isEqualTo("/mountpoint");
        // RO permission on the EfsMount surfaces as a read-only container mount.
        assertThat(v1NFSvm.getReadOnly()).isEqualTo(true);
    }
    @Test
    public void testEFSMountsHandlesDuplicateVolumes() {
        // Three EFS mounts sharing the same server and relative path must collapse
        // into a single pod volume with three container mounts, each carrying its
        // own mount path and read/write permission.
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        BatchJobTask task = JobGenerator.oneBatchTask();
        EfsMount newEfsMount = new EfsMount("1.2.3.4", "/mountpoint", EfsMount.MountPerm.RO, "/relative");
        EfsMount newEfsMount2 = new EfsMount("1.2.3.4", "/mountpoint2", EfsMount.MountPerm.RO, "/relative");
        EfsMount newEfsMount3 = new EfsMount("1.2.3.4", "/mountpoint3", EfsMount.MountPerm.RW, "/relative");
        Container newContainer = job.getJobDescriptor().getContainer();
        ContainerResources newContainerResources = newContainer.getContainerResources();
        Container newContainerWithEFS = newContainer.toBuilder().withContainerResources(newContainerResources.newBuilder()
                .withEfsMounts(Arrays.asList(newEfsMount, newEfsMount2, newEfsMount3))
                .build()).build();
        job = job.toBuilder().withJobDescriptor(job.getJobDescriptor().toBuilder()
                        .withContainer(newContainerWithEFS).build())
                .build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod pod = podFactory.buildV1Pod(job, task);
        // Part 1: There should only be *one* EFS volume to share
        List<V1Volume> volumes = pod.getSpec().getVolumes();
        assertThat(volumes.size()).isEqualTo(2); // one for nfs, one for shm
        V1Volume v1NFSVolume = volumes.get(0);
        assertThat(v1NFSVolume.getName()).isEqualTo("1-2-3-4-relative-vol");
        assertThat(v1NFSVolume.getNfs().getServer()).isEqualTo("1.2.3.4");
        assertThat(v1NFSVolume.getNfs().getPath()).isEqualTo("/relative");
        // All NFS volumes that are generated like this should be RW, and
        // delegating the actual RO/RW state to the volume *mount*.
        assertThat(v1NFSVolume.getNfs().getReadOnly()).isEqualTo(false);
        // Part 2: there should be *3* volume mounts, all sharing the volume
        List<V1VolumeMount> vms = pod.getSpec().getContainers().get(0).getVolumeMounts();
        assertThat(vms.size()).isEqualTo(4); // 3 for nfs, one for shm
        V1VolumeMount v1NFSvm1 = vms.get(0);
        assertThat(v1NFSvm1.getName()).isEqualTo("1-2-3-4-relative-vol");
        assertThat(v1NFSvm1.getMountPath()).isEqualTo("/mountpoint");
        assertThat(v1NFSvm1.getReadOnly()).isTrue();
        V1VolumeMount v1NFSvm2 = vms.get(1);
        assertThat(v1NFSvm2.getName()).isEqualTo("1-2-3-4-relative-vol");
        assertThat(v1NFSvm2.getMountPath()).isEqualTo("/mountpoint2");
        assertThat(v1NFSvm2.getReadOnly()).isTrue();
        // The RW mount keeps write access while sharing the same volume.
        V1VolumeMount v1NFSvm3 = vms.get(2);
        assertThat(v1NFSvm3.getName()).isEqualTo("1-2-3-4-relative-vol");
        assertThat(v1NFSvm3.getMountPath()).isEqualTo("/mountpoint3");
        assertThat(v1NFSvm3.getReadOnly()).isFalse();
    }
    @Test
    public void testEbsVolumeInfo() {
        // The job declares two EBS volumes, but the task context selects vol-2;
        // only that volume may appear in the pod spec and container mounts.
        String volName1 = "vol-1";
        String volName2 = "vol-2";
        String fsType = "xfs";
        String mountPath = "/mnt";
        EbsVolume.MountPerm mountPerm = EbsVolume.MountPerm.RW;
        EbsVolume vol1 = EbsVolume.newBuilder()
                .withVolumeId(volName1)
                .withMountPath(mountPath)
                .withMountPermissions(mountPerm)
                .withFsType(fsType)
                .withVolumeAvailabilityZone("us-east-1c")
                .withVolumeCapacityGB(10)
                .build();
        EbsVolume vol2 = EbsVolume.newBuilder()
                .withVolumeId(volName2)
                .withMountPath(mountPath)
                .withMountPermissions(mountPerm)
                .withFsType(fsType)
                .withVolumeAvailabilityZone("us-east-1d")
                .withVolumeCapacityGB(20)
                .build();
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        job = Job.<BatchJobExt>newBuilder()
                .withJobDescriptor(job.getJobDescriptor().toBuilder()
                        .withContainer(job.getJobDescriptor().getContainer().toBuilder()
                                .withContainerResources(job.getJobDescriptor().getContainer().getContainerResources().toBuilder()
                                        .withEbsVolumes(Arrays.asList(vol1, vol2))
                                        .build())
                                .build())
                        .build())
                .build();
        Task task = JobGenerator.batchTasks(job).getValue();
        // The task context pins this task to the second volume.
        task = task.toBuilder()
                .addToTaskContext(TaskAttributes.TASK_ATTRIBUTES_EBS_VOLUME_ID, volName2)
                .build();
        when(podAffinityFactory.buildV1Affinity(job, task)).thenReturn(Pair.of(new V1Affinity(), new HashMap<>()));
        V1Pod v1Pod = podFactory.buildV1Pod(job, task);
        // The selected volume becomes an AWS EBS pod volume with the right fs type.
        V1Volume v1Volume = v1Pod.getSpec().getVolumes().get(0);
        assertThat(v1Volume.getName()).isEqualTo(volName2);
        V1AWSElasticBlockStoreVolumeSource ebsVolumeSource = v1Volume.getAwsElasticBlockStore();
        assertThat(ebsVolumeSource.getFsType()).isEqualTo(fsType);
        // RW mount permission translates into a writable container mount.
        V1VolumeMount v1VolumeMount = v1Pod.getSpec().getContainers().get(0).getVolumeMounts().get(0);
        assertThat(v1VolumeMount.getName()).isEqualTo(volName2);
        assertThat(v1VolumeMount.getMountPath()).isEqualTo(mountPath);
        assertThat(v1VolumeMount.getReadOnly()).isFalse();
    }
} | 9,997 |
0 | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/pod | Create_ds/titus-control-plane/titus-server-master/src/test/java/com/netflix/titus/master/kubernetes/pod/topology/DefaultTopologyFactoryTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes.pod.topology;
import java.util.Collections;
import java.util.List;
import com.netflix.titus.api.FeatureActivationConfiguration;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.JobConstraints;
import com.netflix.titus.api.jobmanager.model.job.Capacity;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.master.kubernetes.pod.KubePodConfiguration;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.kubernetes.client.openapi.models.V1TopologySpreadConstraint;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.budget;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.officeHourTimeWindow;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.percentageOfHealthyPolicy;
import static com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator.unlimitedRate;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class DefaultTopologyFactoryTest {
    // Disruption budget allowing eviction while 95% of tasks stay healthy; used
    // by the spreading tests to drive skew computation.
    private static final DisruptionBudget PERCENTAGE_OF_HEALTH_POLICY = budget(
            percentageOfHealthyPolicy(95), unlimitedRate(), Collections.singletonList(officeHourTimeWindow())
    );
    // Configuration and feature flags are mocked; stubbed in setUp().
    private final KubePodConfiguration configuration = mock(KubePodConfiguration.class);
    private final FeatureActivationConfiguration features = mock(FeatureActivationConfiguration.class);
    // Subject under test, created fresh for every test in setUp().
    private TopologyFactory topologyFactory;
    @Before
    public void setUp() throws Exception {
        topologyFactory = new DefaultTopologyFactory(configuration, features);
        // "NONE" matches no application name, i.e. spreading stays enabled for all
        // jobs unless a test overrides this stub.
        when(configuration.getDisabledJobSpreadingPattern()).thenReturn("NONE");
        when(configuration.getJobSpreadingSkewAlpha()).thenReturn(3.0);
        when(configuration.getJobSpreadingMaxSkew()).thenReturn(48);
        when(features.isRelocationBinpackingEnabled()).thenReturn(true);
    }
@Test
public void testHardConstraintNameIsCaseInsensitive() {
testConstraintNameIsCaseInsensitive(JobFunctions.appendHardConstraint(JobGenerator.oneBatchJob(), "ZoneBalance", "true"));
}
@Test
public void testSoftConstraintNameIsCaseInsensitive() {
testConstraintNameIsCaseInsensitive(JobFunctions.appendSoftConstraint(JobGenerator.oneBatchJob(), "ZoneBalance", "true"));
}
    @Test
    public void testBatchJobSpreading() {
        // Batch jobs only get topology spread constraints when explicitly enabled
        // via job attributes; a zone-balance constraint adds a second one.
        // By default no job spreading
        Job<BatchJobExt> job = JobGenerator.oneBatchJob();
        List<V1TopologySpreadConstraint> constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).isEmpty();
        // Enable via job attribute
        job = JobFunctions.appendJobDescriptorAttribute(job, JobAttributes.JOB_ATTRIBUTES_SPREADING_ENABLED, "true");
        job = JobFunctions.appendJobDescriptorAttribute(job, JobAttributes.JOB_ATTRIBUTES_SPREADING_MAX_SKEW, "10");
        constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).hasSize(1);
        // The attribute-provided max skew must be honored.
        assertThat(constraints.get(0).getMaxSkew()).isEqualTo(10);
        // And now add zone constraint
        job = JobFunctions.appendSoftConstraint(job, JobConstraints.ZONE_BALANCE, "true");
        constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).hasSize(2);
    }
    @Test
    public void testServiceJobSpreadingWithAvailabilityPercentageDisruptionBudget() {
        // Service jobs with an availability-percentage disruption budget get job
        // spreading by default; the attribute flag can disable it.
        Job<ServiceJobExt> job = JobGenerator.serviceJobs(JobDescriptorGenerator.oneTaskServiceJobDescriptor()).getValue();
        job = JobFunctions.changeServiceJobCapacity(job, Capacity.newBuilder().withDesired(100).withMax(100).build());
        job = JobFunctions.changeDisruptionBudget(job, PERCENTAGE_OF_HEALTH_POLICY);
        List<V1TopologySpreadConstraint> constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).hasSize(1);
        // NOTE(review): 33 presumably derives from desired=100 and skew alpha=3.0
        // stubbed in setUp() — confirm against DefaultTopologyFactory's formula.
        assertThat(constraints.get(0).getMaxSkew()).isEqualTo(33);
        // And now add zone constraint
        job = JobFunctions.appendSoftConstraint(job, JobConstraints.ZONE_BALANCE, "true");
        job = JobFunctions.appendJobDescriptorAttribute(job, JobAttributes.JOB_ATTRIBUTES_SPREADING_MAX_SKEW, "10");
        constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).hasSize(2);
        assertThat(constraints.get(1).getMaxSkew()).isEqualTo(10);
        // Disable via job attribute
        job = JobFunctions.appendJobDescriptorAttribute(job, JobAttributes.JOB_ATTRIBUTES_SPREADING_ENABLED, "false");
        constraints = topologyFactory.buildTopologySpreadConstraints(job);
        assertThat(constraints).hasSize(1);
    }
@Test
public void testJobSpreadingDisabledConfiguration() {
Job<ServiceJobExt> job = JobGenerator.serviceJobs(JobDescriptorGenerator.oneTaskServiceJobDescriptor()).getValue();
job = JobFunctions.changeServiceJobCapacity(job, Capacity.newBuilder().withDesired(100).withMax(100).build());
job = JobFunctions.changeDisruptionBudget(job, PERCENTAGE_OF_HEALTH_POLICY);
assertThat(topologyFactory.buildTopologySpreadConstraints(job)).hasSize(1);
when(configuration.getDisabledJobSpreadingPattern()).thenReturn(".*");
assertThat(topologyFactory.buildTopologySpreadConstraints(job)).isEmpty();
}
    @Test
    public void jobSpreadingDisabledWhenBinpackingForRelocation() {
        // With a self-managed disruption budget and relocation binpacking enabled
        // (see test setup), no topology spread constraints should be produced.
        // NOTE(review): the `but` mutator returns a DisruptionBudget builder without
        // calling build(); presumably the generator applies it — confirm this actually
        // swaps in the self-managed policy rather than silently leaving the default.
        JobDescriptor<ServiceJobExt> jobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor().but(
                jd -> jd.getDisruptionBudget().toBuilder()
                        .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder().build())
        );
        Job<ServiceJobExt> job = JobGenerator.serviceJobs(jobDescriptor).getValue();
        assertThat(topologyFactory.buildTopologySpreadConstraints(job)).isEmpty();
    }
private void testConstraintNameIsCaseInsensitive(Job<BatchJobExt> job) {
List<V1TopologySpreadConstraint> constraints = topologyFactory.buildTopologySpreadConstraints(job);
assertThat(constraints).hasSize(1);
assertThat(constraints.get(0).getTopologyKey()).isEqualTo(KubeConstants.NODE_LABEL_ZONE);
}
}
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.master.kubernetes.pod.taint;
import java.util.List;
import com.netflix.titus.api.jobmanager.JobConstraints;
import com.netflix.titus.api.jobmanager.model.job.Container;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.master.kubernetes.pod.KubePodConfiguration;
import com.netflix.titus.master.service.management.ApplicationSlaManagementService;
import com.netflix.titus.runtime.kubernetes.KubeConstants;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.kubernetes.client.openapi.models.V1Toleration;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
public class DefaultTaintTolerationFactoryTest {
private final KubePodConfiguration configuration = mock(KubePodConfiguration.class);
private final ApplicationSlaManagementService capacityManagement = mock(ApplicationSlaManagementService.class);
private final DefaultTaintTolerationFactory factory = new DefaultTaintTolerationFactory(
configuration,
capacityManagement
);
@Test
public void decommissioningNodesAreTolerated() {
List<V1Toleration> tolerations = factory.buildV1Toleration(JobGenerator.oneBatchJob(), JobGenerator.oneBatchTask());
assertThat(tolerations).contains(Tolerations.TOLERATION_DECOMMISSIONING);
List<V1Toleration> withConstraints = factory.buildV1Toleration(newJobWithConstraint(JobConstraints.ACTIVE_HOST, "true"), JobGenerator.oneBatchTask());
assertThat(withConstraints).doesNotContain(Tolerations.TOLERATION_DECOMMISSIONING);
}
@Test
public void testGpuInstanceAssignment() {
List<V1Toleration> tolerations = factory.buildV1Toleration(newGpuJob(), JobGenerator.oneBatchTask());
V1Toleration gpuToleration = tolerations.stream().filter(t -> t.getKey().equals(KubeConstants.TAINT_GPU_INSTANCE)).findFirst().orElse(null);
assertThat(gpuToleration).isEqualTo(Tolerations.TOLERATION_GPU_INSTANCE);
}
@Test
public void testKubeBackendToleration() {
List<V1Toleration> tolerations = factory.buildV1Toleration(newJobWithConstraint(JobConstraints.KUBE_BACKEND, "kublet"), JobGenerator.oneBatchTask());
V1Toleration gpuToleration = tolerations.stream().filter(t -> t.getKey().equals(KubeConstants.TAINT_KUBE_BACKEND)).findFirst().orElse(null);
assertThat(gpuToleration.getKey()).isEqualTo(KubeConstants.TAINT_KUBE_BACKEND);
assertThat(gpuToleration.getValue()).isEqualTo("kublet");
}
private Job newJobWithConstraint(String name, String value) {
return JobFunctions.appendHardConstraint(JobGenerator.oneBatchJob(), name, value);
}
private Job<BatchJobExt> newGpuJob() {
Job<BatchJobExt> template = JobGenerator.oneBatchJob();
JobDescriptor<BatchJobExt> jobDescriptor = template.getJobDescriptor();
Container container = jobDescriptor.getContainer();
return template.toBuilder()
.withJobDescriptor(jobDescriptor.toBuilder()
.withContainer(container.toBuilder()
.withContainerResources(container.getContainerResources().toBuilder().withGpu(1).build())
.build()
)
.build()
)
.build();
}
} | 9,999 |