index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/CompositeCallMetadataResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import java.util.List;
import java.util.Optional;
/**
 * A {@link CallMetadataResolver} that delegates to an ordered chain of resolvers and
 * returns the first non-empty result. Resolvers later in the chain are never invoked
 * once an earlier one produces a value.
 */
public class CompositeCallMetadataResolver implements CallMetadataResolver {

    private final List<CallMetadataResolver> resolvers;

    public CompositeCallMetadataResolver(List<CallMetadataResolver> resolvers) {
        this.resolvers = resolvers;
    }

    @Override
    public Optional<CallMetadata> resolve() {
        // Streams are lazy, so downstream resolvers are only consulted until a match is found,
        // exactly like the equivalent for-loop with an early return.
        return resolvers.stream()
                .map(CallMetadataResolver::resolve)
                .filter(Optional::isPresent)
                .findFirst()
                .orElse(Optional.empty());
    }
}
| 1,300 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/SimpleGrpcCallMetadataResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import java.util.Collections;
import java.util.Optional;
import javax.inject.Singleton;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.Caller;
import com.netflix.titus.api.model.callmetadata.CallerType;
import com.netflix.titus.common.util.CollectionsExt;
import io.grpc.Context;
import static com.netflix.titus.common.util.Evaluators.getOrDefault;
import static java.util.Arrays.asList;
/**
 * Resolves {@link CallMetadata} from the current GRPC server call context.
 * <p>
 * If a fully formed {@link CallMetadata} was forwarded by an upstream service (via
 * {@link V3HeaderInterceptor#CALL_METADATA_CONTEXT_KEY}), it is extended with the direct caller.
 * Otherwise the metadata is assembled from the individual caller id/type/reason context keys.
 */
@Singleton
public class SimpleGrpcCallMetadataResolver implements CallMetadataResolver {

    @Override
    public Optional<CallMetadata> resolve() {
        if (Context.current() == Context.ROOT) {
            // Not in a GRPC server call.
            return Optional.empty();
        }

        CallMetadata forwardedCallMetadata = V3HeaderInterceptor.CALL_METADATA_CONTEXT_KEY.get();
        Caller directCaller = resolveDirectCallerInternal();

        // If we have a forwarded CallMetadata instance, we can safely ignore other headers,
        // except the direct caller, which is appended to the call path and caller list.
        if (forwardedCallMetadata != null) {
            return Optional.of(forwardedCallMetadata.toBuilder()
                    .withCallPath(CollectionsExt.copyAndAdd(forwardedCallMetadata.getCallPath(), directCaller.getId()))
                    .withCallers(CollectionsExt.copyAndAdd(forwardedCallMetadata.getCallers(), directCaller))
                    .build());
        }

        // No CallMetadata in the headers, so we must build it here from the individual context keys.
        String callerId = V3HeaderInterceptor.CALLER_ID_CONTEXT_KEY.get();
        String callReason = getOrDefault(V3HeaderInterceptor.CALL_REASON_CONTEXT_KEY.get(), "reason not given");

        CallMetadata.Builder callMetadataBuilder = CallMetadata.newBuilder().withCallReason(callReason);
        if (callerId == null) {
            // No original caller header: the direct caller is the only known caller.
            callMetadataBuilder
                    .withCallerId(directCaller.getId())
                    .withCallPath(Collections.singletonList(directCaller.getId()))
                    .withCallers(Collections.singletonList(directCaller));
        } else {
            // Original caller forwarded by header: record it first, followed by the direct caller.
            Caller originalCaller = Caller.newBuilder()
                    .withId(callerId)
                    .withCallerType(CallerType.parseCallerType(callerId, V3HeaderInterceptor.CALLER_TYPE_CONTEXT_KEY.get()))
                    .build();
            callMetadataBuilder
                    .withCallerId(callerId)
                    .withCallPath(asList(callerId, directCaller.getId()))
                    .withCallers(asList(originalCaller, directCaller));
        }
        return Optional.of(callMetadataBuilder.build());
    }

    /**
     * Extension point for subclasses that can identify the direct caller by other means.
     * Returning {@link Optional#empty()} falls back to GRPC context-key based resolution.
     */
    protected Optional<Caller> resolveDirectCaller() {
        return Optional.empty();
    }

    // Resolves the direct caller, falling back to the GRPC context keys (with an "unknown"
    // placeholder id) when neither a subclass nor the headers can identify it.
    private Caller resolveDirectCallerInternal() {
        return resolveDirectCaller().orElseGet(() ->
                {
                    String directCallerId = getOrDefault(V3HeaderInterceptor.DIRECT_CALLER_ID_CONTEXT_KEY.get(), CommonCallMetadataUtils.UNKNOWN_CALLER_ID);
                    CallerType directCallerType = CallerType.parseCallerType(directCallerId, V3HeaderInterceptor.DIRECT_CALLER_TYPE_CONTEXT_KEY.get());
                    return Caller.newBuilder()
                            .withId(directCallerId)
                            .withCallerType(directCallerType)
                            .withContext(V3HeaderInterceptor.CALLER_CONTEXT_CONTEXT_KEY.get())
                            .build();
                }
        );
    }
}
| 1,301 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/SimpleHttpCallMetadataResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.Caller;
import com.netflix.titus.api.model.callmetadata.CallerType;
import com.netflix.titus.common.util.StringExt;
import static com.netflix.titus.common.util.Evaluators.getOrDefault;
import static java.util.Arrays.asList;
/**
 * A {@link CallMetadataResolver} for HTTP requests. The servlet filter
 * {@link CallMetadataInterceptorFilter} captures caller information from request headers into a
 * thread local before the request is processed, and clears it afterwards, so {@link #resolve()}
 * only returns a value while the current thread is processing an HTTP request.
 */
@Singleton
public class SimpleHttpCallMetadataResolver implements CallMetadataResolver {

    // Holds the metadata of the HTTP request currently processed by this thread (if any).
    private final ThreadLocal<CallMetadata> callMetadataThreadLocal = new ThreadLocal<>();

    @Override
    public Optional<CallMetadata> resolve() {
        return Optional.ofNullable(callMetadataThreadLocal.get());
    }

    /**
     * Extension point for subclasses that can identify the direct caller by other means.
     * Returning {@link Optional#empty()} falls back to header/servlet based resolution.
     */
    protected Optional<Caller> resolveDirectCaller(HttpServletRequest httpServletRequest) {
        return Optional.empty();
    }

    /**
     * Builds the direct caller context map (service name/method, transport type and security)
     * from the servlet request.
     */
    protected Map<String, String> getContextFromServletRequest(HttpServletRequest httpServletRequest) {
        Map<String, String> context = new HashMap<>();
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_SERVICE_NAME, httpServletRequest.getRequestURI());
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_SERVICE_METHOD, httpServletRequest.getMethod());
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_TRANSPORT_TYPE, "HTTP");
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_TRANSPORT_SECURE, "" + httpServletRequest.isSecure());
        return context;
    }

    // Resolves the call metadata from the request headers and stores it in the thread local
    // for the duration of the request.
    private void interceptBefore(HttpServletRequest httpServletRequest) {
        Caller directCaller = resolveDirectCaller(httpServletRequest).orElseGet(() -> resolveDirectCallerFromServletRequest(httpServletRequest));
        String callReason = getOrDefault(httpServletRequest.getHeader(CallMetadataHeaders.CALL_REASON_HEADER), "reason not given");
        String originalCallerId = httpServletRequest.getHeader(CallMetadataHeaders.CALLER_ID_HEADER);

        CallMetadata.Builder callMetadataBuilder = CallMetadata.newBuilder().withCallReason(callReason);
        if (originalCallerId == null) {
            // No original caller header: the direct caller is the only known caller.
            callMetadataBuilder
                    .withCallerId(directCaller.getId())
                    .withCallPath(Collections.singletonList(directCaller.getId()))
                    .withCallers(Collections.singletonList(directCaller));
        } else {
            // Original caller forwarded by header: record it first, followed by the direct caller.
            CallerType originalCallerType = CallerType.parseCallerType(originalCallerId, httpServletRequest.getHeader(CallMetadataHeaders.CALLER_TYPE_HEADER));
            Caller originalCaller = Caller.newBuilder()
                    .withId(originalCallerId)
                    .withCallerType(originalCallerType)
                    .build();
            callMetadataBuilder
                    .withCallerId(originalCallerId)
                    .withCallPath(asList(originalCallerId, directCaller.getId()))
                    .withCallers(asList(originalCaller, directCaller));
        }
        callMetadataThreadLocal.set(callMetadataBuilder.build());
    }

    // Header/servlet based fallback for identifying the direct caller. Falls back from the
    // direct caller header, to the remote user, to the remote host, to an "unknown" placeholder.
    private Caller resolveDirectCallerFromServletRequest(HttpServletRequest httpServletRequest) {
        String directCallerId = httpServletRequest.getHeader(CallMetadataHeaders.DIRECT_CALLER_ID_HEADER);
        Caller.Builder callerBuilder = Caller.newBuilder();
        if (StringExt.isEmpty(directCallerId)) {
            String httpClientId = httpServletRequest.getRemoteUser();
            if (httpClientId == null) {
                httpClientId = httpServletRequest.getRemoteHost();
            }
            if (httpClientId == null) {
                httpClientId = CommonCallMetadataUtils.UNKNOWN_CALLER_ID;
            }
            callerBuilder
                    .withId(httpClientId)
                    .withCallerType(CallerType.Unknown);
        } else {
            callerBuilder
                    .withId(directCallerId)
                    .withCallerType(CallerType.parseCallerType(directCallerId, httpServletRequest.getHeader(CallMetadataHeaders.DIRECT_CALLER_TYPE_HEADER)));
        }
        return callerBuilder
                .withContext(getContextFromServletRequest(httpServletRequest))
                .build();
    }

    private void interceptAfter() {
        // Use remove() rather than set(null): pooled server threads would otherwise keep a
        // stale ThreadLocal map entry alive after the request completes.
        callMetadataThreadLocal.remove();
    }

    /**
     * Servlet filter that populates {@link SimpleHttpCallMetadataResolver}'s thread local for
     * the duration of each HTTP request.
     */
    @Singleton
    public static class CallMetadataInterceptorFilter implements Filter {

        private final SimpleHttpCallMetadataResolver resolver;

        @Inject
        public CallMetadataInterceptorFilter(SimpleHttpCallMetadataResolver resolver) {
            this.resolver = resolver;
        }

        @Override
        public void init(FilterConfig filterConfig) {
        }

        @Override
        public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
            if (!(request instanceof HttpServletRequest)) {
                // Not an HTTP request (e.g. another servlet transport). The previous unconditional
                // cast would throw ClassCastException here; pass the request through untouched.
                chain.doFilter(request, response);
                return;
            }
            try {
                resolver.interceptBefore((HttpServletRequest) request);
                chain.doFilter(request, response);
            } finally {
                resolver.interceptAfter();
            }
        }

        @Override
        public void destroy() {
        }
    }
}
| 1,302 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/CallMetadataResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import java.util.Optional;
/**
* Additional information associated with a client's request.
*/
public interface CallMetadataResolver {

    /**
     * Resolves the {@link CallMetadata} associated with the request being processed in the
     * current execution context, or {@link Optional#empty()} when the current thread is not
     * handling a client request (or the transport-specific resolver cannot identify one).
     */
    Optional<CallMetadata> resolve();
}
| 1,303 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/SimpleCallMetadataResolverProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import javax.inject.Provider;
import javax.inject.Singleton;
import static java.util.Arrays.asList;
/**
 * Provides a {@link CallMetadataResolver} that tries the GRPC resolver first and falls back
 * to the HTTP resolver, so a single instance can serve both transports.
 */
@Singleton
public class SimpleCallMetadataResolverProvider implements Provider<CallMetadataResolver> {

    private final CompositeCallMetadataResolver resolver;

    public SimpleCallMetadataResolverProvider() {
        // GRPC resolution is attempted before HTTP; order matters for mixed-transport servers.
        CallMetadataResolver[] chain = {
                new SimpleGrpcCallMetadataResolver(),
                new SimpleHttpCallMetadataResolver()
        };
        this.resolver = new CompositeCallMetadataResolver(asList(chain));
    }

    @Override
    public CallMetadataResolver get() {
        return resolver;
    }
}
| 1,304 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/CommonCallMetadataUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import java.util.Optional;
import java.util.function.BiFunction;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import io.grpc.stub.AbstractStub;
/**
 * Helper functions shared by the {@link CallMetadataResolver} implementations.
 */
public class CommonCallMetadataUtils {

    /** Placeholder id used when the direct caller cannot be identified from the request. */
    public static final String UNKNOWN_CALLER_ID = "unknownDirectCaller";

    // Utility class; not meant to be instantiated.
    private CommonCallMetadataUtils() {
    }

    /**
     * Returns a decorator that attaches {@link CallMetadata} headers to a GRPC stub: the
     * explicitly provided metadata when present, otherwise whatever the given resolver yields,
     * otherwise the stub unchanged.
     */
    public static <STUB extends AbstractStub<STUB>> BiFunction<STUB, Optional<CallMetadata>, STUB> newGrpcStubDecorator(CallMetadataResolver callMetadataResolver) {
        return (grpcStub, callMetadataOpt) -> callMetadataOpt
                .map(callMetadata -> V3HeaderInterceptor.attachCallMetadata(grpcStub, callMetadata))
                .orElseGet(() ->
                        callMetadataResolver.resolve()
                                .map(callMetadata -> V3HeaderInterceptor.attachCallMetadata(grpcStub, callMetadata))
                                .orElse(grpcStub)
                );
    }
}
| 1,305 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/CallMetadataResolveComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata;
import javax.inject.Named;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.stereotype.Component;
import static java.util.Arrays.asList;
/**
 * Spring wiring for the call-metadata resolvers: exposes the HTTP and GRPC resolvers as named
 * beans, and a composite of the two as the primary {@link CallMetadataResolver}.
 */
@Component
public class CallMetadataResolveComponent {

    public static final String HTTP_RESOLVER = "http";
    public static final String GRPC_RESOLVER = "grpc";

    @Bean(name = HTTP_RESOLVER)
    public SimpleHttpCallMetadataResolver getSimpleHttpCallMetadataResolver() {
        return new SimpleHttpCallMetadataResolver();
    }

    @Bean(name = GRPC_RESOLVER)
    public SimpleGrpcCallMetadataResolver getSimpleGrpcCallMetadataResolver() {
        return new SimpleGrpcCallMetadataResolver();
    }

    @Bean
    @Primary
    public CallMetadataResolver getCallMetadataResolver(@Named(HTTP_RESOLVER) CallMetadataResolver httpCallMetadataResolver,
                                                        @Named(GRPC_RESOLVER) CallMetadataResolver grpcCallMetadataResolver) {
        // GRPC resolution is attempted before HTTP; order matters for mixed-transport servers.
        CompositeCallMetadataResolver composite = new CompositeCallMetadataResolver(
                asList(grpcCallMetadataResolver, httpCallMetadataResolver)
        );
        return composite;
    }
}
| 1,306 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/spring/SpringCallMetadataWebConfigurer.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata.spring;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
 * Registers {@link SpringCallMetadataInterceptor} with Spring MVC so every handled request gets
 * its {@link org.springframework.security.core.Authentication} decorated with call metadata.
 */
@Configuration
public class SpringCallMetadataWebConfigurer implements WebMvcConfigurer {

    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        registry.addInterceptor(new SpringCallMetadataInterceptor());
    }
}
| 1,307 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/spring/SpringCallMetadataInterceptor.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata.spring;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.Caller;
import com.netflix.titus.api.model.callmetadata.CallerType;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataHeaders;
import io.swagger.annotations.ApiModelProperty;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
import static java.util.Arrays.asList;
/**
* Spring interceptor to decorate a request {@link Authentication} object with {@link CallMetadata}.
*/
/**
 * Spring interceptor to decorate a request {@link Authentication} object with {@link CallMetadata}.
 */
public class SpringCallMetadataInterceptor extends HandlerInterceptorAdapter {

    // Query parameter that toggles debug mode for a single request; takes precedence over the
    // debug header when present. (The @ApiModelProperty annotation previously placed here was
    // removed: it is a Swagger model-property annotation with no effect on a static constant.)
    @VisibleForTesting
    static final String DEBUG_QUERY_PARAM = "debug";

    /**
     * Wraps the current {@link Authentication} (when present and not already wrapped) in a
     * {@link CallMetadataAuthentication} carrying caller id/type, call reason and debug flag
     * extracted from the request headers and query parameters. Always continues the chain.
     */
    @Override
    public boolean preHandle(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object handler) throws Exception {
        Authentication delegate = SecurityContextHolder.getContext().getAuthentication();
        // Nothing to decorate without an authentication; also avoid double-wrapping.
        if (delegate == null || delegate instanceof CallMetadataAuthentication) {
            return super.preHandle(httpServletRequest, httpServletResponse, handler);
        }

        String callReason = httpServletRequest.getHeader(CallMetadataHeaders.CALL_REASON_HEADER);
        String debugQueryParameter = httpServletRequest.getParameter(DEBUG_QUERY_PARAM);
        boolean debug = debugQueryParameter == null
                ? Boolean.parseBoolean(httpServletRequest.getHeader(CallMetadataHeaders.DEBUG_HEADER))
                : Boolean.parseBoolean(debugQueryParameter);

        String originalCallerId = httpServletRequest.getHeader(CallMetadataHeaders.CALLER_ID_HEADER);
        Caller directCaller = getDirectCaller(httpServletRequest, delegate);

        // NOTE(review): unlike SimpleHttpCallMetadataResolver, withCallerId/withCallPath are never
        // set here — confirm downstream consumers rely only on the callers list.
        CallMetadata.Builder callMetadataBuilder = CallMetadata.newBuilder()
                .withCallReason(callReason)
                .withDebug(debug);
        if (originalCallerId == null) {
            callMetadataBuilder.withCallers(Collections.singletonList(directCaller));
        } else {
            // Original caller forwarded by header: record it first, followed by the direct caller.
            CallerType originalCallerType = CallerType.parseCallerType(
                    originalCallerId,
                    httpServletRequest.getHeader(CallMetadataHeaders.CALLER_TYPE_HEADER)
            );
            Caller originalCaller = Caller.newBuilder()
                    .withId(originalCallerId)
                    .withCallerType(originalCallerType)
                    .build();
            callMetadataBuilder.withCallers(asList(originalCaller, directCaller));
        }

        CallMetadataAuthentication authentication = new CallMetadataAuthentication(callMetadataBuilder.build(), delegate);
        SecurityContextHolder.getContext().setAuthentication(authentication);

        return super.preHandle(httpServletRequest, httpServletResponse, handler);
    }

    /**
     * Builds the direct caller from the authenticated principal's name and the servlet request
     * context. Overridable by subclasses that can identify the caller more precisely.
     */
    protected Caller getDirectCaller(HttpServletRequest httpServletRequest, Authentication delegate) {
        // If there is an original caller header, we know the direct caller must be an application.
        CallerType callerType = httpServletRequest.getHeader(CallMetadataHeaders.CALLER_ID_HEADER) == null
                ? CallerType.Unknown
                : CallerType.Application;
        return Caller.newBuilder()
                .withId(delegate.getName())
                .withCallerType(callerType)
                .withContext(getContextFromServletRequest(httpServletRequest))
                .build();
    }

    /**
     * Builds the direct caller context map (service name/method, transport type and security)
     * from the servlet request.
     */
    protected Map<String, String> getContextFromServletRequest(HttpServletRequest httpServletRequest) {
        Map<String, String> context = new HashMap<>();
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_SERVICE_NAME, httpServletRequest.getRequestURI());
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_SERVICE_METHOD, httpServletRequest.getMethod());
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_TRANSPORT_TYPE, "HTTP");
        context.put(CallMetadataHeaders.DIRECT_CALLER_CONTEXT_TRANSPORT_SECURE, "" + httpServletRequest.isSecure());
        return context;
    }
}
| 1,308 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/metadata/spring/CallMetadataAuthentication.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.metadata.spring;
import java.util.Collection;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import io.swagger.annotations.ApiModelProperty;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.GrantedAuthority;
/**
* A decorator for the {@link Authentication} object, which adds resolved request {@link CallMetadata}.
*/
/**
 * A decorator for the {@link Authentication} object, which adds resolved request {@link CallMetadata}.
 * All {@link Authentication} methods delegate unchanged to the wrapped instance.
 * <p>
 * The {@code @ApiModelProperty(hidden = true)} annotations presumably keep these accessors out of
 * generated Swagger models if this type ever leaks into an API payload — TODO confirm intent.
 */
public class CallMetadataAuthentication implements Authentication {

    // The original authentication produced by the security framework.
    @ApiModelProperty(hidden = true)
    private final Authentication delegate;

    // Call metadata resolved for the current request.
    @ApiModelProperty(hidden = true)
    private final CallMetadata callMetadata;

    public CallMetadataAuthentication(CallMetadata callMetadata, Authentication delegate) {
        this.delegate = delegate;
        this.callMetadata = callMetadata;
    }

    /** Returns the call metadata attached to this request's authentication. */
    @ApiModelProperty(hidden = true)
    public CallMetadata getCallMetadata() {
        return callMetadata;
    }

    @Override
    @ApiModelProperty(hidden = true)
    public String getName() {
        return delegate.getName();
    }

    @Override
    @ApiModelProperty(hidden = true)
    public Collection<? extends GrantedAuthority> getAuthorities() {
        return delegate.getAuthorities();
    }

    @Override
    @ApiModelProperty(hidden = true)
    public Object getCredentials() {
        return delegate.getCredentials();
    }

    @Override
    @ApiModelProperty(hidden = true)
    public Object getDetails() {
        return delegate.getDetails();
    }

    @Override
    @ApiModelProperty(hidden = true)
    public Object getPrincipal() {
        return delegate.getPrincipal();
    }

    @Override
    @ApiModelProperty(hidden = true)
    public boolean isAuthenticated() {
        return delegate.isAuthenticated();
    }

    @Override
    public void setAuthenticated(boolean isAuthenticated) throws IllegalArgumentException {
        delegate.setAuthenticated(isAuthenticated);
    }
}
| 1,309 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/FitSpringResource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.ws.rs.core.Response;
import com.netflix.titus.common.framework.fit.FitAction;
import com.netflix.titus.common.framework.fit.FitComponent;
import com.netflix.titus.common.framework.fit.FitFramework;
import com.netflix.titus.common.framework.fit.FitInjection;
import com.netflix.titus.common.framework.fit.FitUtil;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.runtime.Fit;
import com.netflix.titus.runtime.endpoint.fit.ProtobufFitConverters;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
 * REST endpoint for the FIT (failure injection testing) framework: browse the component tree,
 * list action descriptors, and add/remove actions at injection points.
 */
@RestController
@RequestMapping(path = "/api/diagnostic/fit")
public class FitSpringResource {

    private final FitFramework fitFramework;

    @Inject
    public FitSpringResource(TitusRuntime titusRuntime) {
        this.fitFramework = titusRuntime.getFitFramework();
    }

    /** Returns the FIT component tree rooted at the framework's root component. */
    @RequestMapping(method = RequestMethod.GET, path = "/components", produces = "application/json")
    public Fit.FitComponent getFitComponents() {
        return ProtobufFitConverters.toGrpcFitComponent(fitFramework.getRootComponent());
    }

    /** Lists descriptors of all registered FIT action kinds. */
    @RequestMapping(method = RequestMethod.GET, path = "/actionDescriptors", produces = "application/json")
    public Fit.FitActionDescriptors getFitActionDescriptors() {
        List<Fit.FitActionDescriptor> descriptors = fitFramework.getFitRegistry().getFitActionDescriptors().stream()
                .map(ProtobufFitConverters::toGrpcFitActionDescriptor)
                .collect(Collectors.toList());
        return Fit.FitActionDescriptors.newBuilder().addAllDescriptors(descriptors).build();
    }

    /** Lists all FIT actions currently attached anywhere in the component tree. */
    @RequestMapping(method = RequestMethod.GET, path = "/actions", produces = "application/json")
    public Fit.FitActions getActions() {
        List<Fit.FitAction> actions = findAllActions(fitFramework.getRootComponent()).stream()
                .map(ProtobufFitConverters::toGrpcFitAction)
                .collect(Collectors.toList());
        return Fit.FitActions.newBuilder().addAllActions(actions).build();
    }

    /**
     * Adds a new FIT action to the requested injection point.
     * <p>
     * Returns Spring's {@link ResponseEntity}: the previous JAX-RS {@code Response} return value
     * is not understood by Spring MVC — it would be serialized as a JSON body rather than honored
     * as an HTTP 204 status.
     */
    @RequestMapping(method = RequestMethod.POST, path = "/actions", consumes = "application/json")
    public ResponseEntity<Void> addAction(@RequestBody Fit.AddAction request) {
        FitComponent fitComponent = FitUtil.getFitComponentOrFail(fitFramework, request.getComponentId());
        FitInjection fitInjection = FitUtil.getFitInjectionOrFail(request.getInjectionId(), fitComponent);
        Function<FitInjection, FitAction> fitActionFactory = fitFramework.getFitRegistry().newFitActionFactory(
                request.getActionKind(), request.getActionId(), request.getPropertiesMap()
        );
        fitInjection.addAction(fitActionFactory.apply(fitInjection));
        return ResponseEntity.noContent().build();
    }

    /** Removes a FIT action from the requested injection point. Responds with HTTP 204. */
    @RequestMapping(method = RequestMethod.DELETE, path = "/actions/{actionId}")
    public ResponseEntity<Void> deleteAction(@PathVariable("actionId") String actionId,
                                             @RequestParam("componentId") String componentId,
                                             @RequestParam("injectionId") String injectionId) {
        FitInjection fitInjection = FitUtil.getFitInjectionOrFail(injectionId, FitUtil.getFitComponentOrFail(fitFramework, componentId));
        fitInjection.removeAction(FitUtil.getFitActionOrFail(actionId, fitInjection).getId());
        return ResponseEntity.noContent().build();
    }

    // Depth-first collection of the actions attached to this component and all of its children.
    private List<FitAction> findAllActions(FitComponent fitComponent) {
        List<FitAction> result = new ArrayList<>();
        fitComponent.getInjections().forEach(i -> result.addAll(i.getActions()));
        fitComponent.getChildren().forEach(c -> result.addAll(findAllActions(c)));
        return result;
    }
}
| 1,310 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/ErrorResponses.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.URL;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.fasterxml.jackson.core.JsonLocation;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.fasterxml.jackson.databind.exc.PropertyBindingException;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.common.util.NetworkExt;
import com.netflix.titus.common.util.StringExt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.context.request.WebRequest;
/**
* Collection of helper functions to build {@link ErrorResponse} instances.
*/
public final class ErrorResponses {

    private static final Logger logger = LoggerFactory.getLogger(ErrorResponses.class);

    // Shared, default-configured mapper for serializing ErrorResponse bodies.
    // ObjectMapper is thread-safe once configured, so a single instance is fine.
    private static final ObjectMapper errorMapper = new ObjectMapper();

    private ErrorResponses() {
    }

    /**
     * Builds a 400 error for a request that arrived with an empty body.
     */
    public static ErrorResponse noRequestBody(HttpServletRequest httpServletRequest) {
        return ErrorResponse.newError(HttpServletResponse.SC_BAD_REQUEST, "empty request body")
                .clientRequest(httpServletRequest)
                .serverContext()
                .threadContext()
                .build();
    }

    /**
     * Builds a 400 error with the given message.
     */
    public static ErrorResponse badRequest(HttpServletRequest httpServletRequest, String message) {
        return ErrorResponse.newError(HttpServletResponse.SC_BAD_REQUEST, message)
                .clientRequest(httpServletRequest)
                .serverContext()
                .threadContext()
                .build();
    }

    /**
     * Builds a 400 error with the given message and the exception's context attached.
     */
    public static ErrorResponse badRequest(HttpServletRequest httpServletRequest, String message, Exception cause) {
        return ErrorResponse.newError(HttpServletResponse.SC_BAD_REQUEST, message)
                .clientRequest(httpServletRequest)
                .serverContext()
                .threadContext()
                .exceptionContext(cause)
                .build();
    }

    /**
     * Builds a 400 error whose message is derived from the exception's cause chain.
     */
    public static ErrorResponse badRequest(HttpServletRequest httpServletRequest, Exception cause) {
        return badRequest(httpServletRequest, toMessageChain(cause), cause);
    }

    /**
     * Builds a 500 error whose message is derived from the exception's cause chain.
     */
    public static ErrorResponse internalServerError(HttpServletRequest httpServletRequest, Exception cause) {
        return ErrorResponse.newError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, toMessageChain(cause))
                .clientRequest(httpServletRequest)
                .serverContext()
                .exceptionContext(cause)
                .build();
    }

    public static void sendError(HttpServletResponse httpServletResponse, ErrorResponse.ErrorResponseBuilder errorResponseBuilder) {
        sendError(httpServletResponse, errorResponseBuilder.build());
    }

    /**
     * Writes the given error as an "application/json" reply body with the error's status code.
     * Serialization failures are logged but never propagated to the caller.
     */
    public static void sendError(HttpServletResponse httpServletResponse, ErrorResponse errorResponse) {
        httpServletResponse.setStatus(errorResponse.getStatusCode());
        httpServletResponse.setContentType("application/json");
        try (PrintWriter writer = httpServletResponse.getWriter()) {
            errorMapper.writeValue(writer, errorResponse);
        } catch (IOException e) {
            // Do not propagate this error further, just leave log entry
            logger.warn("Reply body serialization error", e);
        }
    }

    /**
     * Formats the full cause chain as "msg(Type) --> msg(Type) --> ...".
     */
    private static String toMessageChain(Throwable cause) {
        if (cause.getCause() == null) {
            return getDesiredMessage(cause);
        }
        StringBuilder sb = new StringBuilder();
        for (Throwable ex = cause; ex != null; ex = ex.getCause()) {
            // Bug fix: use the current link 'ex', not the root 'cause', so each chain
            // element carries its own message rather than repeating the first one.
            sb.append(getDesiredMessage(ex)).append('(').append(ex.getClass().getSimpleName()).append(')');
            if (ex.getCause() != null) {
                sb.append(" --> ");
            }
        }
        return sb.toString();
    }

    /**
     * Jackson exceptions embed location details in {@code getMessage()}; prefer the
     * original (location-free) message for those.
     */
    private static String getDesiredMessage(Throwable cause) {
        if (cause instanceof JsonProcessingException) {
            return ((JsonProcessingException) cause).getOriginalMessage();
        }
        return cause.getMessage();
    }

    /**
     * Captures the request URI, method and all headers into a sorted map for the error context.
     */
    static Map<String, Object> buildHttpRequestContext(HttpServletRequest req) {
        Map<String, Object> result = new TreeMap<>();

        StringBuilder uriBuilder = new StringBuilder(req.getServletPath());
        if (req.getPathInfo() != null) {
            uriBuilder.append(req.getPathInfo());
        }
        if (req.getQueryString() != null) {
            uriBuilder.append('?').append(req.getQueryString());
        }
        result.put("relativeURI", uriBuilder.toString());
        result.put("method", req.getMethod());

        Enumeration<String> headerIt = req.getHeaderNames();
        while (headerIt.hasMoreElements()) {
            String header = headerIt.nextElement();
            result.put(header, req.getHeader(header));
        }

        return result;
    }

    /**
     * Captures basic {@link WebRequest} details (context path, security flag, remote user) for the error context.
     */
    public static Map<String, Object> buildWebRequestContext(WebRequest webRequest) {
        Map<String, Object> result = new TreeMap<>();
        result.put("relativeURI", StringExt.safeTrim(webRequest.getContextPath()));
        result.put("secure", webRequest.isSecure());
        Evaluators.acceptNotNull(webRequest.getRemoteUser(), user -> result.put("remoteUser", user));
        return result;
    }

    /**
     * Collect basic information about the server.
     */
    static Map<String, Object> buildServerContext() {
        Map<String, Object> result = new TreeMap<>();
        Optional<String> hostName = NetworkExt.getHostName();
        if (hostName.isPresent()) {
            result.put("hostName", hostName.get());
        }
        Optional<List<String>> localIPs = NetworkExt.getLocalIPs();
        if (localIPs.isPresent()) {
            // Loopback addresses are filtered out; the remainder is split into IPv4/IPv6 buckets.
            List<String> nonLoopbackIPs = localIPs.get().stream().filter(ip -> !NetworkExt.isLoopbackIP(ip)).collect(Collectors.toList());
            result.put("ipV4", nonLoopbackIPs.stream().filter(ip -> !NetworkExt.isIPv6(ip)).collect(Collectors.toList()));
            result.put("ipV6", nonLoopbackIPs.stream().filter(NetworkExt::isIPv6).collect(Collectors.toList()));
        }
        return result;
    }

    /**
     * Builds a stack trace representation for each throwable in the cause chain, outermost first.
     */
    static List<StackTraceRepresentation> buildExceptionContext(Throwable cause) {
        List<StackTraceRepresentation> stackTraces = new ArrayList<>();
        for (Throwable currentCause = cause; currentCause != null; currentCause = currentCause.getCause()) {
            stackTraces.add(new StackTraceRepresentation(currentCause));
        }
        return stackTraces;
    }

    /**
     * Captures the current thread's stack trace, skipping the leading frames that belong
     * to this error-handling framework itself.
     */
    static List<String> buildThreadContext() {
        StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();

        // First skip JVM-internal frames until the first framework frame...
        int offset = 0;
        while (offset < stackTrace.length && !isErrorFrameworkClass(stackTrace[offset].getClassName())) {
            offset++;
        }
        // ...then skip the framework frames themselves, leaving only the caller's frames.
        while (offset < stackTrace.length && isErrorFrameworkClass(stackTrace[offset].getClassName())) {
            offset++;
        }
        ArrayList<String> result = new ArrayList<>();
        for (; offset < stackTrace.length; offset++) {
            result.add(stackTrace[offset].toString());
        }
        return result;
    }

    private static boolean isErrorFrameworkClass(String className) {
        return ErrorResponse.class.getName().equals(className)
                || ErrorResponses.class.getName().equals(className)
                || ErrorResponse.ErrorResponseBuilder.class.getName().equals(className);
    }

    /**
     * JSON-friendly view of a single throwable: type, origin, message, optional
     * Jackson-specific details and the stack trace.
     */
    static class StackTraceRepresentation {

        private final String message;
        private final String type;
        private final String typeLocation;
        private final Map<String, Object> details;
        private final List<String> stackTrace;

        private StackTraceRepresentation(Throwable cause) {
            this.type = cause.getClass().getName();
            this.typeLocation = getSourceOf(cause.getClass());
            this.message = getDesiredMessage(cause);
            this.stackTrace = getStackTraceOf(cause);
            if (cause instanceof JsonProcessingException) {
                this.details = appendJacksonErrorDetails((JsonProcessingException) cause);
            } else {
                this.details = null;
            }
        }

        public String getType() {
            return type;
        }

        public String getTypeLocation() {
            return typeLocation;
        }

        public String getMessage() {
            return message;
        }

        public Map<String, Object> getDetails() {
            return details;
        }

        public List<String> getStackTrace() {
            return stackTrace;
        }

        /**
         * Returns the classpath location (jar/class file URL) the throwable's class was loaded from,
         * or null when it cannot be resolved.
         */
        private String getSourceOf(Class<? extends Throwable> aClass) {
            URL location = aClass.getResource('/' + aClass.getName().replace('.', '/') + ".class");
            if (location == null) {
                return null;
            }
            return location.toString();
        }

        private ArrayList<String> getStackTraceOf(Throwable cause) {
            ArrayList<String> stackTrace = new ArrayList<>(cause.getStackTrace().length);
            for (StackTraceElement element : cause.getStackTrace()) {
                stackTrace.add(element.toString());
            }
            return stackTrace;
        }

        /**
         * Extracts Jackson-specific failure details (error location, offending document,
         * path reference, target type / unknown property) into a sorted map.
         */
        private Map<String, Object> appendJacksonErrorDetails(JsonProcessingException cause) {
            Map<String, Object> out = new TreeMap<>();
            JsonLocation location = cause.getLocation();
            if (location != null) {
                out.put("errorLocation", "line: " + location.getLineNr() + ", column: " + location.getColumnNr());
                // 'instanceof' is false for null, so the former explicit null check was redundant.
                if (location.getSourceRef() instanceof String) {
                    out.put("document", location.getSourceRef());
                }
            }
            if (cause instanceof JsonMappingException) {
                JsonMappingException mappingEx = (JsonMappingException) cause;
                if (mappingEx.getPathReference() != null) {
                    out.put("pathReference", mappingEx.getPathReference());
                }
                if (cause instanceof InvalidFormatException) {
                    InvalidFormatException formEx = (InvalidFormatException) cause;
                    if (formEx.getTargetType() != null) {
                        out.put("targetType", formEx.getTargetType().getName());
                    }
                } else if (cause instanceof PropertyBindingException) {
                    PropertyBindingException bindingEx = (PropertyBindingException) cause;
                    if (bindingEx.getPropertyName() != null) {
                        out.put("property", bindingEx.getPropertyName());
                        out.put("knownProperties", bindingEx.getKnownPropertyIds());
                    }
                }
            }
            return out;
        }
    }
}
| 1,311 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/LocalSchedulerSpringResource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import com.netflix.titus.common.framework.scheduler.LocalScheduler;
import com.netflix.titus.common.framework.scheduler.endpoint.representation.EvictionRepresentations;
import com.netflix.titus.common.framework.scheduler.endpoint.representation.ScheduleRepresentation;
import com.netflix.titus.common.runtime.TitusRuntime;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping(path = "/api/diagnostic/localScheduler")
@RestController
@RequestMapping(path = "/api/diagnostic/localScheduler")
public class LocalSchedulerSpringResource {

    private final LocalScheduler localScheduler;

    @Inject
    public LocalSchedulerSpringResource(TitusRuntime titusRuntime) {
        this.localScheduler = titusRuntime.getLocalScheduler();
    }

    /**
     * Returns all currently active schedules.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/schedules", produces = "application/json")
    public List<ScheduleRepresentation> getActiveSchedules() {
        return localScheduler.getActiveSchedules()
                .stream()
                .map(EvictionRepresentations::toRepresentation)
                .collect(Collectors.toList());
    }

    /**
     * Returns the active schedule with the given name, or replies 404 when no such schedule exists.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/schedules/{name}", produces = "application/json")
    public ScheduleRepresentation getActiveSchedule(@PathVariable("name") String name) {
        return localScheduler.getActiveSchedules()
                .stream()
                .filter(schedule -> schedule.getDescriptor().getName().equals(name))
                .findFirst()
                .map(EvictionRepresentations::toRepresentation)
                .orElseThrow(() -> new WebApplicationException(Response.status(404).build()));
    }

    /**
     * Returns all archived (no longer active) schedules.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/archived", produces = "application/json")
    public List<ScheduleRepresentation> getArchivedSchedules() {
        return localScheduler.getArchivedSchedules()
                .stream()
                .map(EvictionRepresentations::toRepresentation)
                .collect(Collectors.toList());
    }

    /**
     * Returns the archived schedule with the given name, or replies 404 when no such schedule exists.
     */
    @RequestMapping(method = RequestMethod.GET, path = "/archived/{name}", produces = "application/json")
    public ScheduleRepresentation getArchivedSchedule(@PathVariable("name") String name) {
        return localScheduler.getArchivedSchedules()
                .stream()
                .filter(schedule -> schedule.getDescriptor().getName().equals(name))
                .findFirst()
                .map(EvictionRepresentations::toRepresentation)
                .orElseThrow(() -> new WebApplicationException(Response.status(404).build()));
    }
}
| 1,312 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/TitusProtobufHttpMessageConverter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.Message;
import com.netflix.titus.common.util.jackson.CommonObjectMappers;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpInputMessage;
import org.springframework.http.HttpOutputMessage;
import org.springframework.http.MediaType;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.http.converter.HttpMessageNotReadableException;
import org.springframework.http.converter.HttpMessageNotWritableException;
import org.springframework.http.converter.protobuf.ProtobufHttpMessageConverter;
import org.springframework.stereotype.Controller;
@Controller
@Controller
public class TitusProtobufHttpMessageConverter implements HttpMessageConverter<Message> {

    // Titus protobuf-aware Jackson mapper, used on the write path only.
    private static final ObjectMapper MAPPER = CommonObjectMappers.protobufMapper();

    // Stock Spring protobuf converter; this class delegates to it on the read path only.
    private final HttpMessageConverter<Message> delegate;

    @Inject
    public TitusProtobufHttpMessageConverter() {
        this.delegate = new ProtobufHttpMessageConverter();
    }

    /**
     * Any protobuf message type can be read, irrespective of the request media type.
     */
    @Override
    public boolean canRead(Class<?> clazz, MediaType mediaType) {
        return Message.class.isAssignableFrom(clazz);
    }

    /**
     * Only protobuf messages rendered as JSON can be written.
     */
    @Override
    public boolean canWrite(Class<?> clazz, MediaType mediaType) {
        boolean isProtobufMessage = Message.class.isAssignableFrom(clazz);
        return isProtobufMessage && MediaType.APPLICATION_JSON.equals(mediaType);
    }

    @Override
    public List<MediaType> getSupportedMediaTypes() {
        return Collections.singletonList(MediaType.APPLICATION_JSON);
    }

    @Override
    public Message read(Class<? extends Message> clazz, HttpInputMessage inputMessage) throws IOException, HttpMessageNotReadableException {
        // Parsing is left entirely to the standard Spring protobuf converter.
        return delegate.read(clazz, inputMessage);
    }

    @Override
    public void write(Message entity, MediaType contentType, HttpOutputMessage outputMessage) throws IOException, HttpMessageNotWritableException {
        // Replies are always serialized as JSON via the Titus mapper, regardless of 'contentType'.
        outputMessage.getHeaders().add(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
        MAPPER.writeValue(outputMessage.getBody(), entity);
    }
}
| 1,313 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/RestAddOnsComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import com.netflix.titus.runtime.endpoint.rest.spring.SpringSpectatorWebConfigurer;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
/**
 * Spring component bundling the common REST add-ons: shared exception handlers,
 * the protobuf-to-JSON message converter, Spectator request metrics, and the
 * local-scheduler and FIT diagnostic resources.
 */
@Configuration
@Import({
        CommonExceptionHandlers.class,
        TitusProtobufHttpMessageConverter.class,
        SpringSpectatorWebConfigurer.class,
        LocalSchedulerSpringResource.class,
        FitSpringResource.class,
})
public class RestAddOnsComponent {
}
| 1,314 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/ErrorResponse.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
import javax.servlet.http.HttpServletRequest;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonView;
import com.netflix.titus.common.util.jackson.CommonObjectMappers;
import org.springframework.web.context.request.WebRequest;
/**
* Error representation returned as JSON document for failed REST requests.
*/
public class ErrorResponse {

    /**
     * If 'debug' parameter is included in query, include error context.
     */
    public static final String DEBUG_PARAM = "debug";

    // Well-known keys used in the error context map.
    public static final String CLIENT_REQUEST = "clientRequest";
    public static final String THREAD_CONTEXT = "threadContext";
    public static final String SERVER_CONTEXT = "serverContext";
    public static final String EXCEPTION_CONTEXT = "exception";

    // Always serialized (public view).
    @JsonView(CommonObjectMappers.PublicView.class)
    private final int statusCode;

    @JsonView(CommonObjectMappers.PublicView.class)
    private final String message;

    @JsonView(CommonObjectMappers.PublicView.class)
    private final Object errorDetails;

    // Serialized only in the debug view (see DEBUG_PARAM).
    @JsonView(CommonObjectMappers.DebugView.class)
    private final Map<String, Object> errorContext;

    @JsonCreator
    private ErrorResponse(@JsonProperty("statusCode") int statusCode,
                          @JsonProperty("message") String message,
                          @JsonProperty("errorDetails") Object errorDetails,
                          @JsonProperty("errorContext") Map<String, Object> errorContext) {
        this.statusCode = statusCode;
        this.message = message;
        this.errorDetails = errorDetails;
        this.errorContext = errorContext;
    }

    public int getStatusCode() {
        return statusCode;
    }

    public String getMessage() {
        return message;
    }

    public Object getErrorDetails() {
        return errorDetails;
    }

    /**
     * Arbitrary additional information that can be attached to an error. The only requirement is that
     * it must be serializable by Jackson.
     */
    public Map<String, Object> getErrorContext() {
        return errorContext;
    }

    /**
     * Creates a builder for an error with the given HTTP status code and no message.
     */
    public static ErrorResponseBuilder newError(int statusCode) {
        return new ErrorResponseBuilder().status(statusCode);
    }

    /**
     * Creates a builder for an error with the given HTTP status code and message.
     */
    public static ErrorResponseBuilder newError(int statusCode, String message) {
        return new ErrorResponseBuilder().status(statusCode).message(message);
    }

    /**
     * Builder for {@link ErrorResponse}. Context entries (client request, server, thread,
     * exception) are emitted only when debug mode is enabled; debug mode is derived from
     * the request's 'debug' query parameter (see {@link #DEBUG_PARAM}).
     */
    public static class ErrorResponseBuilder {

        private int statusCode;
        private String message;
        private final Map<String, Object> errorContext = new TreeMap<>();
        private Object errorDetails;
        // When false, build() drops errorDetails and errorContext from the result.
        private boolean debug;

        public ErrorResponseBuilder status(int statusCode) {
            this.statusCode = statusCode;
            return this;
        }

        public ErrorResponseBuilder debug(boolean debug) {
            this.debug = debug;
            return this;
        }

        public ErrorResponseBuilder message(String message) {
            this.message = message;
            return this;
        }

        public ErrorResponseBuilder errorDetails(Object errorDetails) {
            this.errorDetails = errorDetails;
            return this;
        }

        /**
         * Attaches the servlet request's details to the context and sets debug mode
         * from the request's 'debug' query parameter.
         */
        public ErrorResponseBuilder clientRequest(HttpServletRequest httpRequest) {
            setDebugFromRequest(httpRequest.getParameterMap());
            return withContext(CLIENT_REQUEST, ErrorResponses.buildHttpRequestContext(httpRequest));
        }

        /**
         * Attaches the web request's details to the context and sets debug mode
         * from the request's 'debug' query parameter.
         */
        public ErrorResponseBuilder clientRequest(WebRequest webRequest) {
            setDebugFromRequest(webRequest.getParameterMap());
            return withContext(CLIENT_REQUEST, ErrorResponses.buildWebRequestContext(webRequest));
        }

        /**
         * Attaches the current thread's stack trace to the context.
         */
        public ErrorResponseBuilder threadContext() {
            return withContext(THREAD_CONTEXT, ErrorResponses.buildThreadContext());
        }

        /**
         * Attaches basic server information (host name, local IPs) to the context.
         */
        public ErrorResponseBuilder serverContext() {
            return withContext(SERVER_CONTEXT, ErrorResponses.buildServerContext());
        }

        /**
         * Attaches the full cause-chain stack traces of the given throwable to the context.
         */
        public ErrorResponseBuilder exceptionContext(Throwable cause) {
            return withContext(EXCEPTION_CONTEXT, ErrorResponses.buildExceptionContext(cause));
        }

        /**
         * Adds a named context entry; a null value removes any previously-set entry of that name.
         */
        public ErrorResponseBuilder withContext(String name, Object details) {
            if (details == null) {
                errorContext.remove(name);
            } else {
                errorContext.put(name, details);
            }
            return this;
        }

        /**
         * Builds the response; unless debug mode is on, errorDetails and errorContext are omitted.
         */
        public ErrorResponse build() {
            if (debug) {
                return new ErrorResponse(
                        statusCode,
                        message,
                        errorDetails,
                        errorContext.isEmpty() ? null : Collections.unmodifiableMap(errorContext)
                );
            }
            return new ErrorResponse(
                    statusCode,
                    message,
                    null,
                    null
            );
        }

        // Debug is enabled only when the 'debug' query parameter is present and its first value parses as true.
        private void setDebugFromRequest(Map<String, String[]> parameters) {
            if (parameters == null) {
                this.debug = false;
            } else {
                String[] debugParamValue = parameters.get(DEBUG_PARAM);
                this.debug = debugParamValue != null && debugParamValue.length > 0 && Boolean.parseBoolean(debugParamValue[0]);
            }
        }
    }
}
| 1,315 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/CommonExceptionHandlers.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.context.request.WebRequest;
/**
 * Last-resort (lowest precedence) exception mapper that converts any unhandled exception
 * into a 500 JSON {@link ErrorResponse}.
 */
@ControllerAdvice
@Order(Ordered.LOWEST_PRECEDENCE)
public class CommonExceptionHandlers {

    @ExceptionHandler(value = {Exception.class})
    public ResponseEntity<ErrorResponse> handleException(Exception e, WebRequest request) {
        // Prefer the wrapped cause when present, as it usually carries the real failure.
        Throwable rootCause = (e.getCause() != null) ? e.getCause() : e;

        ErrorResponse body = ErrorResponse.newError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, rootCause.getMessage())
                .clientRequest(request)
                .serverContext()
                .exceptionContext(rootCause)
                .build();

        return ResponseEntity
                .status(HttpStatus.INTERNAL_SERVER_ERROR)
                .contentType(MediaType.APPLICATION_JSON)
                .body(body);
    }
}
| 1,316 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/spring/SpringSpectatorWebConfigurer.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest.spring;
import com.netflix.titus.common.runtime.TitusRuntime;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
 * Registers the {@link SpringSpectatorInterceptor} so that every Spring MVC request
 * is counted and timed in the Spectator registry.
 */
@Configuration
public class SpringSpectatorWebConfigurer implements WebMvcConfigurer {

    // NOTE(review): field injection; constructor injection is generally preferred — consider refactoring.
    @Autowired
    private TitusRuntime titusRuntime;

    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        registry.addInterceptor(new SpringSpectatorInterceptor(titusRuntime));
    }
}
| 1,317 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/endpoint/rest/spring/SpringSpectatorInterceptor.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.endpoint.rest.spring;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.runtime.endpoint.metadata.spring.CallMetadataAuthentication;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.web.servlet.ModelAndView;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
/**
* HTTP request interceptor for Spring.
*/
public class SpringSpectatorInterceptor extends HandlerInterceptorAdapter {

    private static final String METRICS_REQUEST = "titus.httpServerSpring.request";
    private static final String METRICS_REQUEST_LATENCY = "titus.httpServerSpring.requestLatency";

    // Request attribute under which preHandle stores the request start time (wall clock millis).
    private static final String REQUEST_TIMESTAMP = "titus.httpServerSpring.requestTimestamp";

    private static final Pattern UUID_PATTERN = Pattern.compile("[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}");

    /**
     * An AWS instance id, consists of 'i-' prefix and 17 alpha-numeric characters following it, for example: i-07d1b67286b43458e.
     */
    private static final Pattern INSTANCE_ID_PATTERN = Pattern.compile("i-(\\p{Alnum}){17}+");

    private final Registry registry;
    private final Clock clock;

    public SpringSpectatorInterceptor(TitusRuntime titusRuntime) {
        this.registry = titusRuntime.getRegistry();
        this.clock = titusRuntime.getClock();
    }

    /**
     * Records the request start time so postHandle can compute latency.
     */
    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
        request.setAttribute(REQUEST_TIMESTAMP, clock.wallTime());
        return super.preHandle(request, response, handler);
    }

    /**
     * Increments the request counter and records latency, tagged with port, method,
     * normalized path, status code and caller id.
     */
    @Override
    public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
        super.postHandle(request, response, handler, modelAndView);

        String callerId = getCallerId();
        // Path is normalized (UUIDs / instance ids stripped) to bound the metric cardinality.
        String path = request.getServletPath() == null ? "unknown" : trimPath(request.getServletPath());

        List<Tag> tags = Arrays.asList(
                new BasicTag("port", "" + request.getServerPort()),
                new BasicTag("method", request.getMethod()),
                new BasicTag("path", path),
                new BasicTag("status", "" + response.getStatus()),
                new BasicTag("caller", callerId)
        );
        registry.counter(METRICS_REQUEST, tags).increment();

        Long timestamp = (Long) request.getAttribute(REQUEST_TIMESTAMP);
        if (timestamp != null) {
            registry.timer(METRICS_REQUEST_LATENCY, tags).record(clock.wallTime() - timestamp, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Resolves the caller id from the security context: the first call-metadata caller when
     * available, otherwise the authentication name, or "anonymous" when unauthenticated.
     */
    private String getCallerId() {
        Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
        if (authentication == null) {
            return "anonymous";
        }
        if (authentication instanceof CallMetadataAuthentication) {
            CallMetadataAuthentication cma = (CallMetadataAuthentication) authentication;
            // Guard against an empty caller list; the previous unguarded get(0) could
            // throw IndexOutOfBoundsException and fail the whole request post-processing.
            if (!cma.getCallMetadata().getCallers().isEmpty()) {
                return cma.getCallMetadata().getCallers().get(0).getId();
            }
        }
        return authentication.getName();
    }

    /**
     * Normalizes a servlet path for use as a metric tag: strips UUIDs and AWS instance ids,
     * and removes trailing slashes.
     */
    static String trimPath(String path) {
        return removeTrailingSlash(removePatterns(INSTANCE_ID_PATTERN, removePatterns(UUID_PATTERN, path)));
    }

    private static String removeTrailingSlash(String text) {
        if (!text.endsWith("/")) {
            return text;
        }
        return removeTrailingSlash(text.substring(0, text.length() - 1));
    }

    static String removePatterns(Pattern pattern, String text) {
        return pattern.matcher(text).replaceAll("");
    }
}
| 1,318 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/GrpcRequestConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.connector;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Archaius-backed configuration for GRPC client request handling
 * (properties under the "titus.grpcRequestConfiguration" prefix).
 */
@Configuration(prefix = "titus.grpcRequestConfiguration")
public interface GrpcRequestConfiguration {

    // Default unary request timeout: 60 seconds.
    long DEFAULT_REQUEST_TIMEOUT_MS = 60_000;

    // Default streaming call timeout: 12 hours.
    long DEFAULT_STREAMING_TIMEOUT_MS = 12 * 60 * 60_000;

    /**
     * GRPC operation timeout.
     */
    @DefaultValue("" + DEFAULT_REQUEST_TIMEOUT_MS)
    long getRequestTimeoutMs();

    /**
     * Event streams have unbounded lifetime, but we want to terminate them periodically to improve request distribution
     * across multiple nodes.
     */
    @DefaultValue("" + DEFAULT_STREAMING_TIMEOUT_MS)
    long getStreamingTimeoutMs();
}
| 1,319 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common/reactor/DefaultGrpcToReactorClientFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.connector.common.reactor;
import java.time.Duration;
import java.util.Optional;
import java.util.function.BiFunction;
import com.netflix.titus.common.util.grpc.reactor.client.ReactorToGrpcClientBuilder;
import com.netflix.titus.runtime.connector.GrpcRequestConfiguration;
import com.netflix.titus.common.util.grpc.reactor.GrpcToReactorClientFactory;
import io.grpc.ServiceDescriptor;
import io.grpc.stub.AbstractStub;
/**
 * Builds Reactor-style client facades on top of GRPC stubs, applying the configured
 * request/streaming timeouts and an optional per-call stub decorator.
 *
 * @param <CONTEXT> type of the optional call context passed to the stub decorator
 */
public class DefaultGrpcToReactorClientFactory<CONTEXT> implements GrpcToReactorClientFactory {

    private final GrpcRequestConfiguration configuration;

    // Parameterized field (was a raw BiFunction). AbstractStub is the common supertype here,
    // since the concrete GRPC_STUB type is only known at the apply(...) call site.
    private final BiFunction<AbstractStub, Optional<CONTEXT>, AbstractStub> grpcStubDecorator;

    private final Class<CONTEXT> contextType;

    public DefaultGrpcToReactorClientFactory(GrpcRequestConfiguration configuration,
                                            BiFunction<AbstractStub, Optional<CONTEXT>, AbstractStub> grpcStubDecorator,
                                            Class<CONTEXT> contextType) {
        this.configuration = configuration;
        this.grpcStubDecorator = grpcStubDecorator;
        this.contextType = contextType;
    }

    /**
     * Creates a Reactor client of the given API type wrapping the provided GRPC stub,
     * configured with the request and streaming timeouts from {@link GrpcRequestConfiguration}.
     */
    @Override
    @SuppressWarnings("unchecked")
    public <GRPC_STUB extends AbstractStub<GRPC_STUB>, REACT_API> REACT_API apply(GRPC_STUB stub, Class<REACT_API> apiType, ServiceDescriptor serviceDescriptor) {
        // The cast narrows AbstractStub to the caller's GRPC_STUB; it is unchecked by necessity,
        // and relies on callers supplying a decorator that returns the same stub type it receives.
        BiFunction<GRPC_STUB, Optional<CONTEXT>, GRPC_STUB> decorator =
                (BiFunction<GRPC_STUB, Optional<CONTEXT>, GRPC_STUB>) (BiFunction) grpcStubDecorator;
        return ReactorToGrpcClientBuilder
                .newBuilder(
                        apiType, stub, serviceDescriptor, contextType
                )
                .withGrpcStubDecorator(decorator)
                .withTimeout(Duration.ofMillis(configuration.getRequestTimeoutMs()))
                .withStreamingTimeout(Duration.ofMillis(configuration.getStreamingTimeoutMs()))
                .build();
    }
}
| 1,320 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common/reactor/GrpcToReactorServerFactoryComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.connector.common.reactor;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.CallMetadataConstants;
import com.netflix.titus.common.util.grpc.reactor.GrpcToReactorServerFactory;
import com.netflix.titus.common.util.grpc.reactor.server.DefaultGrpcToReactorServerFactory;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import org.springframework.context.annotation.Bean;
@Configuration
public class GrpcToReactorServerFactoryComponent {
@Bean
public GrpcToReactorServerFactory getGrpcToReactorServerFactory(CallMetadataResolver callMetadataResolver) {
return new DefaultGrpcToReactorServerFactory<>(
CallMetadata.class,
() -> callMetadataResolver.resolve().orElse(CallMetadataConstants.UNDEFINED_CALL_METADATA)
);
}
}
| 1,321 |
0 | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common | Create_ds/titus-control-plane/titus-common-server/src/main/java/com/netflix/titus/runtime/connector/common/reactor/GrpcToReactorClientFactoryComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.connector.common.reactor;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.grpc.reactor.GrpcToReactorClientFactory;
import com.netflix.titus.runtime.connector.GrpcRequestConfiguration;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import com.netflix.titus.runtime.endpoint.metadata.CommonCallMetadataUtils;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring wiring for the GRPC-to-Reactor client bridge.
 */
@Configuration
public class GrpcToReactorClientFactoryComponent {

    /** Channel tunables (request/streaming timeouts) backed by Archaius properties under the given prefix. */
    @Bean
    public GrpcRequestConfiguration getChannelTunablesConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(GrpcRequestConfiguration.class, "titus.connector.channelTunables", titusRuntime.getMyEnvironment());
    }

    /** Client factory that decorates each GRPC call with resolved {@link CallMetadata}. */
    @Bean
    public GrpcToReactorClientFactory getReactorGrpcClientAdapterFactory(GrpcRequestConfiguration configuration,
                                                                         CallMetadataResolver callMetadataResolver) {
        // Explicit type argument (was a raw-type instantiation) so the decorator/context pair is type checked.
        return new DefaultGrpcToReactorClientFactory<CallMetadata>(
                configuration,
                CommonCallMetadataUtils.newGrpcStubDecorator(callMetadataResolver),
                CallMetadata.class
        );
    }
}
| 1,322 |
0 | Create_ds/titus-control-plane/titus-server-federation-springboot/src/main/java/com/netflix/titus | Create_ds/titus-control-plane/titus-server-federation-springboot/src/main/java/com/netflix/titus/federation/FederationSpringBootMain.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.federation;
import com.netflix.titus.api.health.HealthIndicator;
import com.netflix.titus.api.health.HealthIndicators;
import com.netflix.titus.federation.startup.TitusFederationComponent;
import com.netflix.titus.federation.startup.TitusFederationRuntimeComponent;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
/**
 * Spring Boot entry point for the Titus federation service.
 */
@SpringBootApplication
@Import({
        TitusFederationRuntimeComponent.class,
        TitusFederationComponent.class
})
public class FederationSpringBootMain {

    /** Boots the federation application context. */
    public static void main(String[] args) {
        SpringApplication.run(FederationSpringBootMain.class, args);
    }

    /** Federation defines no component-specific health checks, so it always reports healthy. */
    @Bean
    public HealthIndicator getHealthIndicator() {
        return HealthIndicators.alwaysHealthy();
    }
}
| 1,323 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/EurekaGenerator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import static com.netflix.titus.common.util.Evaluators.getOrDefault;
/**
 * Generator of Eureka {@link InstanceInfo} test data.
 */
public class EurekaGenerator {

    // Static-utility class; prevent instantiation (was relying on the implicit public constructor).
    private EurekaGenerator() {
    }

    /**
     * Creates an {@link InstanceInfo} for a standalone instance. The application name is fixed to
     * {@code testApp}.
     *
     * @param instanceId unique instance id
     * @param vipAddress VIP address under which the instance is discoverable
     * @param ipAddress  instance IP address
     * @param status     Eureka registration status
     */
    public static InstanceInfo newInstanceInfo(String instanceId, String vipAddress, String ipAddress, InstanceInfo.InstanceStatus status) {
        return InstanceInfo.Builder.newBuilder()
                .setInstanceId(instanceId)
                .setAppName("testApp")
                .setVIPAddress(vipAddress)
                .setIPAddr(ipAddress)
                .setStatus(status)
                .build();
    }

    /** Creates an {@link InstanceInfo} for a task, defaulting the Eureka status to {@code UP}. */
    public static InstanceInfo newTaskInstanceInfo(Job<?> job, Task task) {
        return newTaskInstanceInfo(job, task, InstanceStatus.UP);
    }

    /**
     * Creates an {@link InstanceInfo} for a task of the given job. The instance id is the task id, and the
     * application name is taken from the job descriptor (or {@code NO_NAME} when absent).
     *
     * @throws IllegalArgumentException if the task belongs to another job, or the task has not started yet
     */
    public static InstanceInfo newTaskInstanceInfo(Job<?> job, Task task, InstanceStatus instanceStatus) {
        Preconditions.checkArgument(job.getId().equals(task.getJobId()), "Task belongs to another job");
        Preconditions.checkArgument(task.getStatus().getState() != TaskState.Accepted, "Task not started");
        return InstanceInfo.Builder.newBuilder()
                .setInstanceId(task.getId())
                .setAppName(getOrDefault(job.getJobDescriptor().getApplicationName(), "NO_NAME"))
                .setStatus(instanceStatus)
                .build();
    }
}
| 1,324 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/EurekaServerStub.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.HealthCheckCallback;
import com.netflix.appinfo.HealthCheckHandler;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.EurekaEventListener;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
/**
 * A helper class that simulates a Eureka server for tests. Instances are registered/unregistered directly
 * on the stub and observed through the {@link EurekaClient} view returned by {@link #getEurekaClient()};
 * cache-refresh notifications are fired explicitly via {@link #triggerCacheRefreshUpdate()}.
 */
public class EurekaServerStub {

    // Registered instances keyed by instance id; acts as the simulated server-side registry.
    private final ConcurrentMap<String, InstanceInfo> instanceInfos = new ConcurrentHashMap<>();

    private final EurekaClientStub eurekaClient = new EurekaClientStub();

    /** Returns an {@link EurekaClient} backed by this stub's registry. */
    public EurekaClient getEurekaClient() {
        return eurekaClient;
    }

    /** Adds (or replaces) an instance in the registry; visible to the client view immediately. */
    public void register(InstanceInfo instanceInfo) {
        instanceInfos.put(instanceInfo.getId(), instanceInfo);
    }

    /** Removes an instance from the registry. */
    public void unregister(String instanceId) {
        instanceInfos.remove(instanceId);
    }

    /** Emits a {@link CacheRefreshedEvent} to all registered listeners, as a real client does on cache refresh. */
    public void triggerCacheRefreshUpdate() {
        eurekaClient.triggerCacheRefreshUpdate();
    }

    /**
     * Minimal {@link EurekaClient} implementation backed by the enclosing stub's registry. Only the methods
     * exercised by tests are implemented (lookup by VIP address/id and event listener management); all other
     * methods are stubs that return null or do nothing.
     */
    private class EurekaClientStub implements EurekaClient {

        private final List<EurekaEventListener> eventListeners = new CopyOnWriteArrayList<>();

        private void triggerCacheRefreshUpdate() {
            eventListeners.forEach(l -> l.onEvent(new CacheRefreshedEvent()));
        }

        @Override
        public Applications getApplicationsForARegion(@Nullable String region) {
            return null;
        }

        @Override
        public Applications getApplications(String serviceUrl) {
            return null;
        }

        // Filters the registry by secure or plain VIP address, depending on the 'secure' flag.
        @Override
        public List<InstanceInfo> getInstancesByVipAddress(String vipAddress, boolean secure) {
            return instanceInfos.values().stream()
                    .filter(instance -> secure ? vipAddress.equals(instance.getSecureVipAddress()) : vipAddress.equals(instance.getVIPAddress()))
                    .collect(Collectors.toList());
        }

        @Override
        public List<InstanceInfo> getInstancesByVipAddress(String vipAddress, boolean secure, @Nullable String region) {
            return null;
        }

        @Override
        public List<InstanceInfo> getInstancesByVipAddressAndAppName(String vipAddress, String appName, boolean secure) {
            return null;
        }

        @Override
        public Set<String> getAllKnownRegions() {
            return null;
        }

        @Override
        public InstanceInfo.InstanceStatus getInstanceRemoteStatus() {
            return null;
        }

        @Override
        public List<String> getDiscoveryServiceUrls(String zone) {
            return null;
        }

        @Override
        public List<String> getServiceUrlsFromConfig(String instanceZone, boolean preferSameZone) {
            return null;
        }

        @Override
        public List<String> getServiceUrlsFromDNS(String instanceZone, boolean preferSameZone) {
            return null;
        }

        @Override
        public void registerHealthCheckCallback(HealthCheckCallback callback) {
        }

        @Override
        public void registerHealthCheck(HealthCheckHandler healthCheckHandler) {
        }

        @Override
        public void registerEventListener(EurekaEventListener eventListener) {
            eventListeners.add(eventListener);
        }

        @Override
        public boolean unregisterEventListener(EurekaEventListener eventListener) {
            return eventListeners.remove(eventListener);
        }

        @Override
        public HealthCheckHandler getHealthCheckHandler() {
            return null;
        }

        @Override
        public void shutdown() {
        }

        @Override
        public EurekaClientConfig getEurekaClientConfig() {
            return null;
        }

        @Override
        public ApplicationInfoManager getApplicationInfoManager() {
            return null;
        }

        @Override
        public Application getApplication(String appName) {
            return null;
        }

        @Override
        public Applications getApplications() {
            return null;
        }

        // Returns a singleton list with the registered instance, or an empty list when unknown.
        @Override
        public List<InstanceInfo> getInstancesById(String id) {
            InstanceInfo instanceInfo = instanceInfos.get(id);
            return instanceInfo == null ? Collections.emptyList() : Collections.singletonList(instanceInfo);
        }

        @Override
        public InstanceInfo getNextServerFromEureka(String virtualHostname, boolean secure) {
            return null;
        }
    }
}
| 1,325 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/containerhealth/EurekaContainerHealthServiceTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.containerhealth;
import java.time.Duration;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.titus.api.containerhealth.model.ContainerHealthState;
import com.netflix.titus.api.containerhealth.model.event.ContainerHealthEvent;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.eureka.EurekaGenerator;
import com.netflix.titus.ext.eureka.EurekaServerStub;
import com.netflix.titus.testkit.model.job.JobComponentStub;
import com.netflix.titus.testkit.rx.TitusRxSubscriber;
import org.junit.Before;
import org.junit.Test;
import reactor.core.Disposable;
import reactor.test.StepVerifier;
import static com.netflix.titus.api.jobmanager.model.job.JobFunctions.ofServiceSize;
import static com.netflix.titus.testkit.junit.asserts.ContainerHealthAsserts.assertContainerHealthAndEvent;
import static com.netflix.titus.testkit.junit.asserts.ContainerHealthAsserts.assertContainerHealthEvent;
import static com.netflix.titus.testkit.model.job.JobDescriptorGenerator.serviceJobDescriptors;
import static org.assertj.core.api.Assertions.assertThat;
public class EurekaContainerHealthServiceTest {
private final TitusRuntime titusRuntime = TitusRuntimes.test();
private final EurekaServerStub eurekaServer = new EurekaServerStub();
private final JobComponentStub jobManagerStub = new JobComponentStub(titusRuntime);
private final ReadOnlyJobOperations jobOperations = jobManagerStub.getJobOperations();
private final EurekaContainerHealthService healthService = new EurekaContainerHealthService(
jobOperations, eurekaServer.getEurekaClient(), titusRuntime
);
private Job job1;
private Task task1;
private String taskId1;
@Before
public void setUp() {
this.job1 = jobManagerStub.addServiceTemplate("testJob", serviceJobDescriptors(ofServiceSize(1)))
.createJobAndTasks("testJob").getLeft();
this.task1 = jobOperations.getTasks(job1.getId()).get(0);
this.taskId1 = task1.getId();
}
@Test
public void testJobManagerUpdate() {
StepVerifier.create(healthService.events(false))
// Task launched, but not in Eureka yet.
.then(() -> jobManagerStub.moveTaskToState(taskId1, TaskState.Launched))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unknown))
// Task started and registered with Eureka
.then(() -> {
eurekaServer.register(newInstanceInfo(taskId1, InstanceStatus.UP));
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
})
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
// Task terminated
.then(() -> jobManagerStub.moveTaskToState(task1, TaskState.Finished))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Terminated))
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testEurekaUpdate() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Change state to UP
.then(() -> registerAndRefresh(InstanceStatus.UP))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
// Change state to DOWN
.then(() -> registerAndRefresh(InstanceStatus.DOWN))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unhealthy))
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testEurekaReRegistration() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Change state to UP
.then(() -> registerAndRefresh(InstanceStatus.UP))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
// Unregister in Eureka
.then(() -> {
eurekaServer.unregister(taskId1);
eurekaServer.triggerCacheRefreshUpdate();
})
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unknown))
// Register again
.then(() -> registerAndRefresh(InstanceStatus.UP))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testOutOfServiceJobWithRealStateUp() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Change state to OUT_OF_SERVICE
.then(() -> {
jobManagerStub.changeJobEnabledStatus(job1, false);
registerAndRefresh(InstanceStatus.OUT_OF_SERVICE);
})
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testOutOfServiceJobWithRealStateDown() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Register first with DOWN state
.then(() -> registerAndRefresh(InstanceStatus.DOWN))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unhealthy))
// Disable job and change state to OUT_OF_SERVICE
.then(() -> jobManagerStub.changeJobEnabledStatus(job1, false))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
.then(() -> registerAndRefresh(InstanceStatus.OUT_OF_SERVICE))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
// Enable job and remove OUT_OF_SERVICE override
.then(() -> jobManagerStub.changeJobEnabledStatus(job1, true))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unhealthy))
.then(() -> registerAndRefresh(InstanceStatus.UP))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
.thenCancel()
.verify(Duration.ofSeconds(5_000));
}
@Test
public void testOutOfServiceJobWithRealStateNotRegistered() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Check enabled job for not registered container
.then(eurekaServer::triggerCacheRefreshUpdate)
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unknown))
// Disable job
.then(() -> jobManagerStub.changeJobEnabledStatus(job1, false))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Healthy))
// Enable again
.then(() -> jobManagerStub.changeJobEnabledStatus(job1, true))
.assertNext(event -> assertContainerHealthAndEvent(event, healthService.getHealthStatus(taskId1), taskId1, ContainerHealthState.Unknown))
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testEurekaStaleDataCleanup() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
StepVerifier.create(healthService.events(false))
// Start the task and register with Eureka
.then(() -> registerAndRefresh(InstanceStatus.UP))
.assertNext(event -> assertContainerHealthEvent(event, taskId1, ContainerHealthState.Healthy))
// Lose task
.then(() -> {
jobManagerStub.forget(task1);
eurekaServer.triggerCacheRefreshUpdate();
})
.assertNext(event -> {
assertContainerHealthEvent(event, taskId1, ContainerHealthState.Terminated);
assertThat(healthService.findHealthStatus(taskId1)).isEmpty();
})
.thenCancel()
.verify(Duration.ofSeconds(5));
}
@Test
public void testBadSubscriberIsolation() {
jobManagerStub.moveTaskToState(taskId1, TaskState.Started);
eurekaServer.register(newInstanceInfo(taskId1, InstanceStatus.UP));
// First event / one subscriber
TitusRxSubscriber<ContainerHealthEvent> subscriber1 = new TitusRxSubscriber<>();
healthService.events(false).subscribe(subscriber1);
eurekaServer.triggerCacheRefreshUpdate();
// Add bad subscriber
Disposable subscription2 = healthService.events(false).subscribe(
next -> {
throw new RuntimeException("simulated error");
},
e -> {
throw new RuntimeException("simulated error");
},
() -> {
throw new RuntimeException("simulated error");
}
);
// Event 2
registerAndRefresh(InstanceStatus.DOWN);
assertThat(subscription2.isDisposed()).isTrue();
// Event 3
registerAndRefresh(InstanceStatus.UP);
assertThat(subscriber1.isDisposed()).isFalse();
assertThat(subscriber1.getAllItems()).hasSize(3);
}
private void registerAndRefresh(InstanceStatus status) {
eurekaServer.register(newInstanceInfo(taskId1, status));
eurekaServer.triggerCacheRefreshUpdate();
}
private InstanceInfo newInstanceInfo(String taskId, InstanceStatus instanceStatus) {
Pair<Job<?>, Task> jobTaskPair = jobOperations.findTaskById(taskId).orElseThrow(() -> new IllegalStateException("Task not found: " + taskId));
return EurekaGenerator.newTaskInstanceInfo(jobTaskPair.getLeft(), jobTaskPair.getRight(), instanceStatus);
}
} | 1,326 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/common/EurekaUrisTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.common;
import java.net.URI;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class EurekaUrisTest {
@Test(expected = IllegalArgumentException.class)
public void testFailIfInvalidProtocol() {
EurekaUris.failIfEurekaUriInvalid(URI.create("http://abc"));
}
@Test(expected = IllegalArgumentException.class)
public void testFailIfInvalidServiceName() {
EurekaUris.failIfEurekaUriInvalid(URI.create("eureka://:7001"));
}
@Test
public void testSecureParameter() {
assertThat(EurekaUris.isSecure(URI.create("eureka://myservice:7001"))).isFalse();
assertThat(EurekaUris.isSecure(URI.create("eureka://myservice:7001?secure"))).isTrue();
assertThat(EurekaUris.isSecure(URI.create("eureka://myservice:7001?secure=TRUE"))).isTrue();
assertThat(EurekaUris.isSecure(URI.create("eureka://myservice:7001?secure=FALSE"))).isFalse();
}
} | 1,327 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/common/SingleServiceLoadBalancerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.common;
import java.util.concurrent.TimeUnit;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.time.Clocks;
import com.netflix.titus.common.util.time.TestClock;
import com.netflix.titus.ext.eureka.EurekaServerStub;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.ext.eureka.EurekaGenerator.newInstanceInfo;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
public class SingleServiceLoadBalancerTest {
private static final InstanceInfo INSTANCE_1 = newInstanceInfo("id1", "myservice", "1.0.0.1", InstanceStatus.UP);
private static final InstanceInfo INSTANCE_2 = newInstanceInfo("id2", "myservice", "1.0.0.2", InstanceStatus.UP);
private final TestClock testClock = Clocks.testWorldClock();
private final TitusRuntime titusRuntime = TitusRuntimes.test(testClock);
private final EurekaServerStub eurekaServer = new EurekaServerStub();
@Before
public void setUp() {
eurekaServer.register(INSTANCE_1);
eurekaServer.register(INSTANCE_2);
eurekaServer.triggerCacheRefreshUpdate();
}
@Test
public void testChooseWhenNoFailures() {
SingleServiceLoadBalancer singleLB = newSingleLB();
assertThat(asList(takeNextOrFail(singleLB), takeNextOrFail(singleLB))).contains(INSTANCE_1, INSTANCE_2);
}
@Test
public void testChooseWhenFailure() {
SingleServiceLoadBalancer singleLB = newSingleLB();
InstanceInfo first = takeNextOrFail(singleLB);
singleLB.recordFailure(first);
InstanceInfo healthy = first == INSTANCE_1 ? INSTANCE_2 : INSTANCE_1;
assertThat(asList(takeNextOrFail(singleLB), takeNextOrFail(singleLB))).contains(healthy, healthy);
// Move time past quarantine period
testClock.advanceTime(1, TimeUnit.HOURS);
assertThat(asList(takeNextOrFail(singleLB), takeNextOrFail(singleLB))).contains(INSTANCE_1, INSTANCE_2);
}
@Test
public void testAllBad() {
SingleServiceLoadBalancer singleLB = newSingleLB();
InstanceInfo first = takeNextOrFail(singleLB);
InstanceInfo second = takeNextOrFail(singleLB);
singleLB.recordFailure(first);
singleLB.recordFailure(second);
assertThat(singleLB.chooseNext()).isEmpty();
// Move time past quarantine period
testClock.advanceTime(1, TimeUnit.HOURS);
assertThat(asList(takeNextOrFail(singleLB), takeNextOrFail(singleLB))).contains(INSTANCE_1, INSTANCE_2);
}
private SingleServiceLoadBalancer newSingleLB() {
return new SingleServiceLoadBalancer(
eurekaServer.getEurekaClient(),
"myservice",
false,
titusRuntime
);
}
private InstanceInfo takeNextOrFail(SingleServiceLoadBalancer singleLB) {
return singleLB.chooseNext().orElseThrow(() -> new IllegalStateException("not found"));
}
} | 1,328 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/test/java/com/netflix/titus/ext/eureka/spring/EurekaLoadBalancingExchangeFilterFunctionTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.spring;
import java.net.URI;
import java.net.URISyntaxException;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.eureka.EurekaServerStub;
import com.netflix.titus.ext.eureka.common.EurekaUris;
import org.junit.Before;
import org.junit.Test;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.web.reactive.function.client.ClientRequest;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.ExchangeFunction;
import reactor.core.publisher.Mono;
import static com.netflix.titus.ext.eureka.EurekaGenerator.newInstanceInfo;
import static com.netflix.titus.ext.eureka.spring.EurekaLoadBalancingExchangeFilterFunction.rewrite;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class EurekaLoadBalancingExchangeFilterFunctionTest {
private static final InstanceInfo INSTANCE_1 = newInstanceInfo("id1", "myservice", "1.0.0.1", InstanceInfo.InstanceStatus.UP);
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
private final EurekaServerStub eurekaServer = new EurekaServerStub();
@Before
public void setUp() {
eurekaServer.register(INSTANCE_1);
eurekaServer.triggerCacheRefreshUpdate();
}
@Test
public void testInterceptor() {
ClientResponse response = execute("eureka://myservice:7001");
assertThat(response.statusCode().is2xxSuccessful()).isTrue();
}
@Test
public void testInterceptorForNonExistingService() {
ClientResponse response = execute("eureka://wrongservice:7001");
assertThat(response.statusCode().is5xxServerError()).isTrue();
}
@Test
public void testUriRewrite() throws URISyntaxException {
assertThat(rewrite(new URI("eureka://myservice:7001"), INSTANCE_1)).isEqualTo(new URI("http://1.0.0.1:7001"));
assertThat(rewrite(new URI("eureka://myservice:7001/mypath?k=v"), INSTANCE_1)).isEqualTo(new URI("http://1.0.0.1:7001/mypath?k=v"));
assertThat(rewrite(new URI("eureka://myservice:7001?secure=true"), INSTANCE_1)).isEqualTo(new URI("https://1.0.0.1:7001?secure=true"));
assertThat(rewrite(new URI("eureka://myservice:7001/mypath?k=v&secure=true"), INSTANCE_1)).isEqualTo(new URI("https://1.0.0.1:7001/mypath?k=v&secure=true"));
}
private ClientResponse execute(String eurekaUri) {
EurekaLoadBalancingExchangeFilterFunction filter = new EurekaLoadBalancingExchangeFilterFunction(
eurekaServer.getEurekaClient(), EurekaUris::getServiceName, titusRuntime
);
ClientRequest request = mock(ClientRequest.class);
when(request.url()).thenReturn(URI.create(eurekaUri));
when(request.method()).thenReturn(HttpMethod.GET);
when(request.headers()).thenReturn(HttpHeaders.EMPTY);
when(request.cookies()).thenReturn(HttpHeaders.EMPTY);
ExchangeFunction next = mock(ExchangeFunction.class);
when(next.exchange(any())).thenAnswer(invocation -> {
ClientRequest rewrittenRequest = invocation.getArgument(0);
ClientResponse clientResponse = mock(ClientResponse.class);
if (rewrittenRequest.url().getHost().equals("1.0.0.1")) {
when(clientResponse.statusCode()).thenReturn(HttpStatus.OK);
} else {
when(clientResponse.statusCode()).thenReturn(HttpStatus.INTERNAL_SERVER_ERROR);
}
return Mono.just(clientResponse);
});
return filter.filter(request, next).block();
}
} | 1,329 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/containerhealth/EurekaContainerHealthModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.containerhealth;
import com.google.inject.AbstractModule;
import com.google.inject.multibindings.Multibinder;
import com.netflix.titus.api.containerhealth.service.ContainerHealthService;
/**
 * Guice module contributing {@link EurekaContainerHealthService} to the set of available
 * {@link ContainerHealthService} implementations.
 */
public final class EurekaContainerHealthModule extends AbstractModule {
    @Override
    protected void configure() {
        Multibinder<ContainerHealthService> healthServices = Multibinder.newSetBinder(binder(), ContainerHealthService.class);
        healthServices.addBinding().to(EurekaContainerHealthService.class);
    }
}
| 1,330 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/containerhealth/EurekaContainerHealthService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.containerhealth;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaEvent;
import com.netflix.discovery.EurekaEventListener;
import com.netflix.titus.api.containerhealth.model.ContainerHealthState;
import com.netflix.titus.api.containerhealth.model.ContainerHealthStatus;
import com.netflix.titus.api.containerhealth.model.event.ContainerHealthEvent;
import com.netflix.titus.api.containerhealth.model.event.ContainerHealthUpdateEvent;
import com.netflix.titus.api.containerhealth.service.ContainerHealthService;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.event.JobUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.event.TaskUpdateEvent;
import com.netflix.titus.api.jobmanager.service.ReadOnlyJobOperations;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.guice.annotation.Activator;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.common.util.rx.ReactorRetriers;
import com.netflix.titus.common.util.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;
/**
 * {@link ContainerHealthService} implementation that derives container (task) health from the
 * Eureka registration status of each task. Health update events are emitted when either the job
 * management state changes (job/task update events) or the local Eureka registry cache is
 * refreshed.
 */
@Singleton
public class EurekaContainerHealthService implements ContainerHealthService {

    private static final Logger logger = LoggerFactory.getLogger(EurekaContainerHealthService.class);

    public static final String NAME = "eureka";

    private static final Duration RETRY_INTERVAL = Duration.ofSeconds(5);

    private final ReadOnlyJobOperations jobOperations;
    private final EurekaClient eurekaClient;
    private final TitusRuntime titusRuntime;

    // Shared hot stream of health events; each upstream subscription keeps its own per-task state map.
    private final Flux<ContainerHealthEvent> healthStatuses;

    private Disposable eventLoggerDisposable;

    @Inject
    public EurekaContainerHealthService(ReadOnlyJobOperations jobOperations, EurekaClient eurekaClient, TitusRuntime titusRuntime) {
        this.jobOperations = jobOperations;
        this.eurekaClient = eurekaClient;
        this.titusRuntime = titusRuntime;

        // Surface Eureka client callbacks (cache refresh notifications) as a Flux.
        Flux<EurekaEvent> eurekaCallbacks = ReactorExt.fromListener(
                EurekaEventListener.class,
                eurekaClient::registerEventListener,
                eurekaClient::unregisterEventListener
        );

        this.healthStatuses = Flux.defer(() -> {
            // Last emitted health event per task id, used to suppress duplicate updates.
            ConcurrentMap<String, ContainerHealthEvent> current = new ConcurrentHashMap<>();
            return Flux.merge(eurekaCallbacks, ReactorExt.toFlux(jobOperations.observeJobs()))
                    .flatMap(event -> handleJobManagerOrEurekaStatusUpdate(event, current));
        }).share().transformDeferred(ReactorExt.badSubscriberHandler(logger));
    }

    /**
     * Starts an internal subscription that logs all health status transitions. The subscription is
     * retried at a fixed interval if the event stream terminates with an error.
     */
    @Activator
    public void activate() {
        this.eventLoggerDisposable = healthStatuses
                .retryWhen(ReactorRetriers.instrumentedReactorRetryer("EurekaContainerHealthServiceEventLogger", RETRY_INTERVAL, logger))
                .subscribeOn(Schedulers.parallel())
                .subscribe(
                        event -> logger.info("Eureka health status update: {}", event),
                        // Pass the throwable to the logger; previously the stack trace was silently dropped.
                        e -> logger.error("Unexpected error", e),
                        () -> logger.info("Eureka health event logger terminated")
                );
    }

    @PreDestroy
    public void shutdown() {
        ReactorExt.safeDispose(eventLoggerDisposable);
    }

    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Returns the current health status of the given task, or {@link Optional#empty()} if the task
     * is not known to the job management subsystem.
     */
    @Override
    public Optional<ContainerHealthStatus> findHealthStatus(String taskId) {
        return jobOperations.findTaskById(taskId).map(jobAndTaskPair -> buildHealthStatus(jobAndTaskPair.getLeft(), jobAndTaskPair.getRight()));
    }

    @Override
    public Flux<ContainerHealthEvent> events(boolean snapshot) {
        Preconditions.checkArgument(!snapshot, "Snapshot state is generated by AggregatingContainerHealthService");
        return healthStatuses;
    }

    // Builds a health status snapshot by evaluating the task's current Eureka/job state.
    private ContainerHealthStatus buildHealthStatus(Job<?> job, Task task) {
        return ContainerHealthStatus.newBuilder()
                .withTaskId(task.getId())
                .withTimestamp(titusRuntime.getClock().wallTime())
                .withState(takeStateOf(job, task))
                .withReason(takeStateReasonOf(job, task))
                .build();
    }

    // Builds a health status snapshot from an already computed state/reason pair.
    private ContainerHealthStatus buildHealthStatus(Task task, ContainerHealthState state, String reason) {
        return ContainerHealthStatus.newBuilder()
                .withTaskId(task.getId())
                .withTimestamp(titusRuntime.getClock().wallTime())
                .withState(state)
                .withReason(reason)
                .build();
    }

    private ContainerHealthState takeStateOf(Job<?> job, Task task) {
        // If it is finished, ignore Eureka status
        if (task.getStatus().getState() == TaskState.Finished) {
            return ContainerHealthState.Terminated;
        }

        List<InstanceInfo> instances = eurekaClient.getInstancesById(task.getId());

        // If a job is disabled, the real Eureka state is hidden. If the container is not registered with Eureka in
        // the disabled job, we also do not put any constraints here. In both cases we report it is healthy.
        if (JobFunctions.isDisabled(job)) {
            return ContainerHealthState.Healthy;
        }
        if (CollectionsExt.isNullOrEmpty(instances)) {
            return ContainerHealthState.Unknown;
        }

        InstanceInfo instance = instances.get(0);
        return instance.getStatus() == InstanceInfo.InstanceStatus.UP
                ? ContainerHealthState.Healthy
                : ContainerHealthState.Unhealthy;
    }

    private String takeStateReasonOf(Job<?> job, Task task) {
        // A finished task is always reported as terminated, regardless of its Eureka registration.
        // This check is done first, mirroring the evaluation order in takeStateOf, so that
        // state=Terminated is never paired with reason="not registered".
        if (task.getStatus().getState() == TaskState.Finished) {
            return "terminated";
        }

        List<InstanceInfo> instances = eurekaClient.getInstancesById(task.getId());
        if (CollectionsExt.isNullOrEmpty(instances)) {
            return JobFunctions.isDisabled(job) ? "not registered, and job disabled" : "not registered";
        }
        return instances.get(0).getStatus().name();
    }

    // Dispatches an element of the merged stream to the job-manager or Eureka handler.
    private Flux<ContainerHealthEvent> handleJobManagerOrEurekaStatusUpdate(Object event, ConcurrentMap<String, ContainerHealthEvent> state) {
        if (event instanceof JobManagerEvent) {
            return handleJobManagerEvent((JobManagerEvent) event, state);
        }
        if (event instanceof EurekaEvent) {
            return handleEurekaEvent((EurekaEvent) event, state);
        }
        return Flux.empty();
    }

    private Flux<ContainerHealthEvent> handleJobManagerEvent(JobManagerEvent event, ConcurrentMap<String, ContainerHealthEvent> state) {
        if (event instanceof JobUpdateEvent) {
            JobUpdateEvent jobUpdateEvent = (JobUpdateEvent) event;
            // Only job updates with a known previous state can reveal an 'enabled' flag change.
            return jobUpdateEvent.getPrevious()
                    .map(previous -> handleJobEnabledStatusUpdate(jobUpdateEvent.getCurrent(), previous, state))
                    .orElse(Flux.empty());
        } else if (event instanceof TaskUpdateEvent) {
            TaskUpdateEvent taskEvent = (TaskUpdateEvent) event;
            return handleTaskStateUpdate(taskEvent.getCurrentJob(), taskEvent.getCurrentTask(), state).map(Flux::just).orElse(Flux.empty());
        }
        return Flux.empty();
    }

    // On every Eureka cache refresh, re-evaluates all known tasks and evicts stale state entries.
    private Flux<ContainerHealthEvent> handleEurekaEvent(EurekaEvent event, ConcurrentMap<String, ContainerHealthEvent> state) {
        if (!(event instanceof CacheRefreshedEvent)) {
            return Flux.empty();
        }
        List<Pair<Job, List<Task>>> allJobsAndTasks = jobOperations.getJobsAndTasks();
        List<Task> allTasks = new ArrayList<>();
        List<ContainerHealthEvent> events = new ArrayList<>();
        allJobsAndTasks.forEach(jobAndTasks -> {
            jobAndTasks.getRight().forEach(task -> {
                handleTaskStateUpdate(jobAndTasks.getLeft(), task, state).ifPresent(events::add);
                allTasks.add(task);
            });
        });

        // Cleanup, in case we have stale entries.
        Set<String> unknownTaskIds = CollectionsExt.copyAndRemove(state.keySet(), allTasks.stream().map(Task::getId).collect(Collectors.toSet()));
        unknownTaskIds.forEach(taskId -> {
            state.remove(taskId);

            // Assume the task was terminated.
            ContainerHealthStatus terminatedStatus = ContainerHealthStatus.newBuilder()
                    .withTaskId(taskId)
                    .withTimestamp(titusRuntime.getClock().wallTime())
                    .withState(ContainerHealthState.Terminated)
                    .withReason("terminated")
                    .build();
            events.add(ContainerHealthUpdateEvent.healthChanged(terminatedStatus));
        });

        return Flux.fromIterable(events);
    }

    // Emits updates for all tasks of a service job whose 'enabled' flag flipped.
    private Flux<ContainerHealthEvent> handleJobEnabledStatusUpdate(Job current, Job previous, ConcurrentMap<String, ContainerHealthEvent> state) {
        if (!JobFunctions.isServiceJob(current)) {
            return Flux.empty();
        }

        // Examine if a job's 'enabled' status was changed.
        boolean isCurrentDisabled = JobFunctions.isDisabled(current);
        if (isCurrentDisabled == JobFunctions.isDisabled(previous)) {
            return Flux.empty();
        }

        List<Task> tasks = jobOperations.getTasks(current.getId());
        List<ContainerHealthEvent> events = new ArrayList<>();
        tasks.forEach(task -> handleTaskStateUpdate(current, task, state).ifPresent(events::add));
        return Flux.fromIterable(events);
    }

    // Returns an update event if the task's health state or reason changed since the last emission.
    private Optional<ContainerHealthEvent> handleTaskStateUpdate(Job<?> job, Task task, ConcurrentMap<String, ContainerHealthEvent> state) {
        ContainerHealthUpdateEvent lastEvent = (ContainerHealthUpdateEvent) state.get(task.getId());
        if (lastEvent == null) {
            return Optional.of(recordNewState(state, task, ContainerHealthEvent.healthChanged(buildHealthStatus(job, task))));
        }

        ContainerHealthState newTaskState = takeStateOf(job, task);
        String newReason = takeStateReasonOf(job, task);
        if (lastEvent.getContainerHealthStatus().getState() == newTaskState && lastEvent.getContainerHealthStatus().getReason().equals(newReason)) {
            return Optional.empty();
        }
        return Optional.of(recordNewState(state, task, ContainerHealthEvent.healthChanged(buildHealthStatus(task, newTaskState, newReason))));
    }

    // Stores the latest event for live tasks; finished tasks are evicted from the state map.
    private ContainerHealthUpdateEvent recordNewState(ConcurrentMap<String, ContainerHealthEvent> state, Task task, ContainerHealthUpdateEvent newEvent) {
        if (task.getStatus().getState() != TaskState.Finished) {
            state.put(task.getId(), newEvent);
        } else {
            state.remove(task.getId());
        }
        return newEvent;
    }
}
| 1,331 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/common/EurekaLoadBalancer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.common;
import java.io.Closeable;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.titus.common.runtime.TitusRuntime;
/**
 * Eureka based load balancer that manages one {@link SingleServiceLoadBalancer} per target
 * endpoint. Endpoints are addressed with eureka:// URIs (see {@link EurekaUris}), for example
 * {@code eureka://myservice:7001?secure=true}.
 */
public class EurekaLoadBalancer implements Closeable {

    private final EurekaClient eurekaClient;
    // Extracts the Eureka VIP/service name from a eureka:// URI.
    private final Function<URI, String> vipExtractor;
    private final TitusRuntime titusRuntime;

    private final ConcurrentMap<String, SingleServiceLoadBalancer> loadBalancers = new ConcurrentHashMap<>();

    public EurekaLoadBalancer(EurekaClient eurekaClient,
                              Function<URI, String> vipExtractor,
                              TitusRuntime titusRuntime) {
        this.eurekaClient = eurekaClient;
        this.vipExtractor = vipExtractor;
        this.titusRuntime = titusRuntime;
    }

    /**
     * Closes all per-service load balancers, unregistering their Eureka event listeners.
     */
    @Override
    public void close() {
        List<SingleServiceLoadBalancer> toClose = new ArrayList<>(loadBalancers.values());
        loadBalancers.clear();
        toClose.forEach(SingleServiceLoadBalancer::close);
    }

    /**
     * Picks the next instance for the service addressed by the given eureka:// URI, or
     * {@link Optional#empty()} if no healthy instance is currently known.
     */
    public Optional<InstanceInfo> chooseNext(URI eurekaUri) {
        return loadBalancers.computeIfAbsent(
                toEndpointId(eurekaUri),
                uri -> newSingleServiceLoadBalancer(eurekaUri)
        ).chooseNext();
    }

    /** Records a successful call to the instance, resetting its failure backoff. */
    public void recordSuccess(URI eurekaUri, InstanceInfo instanceInfo) {
        SingleServiceLoadBalancer singleLB = loadBalancers.get(toEndpointId(eurekaUri));
        if (singleLB != null) {
            singleLB.recordSuccess(instanceInfo);
        }
    }

    /** Records a failed call to the instance, increasing its failure backoff. */
    public void recordFailure(URI eurekaUri, InstanceInfo instanceInfo) {
        SingleServiceLoadBalancer singleLB = loadBalancers.get(toEndpointId(eurekaUri));
        if (singleLB != null) {
            singleLB.recordFailure(instanceInfo);
        }
    }

    /**
     * Cache key of a target endpoint. The key includes the secure flag: secure and non-secure VIP
     * resolution for the same host:port pair yield different instance sets, and keying on
     * host:port alone made both variants share one {@link SingleServiceLoadBalancer} configured
     * with whichever secure flag was seen first.
     */
    private String toEndpointId(URI eurekaUri) {
        return eurekaUri.getHost() + ':' + eurekaUri.getPort() + ':' + EurekaUris.isSecure(eurekaUri);
    }

    private SingleServiceLoadBalancer newSingleServiceLoadBalancer(URI eurekaUri) {
        return new SingleServiceLoadBalancer(eurekaClient, vipExtractor.apply(eurekaUri), EurekaUris.isSecure(eurekaUri), titusRuntime);
    }
}
| 1,332 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/common/EurekaUris.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.common;
import java.net.URI;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.StringExt;
import org.springframework.web.util.UriComponentsBuilder;
/**
 * Helper methods for parsing and validating {@code eureka://} URIs
 * (for example {@code eureka://myservice:7001?secure=true}).
 */
public class EurekaUris {

    public static final String EUREKA_SCHEME = "eureka";

    /** Query parameter marking an endpoint as secure: {@code ?secure} or {@code ?secure=true}. */
    public static final String EUREKA_SECURE_PARAM = "secure";

    // Static utility class; not instantiable.
    private EurekaUris() {
    }

    /**
     * Returns the given URI if it is a well formed eureka:// URI.
     *
     * @throws IllegalArgumentException if the scheme is not "eureka" or the service name (host part) is missing
     */
    public static URI failIfEurekaUriInvalid(URI eurekaUri) {
        Preconditions.checkArgument(isEurekaScheme(eurekaUri), "Not Eureka scheme in URI: %s", eurekaUri);
        Preconditions.checkArgument(StringExt.isNotEmpty(eurekaUri.getHost()), "Service name not defined in URI: %s", eurekaUri);
        return eurekaUri;
    }

    /** Returns the Eureka service name, which is encoded as the URI host part. */
    public static String getServiceName(URI eurekaUri) {
        return eurekaUri.getHost();
    }

    /** Returns true if the URI uses the "eureka" scheme (case insensitive). */
    public static boolean isEurekaScheme(URI eurekaUri) {
        return EUREKA_SCHEME.equalsIgnoreCase(eurekaUri.getScheme());
    }

    /**
     * Returns true if the URI carries the 'secure' query parameter with no value or with value
     * "true" (case insensitive). Only the first occurrence of the parameter is examined.
     */
    public static boolean isSecure(URI eurekaUri) {
        List<String> secureValue = UriComponentsBuilder.fromUri(eurekaUri).build().getQueryParams().get(EUREKA_SECURE_PARAM);
        if (CollectionsExt.isNullOrEmpty(secureValue)) {
            return false;
        }
        String value = StringExt.safeTrim(secureValue.get(0));
        return value.isEmpty() || "true".equalsIgnoreCase(value);
    }
}
| 1,333 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/common/SingleServiceLoadBalancer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.common;
import java.io.Closeable;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaEvent;
import com.netflix.discovery.EurekaEventListener;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.time.Clock;
/**
* Helper class to track a collection of servers registered with Eureka with properties helping to determine their health,
* as well as uniform load distribution. The load balancer implements round robin algorithm with bad node isolation.
*/
public class SingleServiceLoadBalancer implements Closeable, EurekaEventListener {
    // Backoff step added on each consecutive failure. NOTE(review): recordFailure grows the delay
    // linearly (100ms, 200ms, 300ms, ...); the "INITIAL" prefix suggests exponential growth was
    // intended — confirm before relying on the backoff shape.
    private static final int INITIAL_DELAY_AFTER_FAILURE_MS = 100;
    private final Clock clock;
    private final EurekaClient eurekaClient;
    // VIP address identifying the target service in Eureka.
    private final String vipAddress;
    // If true, instances are resolved via the secure VIP address.
    private final boolean secure;
    // Snapshot of the currently known UP instances, keyed by instance id. Replaced atomically on
    // every Eureka cache refresh; readers always see a consistent map.
    private final AtomicReference<Map<String, EurekaInstance>> eurekaInstancesRef = new AtomicReference<>(Collections.emptyMap());
    public SingleServiceLoadBalancer(EurekaClient eurekaClient,
                                     String vipAddress,
                                     boolean secure,
                                     TitusRuntime titusRuntime) {
        this.clock = titusRuntime.getClock();
        this.eurekaClient = eurekaClient;
        this.vipAddress = vipAddress;
        this.secure = secure;
        // NOTE(review): registering 'this' as a listener publishes a not-yet-fully-constructed
        // object; safe only if no callback can arrive before the constructor returns — confirm.
        eurekaClient.registerEventListener(this);
        refresh();
    }
    @Override
    public void close() {
        // Stop receiving cache refresh callbacks; no other resources are held.
        eurekaClient.unregisterEventListener(this);
    }
    @Override
    public void onEvent(EurekaEvent event) {
        // Rebuild the instance snapshot whenever the local Eureka registry cache is refreshed.
        if (event instanceof CacheRefreshedEvent) {
            refresh();
        }
    }
    // Rebuilds the instance snapshot from the current Eureka registry content. Only instances in
    // the UP state are retained. Per-instance bookkeeping (selection timestamp, failure backoff)
    // is carried over for instances that were already known.
    private void refresh() {
        List<InstanceInfo> instances = eurekaClient.getInstancesByVipAddress(vipAddress, secure);
        if (CollectionsExt.isNullOrEmpty(instances)) {
            eurekaInstancesRef.set(Collections.emptyMap());
            return;
        }
        Map<String, EurekaInstance> newEurekaInstances = new HashMap<>();
        instances.forEach(instanceInfo -> {
            if (instanceInfo.getStatus() == InstanceInfo.InstanceStatus.UP) {
                String id = instanceInfo.getId();
                EurekaInstance previous = eurekaInstancesRef.get().get(id);
                if (previous == null) {
                    newEurekaInstances.put(id, new EurekaInstance(instanceInfo));
                } else {
                    // Keep existing counters/backoff, only swap in the fresh InstanceInfo.
                    previous.update(instanceInfo);
                    newEurekaInstances.put(id, previous);
                }
            }
        });
        eurekaInstancesRef.set(newEurekaInstances);
    }
    /**
     * Picks the next instance to call, or {@code Optional.empty()} when no instance is known or
     * all known instances are temporarily disabled by their failure backoff.
     */
    public Optional<InstanceInfo> chooseNext() {
        long now = clock.wallTime();
        Map<String, EurekaInstance> eurekaInstances = eurekaInstancesRef.get();
        if (eurekaInstances.isEmpty()) {
            return Optional.empty();
        }
        return chooseNextHealthy(eurekaInstances, now);
    }
    // Selects the non-disabled instance with the smallest lastRequestTimestamp (least recently
    // selected), which rotates the traffic across healthy instances.
    private Optional<InstanceInfo> chooseNextHealthy(Map<String, EurekaInstance> eurekaInstances, long now) {
        long minLastRequestTimestamp = Long.MAX_VALUE;
        EurekaInstance latestHealthy = null;
        for (EurekaInstance eurekaInstance : eurekaInstances.values()) {
            // An instance is disabled while its failure backoff window (delayUntil) is in the future.
            boolean disabled = eurekaInstance.getDelayUntil() > 0 && eurekaInstance.getDelayUntil() > now;
            if (!disabled && eurekaInstance.getLastRequestTimestamp() < minLastRequestTimestamp) {
                minLastRequestTimestamp = eurekaInstance.getLastRequestTimestamp();
                latestHealthy = eurekaInstance;
            }
        }
        if (latestHealthy != null) {
            latestHealthy.recordSelection(clock.wallTime());
            return Optional.of(latestHealthy.getInstanceInfo());
        }
        return Optional.empty();
    }
    /** Clears the failure backoff of the instance after a successful call. */
    public void recordSuccess(InstanceInfo instanceInfo) {
        EurekaInstance eurekaInstance = eurekaInstancesRef.get().get(instanceInfo.getId());
        if (eurekaInstance != null) {
            eurekaInstance.recordSuccess();
        }
    }
    /** Extends the failure backoff of the instance after a failed call. */
    public void recordFailure(InstanceInfo instanceInfo) {
        EurekaInstance eurekaInstance = eurekaInstancesRef.get().get(instanceInfo.getId());
        if (eurekaInstance != null) {
            eurekaInstance.recordFailure(clock.wallTime());
        }
    }
    // Per-instance bookkeeping: latest InstanceInfo, last selection time, and failure backoff.
    // NOTE(review): references no enclosing state, so it could be a static nested class.
    private class EurekaInstance {
        private final AtomicReference<InstanceInfo> instanceInfoRef;
        // Wall-clock time of the last time this instance was handed out by chooseNext.
        private final AtomicLong lastRequestTimestampRef = new AtomicLong();
        // Current backoff duration; grows by INITIAL_DELAY_AFTER_FAILURE_MS per failure.
        private final AtomicLong delayAfterFailureRef = new AtomicLong();
        // Wall-clock time until which the instance is excluded from selection (0 = not excluded).
        private final AtomicLong delayUntilRef = new AtomicLong();
        private EurekaInstance(InstanceInfo instanceInfo) {
            this.instanceInfoRef = new AtomicReference<>(instanceInfo);
        }
        private InstanceInfo getInstanceInfo() {
            return instanceInfoRef.get();
        }
        private long getLastRequestTimestamp() {
            return lastRequestTimestampRef.get();
        }
        private long getDelayUntil() {
            return delayUntilRef.get();
        }
        private void recordSelection(long now) {
            lastRequestTimestampRef.set(now);
        }
        private void recordSuccess() {
            // Any success fully resets the backoff.
            delayAfterFailureRef.set(0);
            delayUntilRef.set(0);
        }
        private void recordFailure(long now) {
            delayAfterFailureRef.set(delayAfterFailureRef.get() + INITIAL_DELAY_AFTER_FAILURE_MS);
            delayUntilRef.set(now + delayAfterFailureRef.get());
        }
        private void update(InstanceInfo instanceInfo) {
            instanceInfoRef.set(instanceInfo);
        }
    }
}
| 1,334 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/resolver/EurekaHostCallerIdResolver.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.resolver;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaEvent;
import com.netflix.discovery.EurekaEventListener;
import com.netflix.discovery.shared.Application;
import com.netflix.titus.common.util.NetworkExt;
import com.netflix.titus.runtime.endpoint.resolver.HostCallerIdResolver;
import static com.netflix.titus.common.util.StringExt.splitByComma;
/**
 * Resolves a caller's IP address or host name to an application name using data from the Eureka
 * registry. Addresses not registered with Eureka resolve to "OFFICE" when they fall into one of
 * the configured office IP ranges, and to "UNKNOWN" otherwise.
 */
@Singleton
public class EurekaHostCallerIdResolver implements HostCallerIdResolver, EurekaEventListener {

    public static final String OFFICE_IP_RANGES = "officeIpRanges";

    private static final String UNKNOWN_APP = "UNKNOWN";
    private static final String OFFICE = "OFFICE";

    private final EurekaClient eurekaClient;

    // Lookup table rebuilt on each Eureka cache refresh; volatile so readers see a full snapshot.
    private volatile Map<String, String> addressToApplicationMap;

    private final Function<String, Boolean> officeNetworkPredicate;

    @Inject
    public EurekaHostCallerIdResolver(EurekaClient eurekaClient, @Named(OFFICE_IP_RANGES) String officeIpRanges) {
        this.eurekaClient = eurekaClient;
        this.officeNetworkPredicate = NetworkExt.buildNetworkMatchPredicate(splitByComma(officeIpRanges));
        refreshAddressCache();
        eurekaClient.registerEventListener(this);
    }

    @Override
    public Optional<String> resolve(String ipOrHostName) {
        if (ipOrHostName == null) {
            return Optional.empty();
        }
        String application = addressToApplicationMap.get(ipOrHostName);
        if (application != null) {
            return Optional.of(application);
        }
        return Optional.of(officeNetworkPredicate.apply(ipOrHostName) ? OFFICE : UNKNOWN_APP);
    }

    @Override
    public void onEvent(EurekaEvent event) {
        if (event instanceof CacheRefreshedEvent) {
            refreshAddressCache();
        }
    }

    // Rebuilds the address -> application lookup table from the full Eureka registry content.
    private void refreshAddressCache() {
        Map<String, String> freshMapping = new HashMap<>();
        eurekaClient.getApplications().getRegisteredApplications().forEach(application ->
                application.getInstances().forEach(instance -> appendApplicationAddresses(freshMapping, instance))
        );
        this.addressToApplicationMap = freshMapping;
    }

    // Adds all known addresses (host names and IPs) of the instance to the mapping.
    private void appendApplicationAddresses(Map<String, String> mapping, InstanceInfo instance) {
        String application = instance.getAppName();
        if (application == null) {
            return;
        }
        Consumer<String> putIfNonNull = address -> {
            if (address != null) {
                mapping.put(address, application);
            }
        };
        if (instance.getDataCenterInfo() instanceof AmazonInfo) {
            AmazonInfo amazonInfo = (AmazonInfo) instance.getDataCenterInfo();
            putIfNonNull.accept(amazonInfo.get(AmazonInfo.MetaDataKey.localHostname));
            putIfNonNull.accept(amazonInfo.get(AmazonInfo.MetaDataKey.localIpv4));
            putIfNonNull.accept(amazonInfo.get(AmazonInfo.MetaDataKey.publicHostname));
            putIfNonNull.accept(amazonInfo.get(AmazonInfo.MetaDataKey.publicIpv4));
        } else {
            putIfNonNull.accept(instance.getIPAddr());
            putIfNonNull.accept(instance.getHostName());
        }
    }
}
| 1,335 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/supervisor/EurekaLocalMasterReadinessResolver.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.supervisor;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaEventListener;
import com.netflix.titus.api.supervisor.model.ReadinessState;
import com.netflix.titus.api.supervisor.model.ReadinessStatus;
import com.netflix.titus.api.supervisor.service.LocalMasterReadinessResolver;
import com.netflix.titus.common.runtime.TitusRuntime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
/**
 * Maps the local TitusMaster's Eureka registration status to a {@link ReadinessStatus}:
 * STARTING -> NotReady, UP -> Enabled, DOWN/OUT_OF_SERVICE/UNKNOWN -> Disabled, and
 * "not registered at all" -> NotReady.
 */
@Singleton
public class EurekaLocalMasterReadinessResolver implements LocalMasterReadinessResolver {
    private static final Logger logger = LoggerFactory.getLogger(EurekaLocalMasterReadinessResolver.class);
    private final EurekaClient eurekaClient;
    // Eureka instance id of the local master, taken from the supervisor configuration.
    private final String instanceId;
    private final TitusRuntime titusRuntime;
    @Inject
    public EurekaLocalMasterReadinessResolver(EurekaClient eurekaClient, EurekaSupervisorConfiguration configuration, TitusRuntime titusRuntime) {
        this.eurekaClient = eurekaClient;
        this.instanceId = configuration.getInstanceId();
        this.titusRuntime = titusRuntime;
    }
    /**
     * Emits the current readiness status immediately, then a new status on every Eureka client
     * event. The Eureka listener is unregistered when the subscription is cancelled; the LATEST
     * overflow strategy keeps only the most recent status under backpressure.
     */
    @Override
    public Flux<ReadinessStatus> observeLocalMasterReadinessUpdates() {
        return Flux.create(emitter -> {
            // NOTE(review): 'last' is written on every event but never read after the initial
            // emission below — it looks like leftover state (perhaps intended for deduplication);
            // confirm before removing.
            AtomicReference<ReadinessStatus> last = new AtomicReference<>(fetchCurrent());
            // Emit immediately known state
            emitter.next(last.get());
            EurekaEventListener listener = event -> {
                try {
                    ReadinessStatus next = fetchCurrent();
                    emitter.next(next);
                    last.set(next);
                } catch (Exception e) {
                    // Record via code invariants rather than failing the stream; full stack trace
                    // only at debug level.
                    titusRuntime.getCodeInvariants().unexpectedError(
                            "EurekaClient event processing error: event=%s, error=%s", event, e.getMessage()
                    );
                    logger.debug("Unexpected failure", e);
                }
            };
            // There is a delay between the first fetchCurrent() operation, and the listener registration, in which
            // case it is possible to miss the first notification. It is ok, as the notifications are delivered at
            // a regular interval by Eureka client.
            eurekaClient.registerEventListener(listener);
            emitter.onCancel(() -> eurekaClient.unregisterEventListener(listener));
        }, FluxSink.OverflowStrategy.LATEST);
    }
    // Builds a ReadinessStatus snapshot from the instance's current Eureka registration status.
    private ReadinessStatus fetchCurrent() {
        List<InstanceInfo> instances = eurekaClient.getInstancesById(instanceId);
        if (instances.isEmpty()) {
            return ReadinessStatus.newBuilder()
                    .withState(ReadinessState.NotReady)
                    .withMessage("TitusMaster not registered with Eureka")
                    .withTimestamp(titusRuntime.getClock().wallTime())
                    .build();
        }
        // Only the first registration record is examined; presumably at most one instance is
        // registered under this id — TODO confirm.
        InstanceInfo instance = instances.get(0);
        ReadinessStatus.Builder statusBuilder = ReadinessStatus.newBuilder().withTimestamp(titusRuntime.getClock().wallTime());
        switch (instance.getStatus()) {
            case STARTING:
                statusBuilder
                        .withState(ReadinessState.NotReady)
                        .withMessage("TitusMaster is not started yet");
                break;
            case UP:
                statusBuilder
                        .withState(ReadinessState.Enabled)
                        .withMessage("TitusMaster is UP in Eureka");
                break;
            case DOWN:
                statusBuilder
                        .withState(ReadinessState.Disabled)
                        .withMessage("TitusMaster is DOWN in Eureka");
                break;
            case OUT_OF_SERVICE:
                statusBuilder
                        .withState(ReadinessState.Disabled)
                        .withMessage("TitusMaster is OUT_OF_SERVICE in Eureka");
                break;
            case UNKNOWN:
            default:
                statusBuilder
                        .withState(ReadinessState.Disabled)
                        .withMessage("TitusMaster status is unknown by Eureka");
                break;
        }
        return statusBuilder.build();
    }
}
| 1,336 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/supervisor/EurekaSupervisorConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.supervisor;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
@Configuration(prefix = "titusMaster.ext.eureka.supervisor")
public interface EurekaSupervisorConfiguration {

    /**
     * Eureka instance id used to look up this TitusMaster's own registration status.
     * The default is a placeholder; presumably real deployments override it via the
     * "titusMaster.ext.eureka.supervisor.instanceId" property — confirm per environment.
     */
    @DefaultValue("myInstanceId")
    String getInstanceId();
}
| 1,337 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/supervisor/EurekaSupervisorModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.supervisor;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.titus.api.supervisor.service.LocalMasterReadinessResolver;
/**
 * Guice bindings for the Eureka based TitusMaster readiness integration.
 */
public class EurekaSupervisorModule extends AbstractModule {

    @Override
    protected void configure() {
        // Resolve the local master readiness state from this node's Eureka registration.
        bind(LocalMasterReadinessResolver.class).to(EurekaLocalMasterReadinessResolver.class);
    }

    @Provides
    @Singleton
    public EurekaSupervisorConfiguration getEurekaSupervisorConfiguration(ConfigProxyFactory factory) {
        // Archaius-backed configuration proxy (see @Configuration prefix on the interface).
        return factory.newProxy(EurekaSupervisorConfiguration.class);
    }
}
| 1,338 |
0 | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka | Create_ds/titus-control-plane/titus-ext/eureka/src/main/java/com/netflix/titus/ext/eureka/spring/EurekaLoadBalancingExchangeFilterFunction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.eureka.spring;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.function.Function;
import javax.ws.rs.core.UriBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.ext.eureka.common.EurekaLoadBalancer;
import com.netflix.titus.ext.eureka.common.EurekaUris;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import org.springframework.web.reactive.function.client.ClientRequest;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.ExchangeFilterFunction;
import org.springframework.web.reactive.function.client.ExchangeFunction;
import reactor.core.publisher.Mono;
import static java.util.Arrays.asList;
public class EurekaLoadBalancingExchangeFilterFunction implements ExchangeFilterFunction {

    private static final Logger logger = LoggerFactory.getLogger(EurekaLoadBalancingExchangeFilterFunction.class);

    private static final String EUREKA_SCHEMA = "eureka";

    private final EurekaLoadBalancer loadBalancer;

    public EurekaLoadBalancingExchangeFilterFunction(EurekaClient eurekaClient,
                                                     Function<URI, String> vipExtractor,
                                                     TitusRuntime titusRuntime) {
        this.loadBalancer = new EurekaLoadBalancer(eurekaClient, vipExtractor, titusRuntime);
    }

    /**
     * Resolves the request's "eureka" URI to a concrete server instance and forwards the call,
     * feeding the outcome back into the load balancer. An invalid URI or an empty server pool is
     * answered with a 503 response rather than an error signal.
     */
    @Override
    public Mono<ClientResponse> filter(ClientRequest request, ExchangeFunction next) {
        URI targetUri;
        try {
            targetUri = EurekaUris.failIfEurekaUriInvalid(request.url());
        } catch (IllegalArgumentException e) {
            logger.warn(e.getMessage());
            logger.debug("Stack trace", e);
            return Mono.just(ClientResponse.create(HttpStatus.SERVICE_UNAVAILABLE).body(e.getMessage()).build());
        }

        return loadBalancer.chooseNext(targetUri)
                .map(chosen -> doExecute(chosen, request, next))
                .orElseGet(() -> doFailOnNoInstance(targetUri));
    }

    // Rewrites the request to point at the chosen instance and records success/failure on completion.
    private Mono<ClientResponse> doExecute(InstanceInfo instance, ClientRequest request, ExchangeFunction next) {
        URI eurekaUri = request.url();
        ClientRequest forwardedRequest = ClientRequest.create(request.method(), rewrite(eurekaUri, instance))
                .headers(headers -> headers.addAll(request.headers()))
                .cookies(cookies -> cookies.addAll(request.cookies()))
                .attributes(attributes -> attributes.putAll(request.attributes()))
                .body(request.body()).build();

        return next.exchange(forwardedRequest)
                .doOnNext(response -> {
                    // Any 5xx counts against the instance; everything else counts as a success.
                    if (!response.statusCode().is5xxServerError()) {
                        loadBalancer.recordSuccess(eurekaUri, instance);
                    } else {
                        loadBalancer.recordFailure(eurekaUri, instance);
                    }
                })
                .doOnError(error -> loadBalancer.recordFailure(eurekaUri, instance));
    }

    private Mono<ClientResponse> doFailOnNoInstance(URI eurekaUri) {
        ClientResponse emptyPoolResponse = ClientResponse.create(HttpStatus.SERVICE_UNAVAILABLE)
                .body("Server pool empty for eurekaUri=" + eurekaUri)
                .build();
        return Mono.just(emptyPoolResponse);
    }

    /**
     * Replaces the "eureka" scheme with http/https (per the "secure=true" query flag) and points
     * the host at the selected instance's IP address.
     */
    @VisibleForTesting
    static URI rewrite(URI original, InstanceInfo instance) {
        URI effectiveUri = original;
        if (original.getScheme().equals(EUREKA_SCHEMA)) {
            String targetSchema = isSecure(original) ? "https" : "http";
            try {
                effectiveUri = new URI(targetSchema + original.toString().substring(EUREKA_SCHEMA.length()));
            } catch (URISyntaxException e) {
                effectiveUri = original;
            }
        }
        return UriBuilder.fromUri(effectiveUri).host(instance.getIPAddr()).build();
    }

    // True when the URI's query string carries a literal "secure=true" parameter.
    private static boolean isSecure(URI uri) {
        String query = uri.getQuery();
        return StringExt.isNotEmpty(query) && asList(query.split("&")).contains("secure=true");
    }
}
| 1,339 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq/relocation/JooqTaskRelocationStoreTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.jooq.JooqContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Bean;
import org.springframework.core.env.Environment;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.springframework.test.annotation.DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD;
/**
 * Integration test for {@link JooqTaskRelocationStore}, running against the in-memory database
 * selected via the "titus.ext.jooq.relocation.inMemoryDb" property.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(
        properties = {
                "spring.application.name=test",
                "titus.ext.jooq.relocation.inMemoryDb=true"
        },
        classes = {
                JooqRelocationContextComponent.class,
                JooqTaskRelocationStoreTest.class,
        }
)
@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)
public class JooqTaskRelocationStoreTest {

    // The test class doubles as a Spring configuration; provides the TitusRuntime bean.
    @Bean
    public TitusRuntime getTitusRuntime(Environment environment) {
        return TitusRuntimes.internal(environment);
    }

    @Autowired
    public JooqContext jooqContext;

    // Store under test; re-created mid-test to force a reload from the database.
    private JooqTaskRelocationStore store;

    @Before
    public void setUp() {
        this.store = newStore();
    }

    @After
    public void tearDown() {
        // Truncate the relocation_plan table so tests do not leak state into each other.
        StepVerifier.create(store.clearStore()).verifyComplete();
    }

    /**
     * Full create/read/update/delete cycle, rebooting the store between steps to verify that the
     * data actually round-trips through the database rather than only the in-memory cache.
     */
    @Test
    public void testRelocationPlanStoreCrud() {
        List<TaskRelocationPlan> plans = newRelocationPlans(1);
        TaskRelocationPlan plan = plans.get(0);

        // Create
        Map<String, Optional<Throwable>> result = store.createOrUpdateTaskRelocationPlans(plans).block();
        assertThat(result).hasSize(1);
        assertThat(result.get(plan.getTaskId())).isEmpty();

        // Reboot (to force reload from the database).
        this.store = newStore();

        // Read
        assertThat(store.getAllTaskRelocationPlans().block()).hasSize(1);
        assertThat(store.getAllTaskRelocationPlans().block().get(plan.getTaskId())).isEqualTo(plan);

        // Update
        TaskRelocationPlan updatedPlan = plan.toBuilder().withReasonMessage("Updated...").build();
        Map<String, Optional<Throwable>> updatedPlanResult = store.createOrUpdateTaskRelocationPlans(Collections.singletonList(updatedPlan)).block();
        assertThat(updatedPlanResult).hasSize(1);
        assertThat(store.getAllTaskRelocationPlans().block().get(plan.getTaskId())).isEqualTo(updatedPlan);

        // Delete
        Map<String, Optional<Throwable>> deleteResult = store.removeTaskRelocationPlans(Collections.singleton(plan.getTaskId())).block();
        assertThat(deleteResult).hasSize(1);

        // Reboot
        this.store = newStore();
        assertThat(store.getAllTaskRelocationPlans().block()).hasSize(0);
    }

    /**
     * Bulk write: all 10k plans should be accepted with no per-item failures, and all should be
     * visible again after a store reboot.
     */
    @Test
    public void testStoringLargeAmountOfPlans() {
        List<TaskRelocationPlan> plans = newRelocationPlans(10_000);

        // Create
        Map<String, Optional<Throwable>> result = store.createOrUpdateTaskRelocationPlans(plans).block();
        assertThat(result).hasSize(plans.size());
        long failures = result.values().stream().filter(Optional::isPresent).count();
        assertThat(failures).isZero();

        // Reboot
        this.store = newStore();
        assertThat(store.getAllTaskRelocationPlans().block()).hasSize(10_000);
    }

    // Builds a fresh store over the shared DSL context and activates it (loads persisted state).
    private JooqTaskRelocationStore newStore() {
        JooqTaskRelocationStore store = new JooqTaskRelocationStore(jooqContext.getDslContext());
        store.activate();
        return store;
    }

    // Generates 'count' plans with task ids "task0".."task{count-1}".
    private List<TaskRelocationPlan> newRelocationPlans(int count) {
        List<TaskRelocationPlan> plans = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            plans.add(TaskRelocationPlan.newBuilder()
                    .withTaskId("task" + i)
                    .withReason(TaskRelocationPlan.TaskRelocationReason.TaskMigration)
                    .withReasonMessage("Test...")
                    .withRelocationTime(123)
                    .build()
            );
        }
        return plans;
    }
} | 1340 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq/relocation/JooqTaskRelocationResultStoreTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.ext.jooq.JooqContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Bean;
import org.springframework.core.env.Environment;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.springframework.test.annotation.DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD;
/**
 * Integration test for {@link JooqTaskRelocationResultStore} and the relocation GC, running
 * against the in-memory database selected via "titus.ext.jooq.relocation.inMemoryDb".
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(
        properties = {
                "spring.application.name=test",
                "titus.ext.jooq.relocation.inMemoryDb=true"
        },
        classes = {
                JooqRelocationContextComponent.class,
                JooqTaskRelocationResultStoreTest.class,
        }
)
@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)
public class JooqTaskRelocationResultStoreTest {

    // The test class doubles as a Spring configuration; provides the TitusRuntime bean.
    @Bean
    public TitusRuntime getTitusRuntime(Environment environment) {
        return TitusRuntimes.internal(environment);
    }

    @Autowired
    public TitusRuntime titusRuntime;

    @Autowired
    public JooqContext jooqContext;

    @Autowired
    public JooqRelocationConfiguration configuration;

    // Store under test; re-created mid-test to force a reload from the database.
    private JooqTaskRelocationResultStore store;

    @Before
    public void setUp() {
        this.store = newStore();
    }

    @After
    public void tearDown() {
        // Clear persisted statuses so tests do not leak state into each other.
        StepVerifier.create(store.clearStore()).verifyComplete();
    }

    /**
     * Create/read/update cycle for relocation statuses, rebooting the store between steps to
     * verify the data round-trips through the database.
     */
    @Test
    public void testRelocationStatusStoreCrud() {
        List<TaskRelocationStatus> statusList = newRelocationStatuses("task", 1, System.currentTimeMillis());
        TaskRelocationStatus status = statusList.get(0);

        // Create
        Map<String, Optional<Throwable>> result = store.createTaskRelocationStatuses(statusList).block();
        assertThat(result).hasSize(1);
        assertThat(result.get(status.getTaskId())).isEmpty();

        // Reboot (to force reload from the database).
        this.store = newStore();

        // Read
        List<TaskRelocationStatus> statusListRead = store.getTaskRelocationStatusList(status.getTaskId()).block();
        assertThat(statusListRead).hasSize(1);
        assertThat(statusListRead.get(0)).isEqualTo(status);

        // Update
        TaskRelocationStatus updatedStatus = status.toBuilder().withStatusMessage("Updated...").build();
        Map<String, Optional<Throwable>> updatedResult = store.createTaskRelocationStatuses(Collections.singletonList(updatedStatus)).block();
        assertThat(updatedResult).hasSize(1);
        assertThat(store.getTaskRelocationStatusList(status.getTaskId()).block().get(0)).isEqualTo(updatedStatus);

        // Reboot (to force reload from the database).
        this.store = newStore();

        // Read
        assertThat(store.getTaskRelocationStatusList(status.getTaskId()).block().get(0)).isEqualTo(updatedStatus);
    }

    // Bulk write: all 10k statuses should be accepted with no per-item failures.
    @Test
    public void testStoringLargeAmountOfStatuses() {
        List<TaskRelocationStatus> statusList = newRelocationStatuses("task", 10_000, System.currentTimeMillis());

        // Create
        Map<String, Optional<Throwable>> result = store.createTaskRelocationStatuses(statusList).block();
        assertThat(result).hasSize(statusList.size());
        long failures = result.values().stream().filter(Optional::isPresent).count();
        assertThat(failures).isZero();
    }

    /**
     * GC with a cut-off 50min in the past must remove only the 1h-old entry ("old0") and keep the
     * 1min-old one ("new0").
     */
    @Test
    public void testGC() {
        long now = System.currentTimeMillis();
        List<TaskRelocationStatus> statusList = CollectionsExt.merge(
                newRelocationStatuses("old", 1, now - 3_600_000),
                newRelocationStatuses("new", 1, now - 60_000)
        );
        store.createTaskRelocationStatuses(statusList).block();

        JooqTaskRelocationGC gc = new JooqTaskRelocationGC(configuration, jooqContext.getDslContext(), store, titusRuntime);
        int removed = gc.removeExpiredData(now - 3_000_000);
        assertThat(removed).isEqualTo(1);

        List<TaskRelocationStatus> oldTaskStatus = store.getTaskRelocationStatusList("old0").block();
        assertThat(oldTaskStatus).isEmpty();
        List<TaskRelocationStatus> newTaskStatus = store.getTaskRelocationStatusList("new0").block();
        assertThat(newTaskStatus).hasSize(1);
    }

    private JooqTaskRelocationResultStore newStore() {
        return new JooqTaskRelocationResultStore(jooqContext.getDslContext(), titusRuntime);
    }

    // Generates 'count' Success statuses with task ids "{taskPrefix}0".. at the given timestamp.
    private List<TaskRelocationStatus> newRelocationStatuses(String taskPrefix, int count, long executionTime) {
        List<TaskRelocationStatus> result = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            result.add(TaskRelocationStatus.newBuilder()
                    .withTaskId(taskPrefix + i)
                    .withState(TaskRelocationStatus.TaskRelocationState.Success)
                    .withStatusCode("status123")
                    .withStatusMessage("statusMessage123")
                    .withTimestamp(executionTime)
                    .withTaskRelocationPlan(TaskRelocationPlan.newBuilder()
                            .withTaskId("task" + i)
                            .withReason(TaskRelocationPlan.TaskRelocationReason.TaskMigration)
                            .withReasonMessage("Test...")
                            .withRelocationTime(123)
                            .build()
                    )
                    .build()
            );
        }
        return result;
    }
} | 1341 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/test/java/com/netflix/titus/ext/jooq/relocation/JooqRelocationComponentTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Bean;
import org.springframework.core.env.Environment;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
/**
 * Verifies that the relocation store beans can be wired by Spring — i.e. that
 * {@link JooqRelocationComponent} initializes in the correct order relative to the schema manager.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(
        properties = {
                "spring.application.name=test",
                "titus.ext.jooq.relocation.inMemoryDb=true"
        },
        classes = {
                JooqRelocationContextComponent.class,
                JooqRelocationComponent.class,
                JooqRelocationComponentTest.class,
        }
)
public class JooqRelocationComponentTest {

    // The test class doubles as a Spring configuration; provides the TitusRuntime bean.
    @Bean
    public TitusRuntime getTitusRuntime(Environment environment) {
        return TitusRuntimes.internal(environment);
    }

    // Injected only to force both store beans (and their dependencies) to be created.
    @Autowired
    public TaskRelocationStore relocationStore;

    @Autowired
    public TaskRelocationResultStore resultStore;

    @Test
    public void testDependencyOnRelocationSchemaManager() {
        // Nothing to do. We test the spring initialization order here only.
    }
}
| 1,342 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/RelocationSchemaManager.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import javax.inject.Singleton;
import com.netflix.titus.ext.jooq.JooqConfiguration;
import com.netflix.titus.ext.jooq.JooqContext;
import org.flywaydb.core.Flyway;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class RelocationSchemaManager {

    private static final Logger logger = LoggerFactory.getLogger(RelocationSchemaManager.class);

    /**
     * Runs the Flyway migrations for the "relocation" schema at construction time, unless schema
     * creation is disabled in the configuration.
     */
    public RelocationSchemaManager(JooqConfiguration configuration, JooqContext jooqContext) {
        if (!configuration.isCreateSchemaIfNotExist()) {
            return;
        }
        logger.info("Creating/migrating task relocation DB schema...");
        Flyway.configure()
                .schemas("relocation")
                .locations("classpath:db/migration/relocation")
                .dataSource(jooqContext.getDataSource())
                .load()
                .migrate();
    }
}
| 1,343 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqRelocationConfiguration.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.time.Duration;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
@Configuration(prefix = "titus.ext.jooq.relocation")
public interface JooqRelocationConfiguration {

    /**
     * Task relocation table data retention time.
     */
    @DefaultValue("14d")
    Duration getRetentionTime();

    /**
     * Interval between GC passes over expired relocation data (consumed by JooqTaskRelocationGC).
     */
    @DefaultValue("5m")
    Duration getGcInterval();

    /**
     * Upper bound on the number of rows processed in a single GC pass.
     */
    @DefaultValue("1000")
    int getGcRowLimit();
}
| 1,344 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqTaskRelocationStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.sql.Timestamp;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.ext.jooq.JooqUtils;
import com.netflix.titus.ext.jooq.relocation.tables.records.RelocationPlanRecord;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import org.jooq.DSLContext;
import org.jooq.Delete;
import org.jooq.Result;
import org.jooq.StoreQuery;
import org.jooq.UpdateQuery;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoProcessor;
/**
 * JOOQ/SQL backed implementation of {@link TaskRelocationStore}. All plans are additionally held
 * in an in-memory map that is loaded from the database on {@link #activate()} and kept in sync on
 * every successful write, so reads never touch the database.
 */
@Singleton
public class JooqTaskRelocationStore implements TaskRelocationStore {

    private final DSLContext dslContext;

    // In-memory mirror of the relocation_plan table, keyed by task id.
    private final ConcurrentMap<String, TaskRelocationPlan> plansByTaskId = new ConcurrentHashMap<>();

    @Inject
    public JooqTaskRelocationStore(DSLContext dslContext) {
        this.dslContext = dslContext;
    }

    /**
     * Loads all persisted relocation plans into the in-memory cache. Must be called before the
     * store is used.
     */
    @Override
    public void activate() {
        load();
    }

    /**
     * Removes all rows from the relocation_plan table. Test helper only; it does not clear the
     * in-memory cache of this instance.
     */
    @VisibleForTesting
    Mono<Void> clearStore() {
        return JooqUtils.executeAsyncMono(() -> dslContext.truncateTable(Relocation.RELOCATION.RELOCATION_PLAN).execute(), dslContext).then();
    }

    // Synchronously reads all plans from the database into plansByTaskId.
    private void load() {
        Result<RelocationPlanRecord> allRows = dslContext.selectFrom(Relocation.RELOCATION.RELOCATION_PLAN).fetch();
        for (RelocationPlanRecord record : allRows) {
            plansByTaskId.put(
                    record.getTaskId(),
                    TaskRelocationPlan.newBuilder()
                            .withTaskId(record.getTaskId())
                            .withReason(TaskRelocationReason.valueOf(record.getReasonCode()))
                            .withReasonMessage(record.getReasonMessage())
                            .withDecisionTime(record.getDecisionTime().getTime())
                            .withRelocationTime(record.getRelocationTime().getTime())
                            .build()
            );
        }
    }

    /**
     * Inserts new plans and updates already known ones in a single batch. On success, emits a map
     * from task id to an empty Optional and refreshes the in-memory cache; if the batch fails the
     * whole Mono errors. An empty input completes without emitting a value.
     */
    @Override
    public Mono<Map<String, Optional<Throwable>>> createOrUpdateTaskRelocationPlans(List<TaskRelocationPlan> taskRelocationPlans) {
        if (taskRelocationPlans.isEmpty()) {
            return Mono.empty();
        }
        return Mono.defer(() -> {
            List<StoreQuery<RelocationPlanRecord>> queries = taskRelocationPlans.stream().map(this::newCreateOrUpdateQuery).collect(Collectors.toList());
            CompletionStage<int[]> asyncAction = JooqUtils.executeAsync(() ->
                    dslContext.batch(queries).execute(), dslContext);

            MonoProcessor<Map<String, Optional<Throwable>>> callerProcessor = MonoProcessor.create();
            asyncAction.handle((result, error) -> {
                Map<String, Optional<Throwable>> resultMap = new HashMap<>();
                if (error == null) {
                    taskRelocationPlans.forEach(p -> {
                        resultMap.put(p.getTaskId(), Optional.empty());
                        plansByTaskId.put(p.getTaskId(), p);
                    });
                    callerProcessor.onNext(resultMap);
                } else {
                    callerProcessor.onError(error);
                }
                return null;
            });
            return callerProcessor;
        });
    }

    /**
     * Returns an unmodifiable (live) view of the in-memory plan cache.
     */
    @Override
    public Mono<Map<String, TaskRelocationPlan>> getAllTaskRelocationPlans() {
        return Mono.just(Collections.unmodifiableMap(plansByTaskId));
    }

    /**
     * Deletes the given task ids from the database and the cache. Ids unknown to the cache are
     * skipped in the database batch but still reported as successfully removed in the result map.
     * An empty input completes without emitting a value.
     */
    @Override
    public Mono<Map<String, Optional<Throwable>>> removeTaskRelocationPlans(Set<String> toRemove) {
        if (toRemove.isEmpty()) {
            return Mono.empty();
        }
        return Mono.defer(() -> {
            List<Delete<RelocationPlanRecord>> deletes = toRemove.stream()
                    .filter(plansByTaskId::containsKey)
                    .map(this::newDelete)
                    .collect(Collectors.toList());
            CompletionStage<int[]> asyncAction = JooqUtils.executeAsync(() ->
                    dslContext.batch(deletes).execute(), dslContext);

            MonoProcessor<Map<String, Optional<Throwable>>> callerProcessor = MonoProcessor.create();
            asyncAction.handle((result, error) -> {
                if (error == null) {
                    toRemove.forEach(plansByTaskId::remove);
                    Map<String, Optional<Throwable>> resultMap = toRemove.stream()
                            .collect(Collectors.toMap(taskId -> taskId, taskId -> Optional.empty()));
                    callerProcessor.onNext(resultMap);
                } else {
                    callerProcessor.onError(error);
                }
                return null;
            });
            return callerProcessor;
        });
    }

    /**
     * Builds an insert for unknown task ids and an update for known ones.
     */
    private StoreQuery<RelocationPlanRecord> newCreateOrUpdateQuery(TaskRelocationPlan relocationPlan) {
        StoreQuery<RelocationPlanRecord> storeQuery;
        if (plansByTaskId.containsKey(relocationPlan.getTaskId())) {
            UpdateQuery<RelocationPlanRecord> updateQuery = dslContext.updateQuery(Relocation.RELOCATION.RELOCATION_PLAN);
            // Restrict the update to this task's row. jOOQ's updateQuery() does not add a primary
            // key predicate automatically; without this condition the generated SQL has no WHERE
            // clause and the update would overwrite every row in the table.
            updateQuery.addConditions(Relocation.RELOCATION.RELOCATION_PLAN.TASK_ID.eq(relocationPlan.getTaskId()));
            storeQuery = updateQuery;
        } else {
            storeQuery = dslContext.insertQuery(Relocation.RELOCATION.RELOCATION_PLAN);
            storeQuery.addValue(Relocation.RELOCATION.RELOCATION_PLAN.TASK_ID, relocationPlan.getTaskId());
        }
        storeQuery.addValue(Relocation.RELOCATION.RELOCATION_PLAN.REASON_CODE, relocationPlan.getReason().name());
        storeQuery.addValue(Relocation.RELOCATION.RELOCATION_PLAN.REASON_MESSAGE, relocationPlan.getReasonMessage());
        storeQuery.addValue(Relocation.RELOCATION.RELOCATION_PLAN.DECISION_TIME, new Timestamp(relocationPlan.getDecisionTime()));
        storeQuery.addValue(Relocation.RELOCATION.RELOCATION_PLAN.RELOCATION_TIME, new Timestamp(relocationPlan.getRelocationTime()));
        return storeQuery;
    }

    private Delete<RelocationPlanRecord> newDelete(String taskId) {
        return dslContext.delete(Relocation.RELOCATION.RELOCATION_PLAN).where(Relocation.RELOCATION.RELOCATION_PLAN.TASK_ID.eq(taskId));
    }
}
| 1,345 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqRelocationComponent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStore;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationStoreActivator;
import org.jooq.DSLContext;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
/**
 * Spring wiring for the JOOQ based task relocation stores. The store beans depend on
 * "relocationSchemaManager" so the database schema is migrated before they are created.
 */
@Configuration
@Import({JooqTaskRelocationGC.class})
@ConditionalOnProperty(name = "titus.ext.jooq.relocation.enabled", havingValue = "true", matchIfMissing = true)
public class JooqRelocationComponent {

    @Bean
    @DependsOn({"relocationSchemaManager"})
    public TaskRelocationStore getTaskRelocationStore(DSLContext dslContext) {
        return new JooqTaskRelocationStore(dslContext);
    }

    @Bean
    @DependsOn({"relocationSchemaManager"})
    public TaskRelocationResultStore getTaskRelocationResultStore(DSLContext dslContext,
                                                                  TitusRuntime titusRuntime) {
        return new JooqTaskRelocationResultStore(dslContext, titusRuntime);
    }

    @Bean
    public TaskRelocationStoreActivator getTaskRelocationStoreActivator(JooqTaskRelocationStore relocationStore,
                                                                        JooqTaskRelocationGC relocationGC) {
        // TaskRelocationStoreActivator exposes a single activate() method, so a lambda replaces
        // the anonymous inner class; activation order (store first, then GC) is preserved.
        return () -> {
            relocationStore.activate();
            relocationGC.activate();
        };
    }
}
| 1,346 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqTaskRelocationResultStore.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.sql.Timestamp;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationStatus;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.cache.Cache;
import com.netflix.titus.common.util.cache.Caches;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.jooq.JooqUtils;
import com.netflix.titus.ext.jooq.relocation.tables.records.RelocationStatusRecord;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import org.jooq.Configuration;
import org.jooq.DSLContext;
import org.jooq.Result;
import org.jooq.StoreQuery;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoProcessor;
@Singleton
public class JooqTaskRelocationResultStore implements TaskRelocationResultStore {
private static final int MAX_TEXT_LENGTH = 2048;
private final DSLContext dslContext;
private final Cache<String, TaskRelocationStatus> statusesByTaskId;
@Inject
public JooqTaskRelocationResultStore(DSLContext dslContext, TitusRuntime titusRuntime) {
this.dslContext = dslContext;
this.statusesByTaskId = Caches.instrumentedCacheWithMaxSize(
100_000,
"titus.ext.jooq.relocationResultStore",
titusRuntime.getRegistry()
);
}
@VisibleForTesting
Mono<Void> clearStore() {
return JooqUtils.executeAsyncMono(() -> dslContext.truncateTable(Relocation.RELOCATION.RELOCATION_STATUS).execute(), dslContext).then();
}
@Override
public Mono<Map<String, Optional<Throwable>>> createTaskRelocationStatuses(List<TaskRelocationStatus> taskRelocationStatuses) {
return Mono.defer(() -> {
CompletionStage<int[]> asyncAction = JooqUtils.executeAsync(() -> {
loadToCache(findNotCached(taskRelocationStatuses), dslContext.configuration());
List<StoreQuery<RelocationStatusRecord>> queries = taskRelocationStatuses.stream()
.map(this::newCreateOrUpdateQuery)
.collect(Collectors.toList());
return dslContext
.batch(queries)
.execute();
}, dslContext);
MonoProcessor<Map<String, Optional<Throwable>>> callerProcessor = MonoProcessor.create();
asyncAction.handle((result, error) -> {
Map<String, Optional<Throwable>> resultMap = new HashMap<>();
if (error == null) {
taskRelocationStatuses.forEach(p -> {
resultMap.put(p.getTaskId(), Optional.empty());
statusesByTaskId.put(p.getTaskId(), p);
});
callerProcessor.onNext(resultMap);
} else {
callerProcessor.onError(error);
}
return null;
});
return callerProcessor;
});
}
@Override
public Mono<List<TaskRelocationStatus>> getTaskRelocationStatusList(String taskId) {
return Mono.defer(() -> {
TaskRelocationStatus status = statusesByTaskId.getIfPresent(taskId);
if (status != null) {
return Mono.just(Collections.singletonList(status));
}
CompletionStage<Void> asyncAction = JooqUtils.executeAsync(() -> {
loadToCache(Collections.singleton(taskId), dslContext.configuration());
return null;
}, dslContext);
MonoProcessor<List<TaskRelocationStatus>> callerProcessor = MonoProcessor.create();
asyncAction.handle((result, error) -> {
if (error == null) {
TaskRelocationStatus loadedStatus = statusesByTaskId.getIfPresent(taskId);
callerProcessor.onNext(loadedStatus == null ? Collections.emptyList() : Collections.singletonList(loadedStatus));
} else {
callerProcessor.onError(error);
}
return null;
});
return callerProcessor;
});
}
/**
* Remove from cache garbage collected entries.
*/
void removeFromCache(List<Pair<String, Long>> toRemove) {
toRemove.forEach(p -> {
String taskId = p.getLeft();
long timestamp = p.getRight();
TaskRelocationStatus status = statusesByTaskId.getIfPresent(taskId);
if (status != null && status.getTimestamp() == timestamp) {
statusesByTaskId.invalidate(taskId);
}
});
}
private Set<String> findNotCached(List<TaskRelocationStatus> taskRelocationStatuses) {
return taskRelocationStatuses.stream()
.map(TaskRelocationStatus::getTaskId)
.filter(taskId -> statusesByTaskId.getIfPresent(taskId) == null)
.collect(Collectors.toSet());
}
private void loadToCache(Set<String> notCached, Configuration configuration) {
Result<RelocationStatusRecord> loaded = configuration.dsl()
.selectFrom(Relocation.RELOCATION.RELOCATION_STATUS)
.where(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID.in(notCached)).fetch();
loaded.forEach(record ->
statusesByTaskId.put(record.getTaskId(),
TaskRelocationStatus.newBuilder()
.withTaskId(record.getTaskId())
.withState(TaskRelocationStatus.TaskRelocationState.valueOf(record.getRelocationState()))
.withStatusCode(record.getStatusCode())
.withStatusMessage(record.getStatusMessage())
.withTimestamp(record.getRelocationExecutionTime().getTime())
.withTaskRelocationPlan(TaskRelocationPlan.newBuilder()
.withTaskId(record.getTaskId())
.withReason(TaskRelocationPlan.TaskRelocationReason.valueOf(record.getReasonCode()))
.withReasonMessage(record.getReasonMessage())
.withDecisionTime(record.getRelocationDecisionTime().getTime())
.withRelocationTime(record.getRelocationPlanTime().getTime())
.build()
)
.withTimestamp(record.getRelocationExecutionTime().getTime())
.build()
));
}
private StoreQuery<RelocationStatusRecord> newCreateOrUpdateQuery(TaskRelocationStatus relocationStatus) {
StoreQuery<RelocationStatusRecord> storeQuery;
if (statusesByTaskId.getIfPresent(relocationStatus.getTaskId()) != null) {
storeQuery = dslContext.updateQuery(Relocation.RELOCATION.RELOCATION_STATUS);
} else {
storeQuery = dslContext.insertQuery(Relocation.RELOCATION.RELOCATION_STATUS);
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID, relocationStatus.getTaskId());
}
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_STATE, relocationStatus.getState().name());
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.STATUS_CODE, relocationStatus.getStatusCode());
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.STATUS_MESSAGE, toLengthLimitedVarchar(relocationStatus.getStatusMessage()));
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.REASON_CODE, relocationStatus.getTaskRelocationPlan().getReason().name());
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.REASON_MESSAGE, toLengthLimitedVarchar(relocationStatus.getTaskRelocationPlan().getReasonMessage()));
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_DECISION_TIME, new Timestamp(relocationStatus.getTaskRelocationPlan().getDecisionTime()));
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_PLAN_TIME, new Timestamp(relocationStatus.getTaskRelocationPlan().getRelocationTime()));
storeQuery.addValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME, new Timestamp(relocationStatus.getTimestamp()));
return storeQuery;
}
private String toLengthLimitedVarchar(String text) {
return text.length() <= MAX_TEXT_LENGTH ? text : text.substring(0, MAX_TEXT_LENGTH);
}
}
| 1,347 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqTaskRelocationGC.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import java.sql.Timestamp;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.titus.api.common.LeaderActivationListener;
import com.netflix.titus.common.framework.scheduler.ScheduleReference;
import com.netflix.titus.common.framework.scheduler.model.ScheduleDescriptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore;
import org.jooq.DSLContext;
import org.jooq.Record2;
import org.jooq.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Task relocation garbage collector. Removes entries from the relocation status table older than a configurable
* amount of time.
*/
@Singleton
public class JooqTaskRelocationGC implements LeaderActivationListener {

    // Made final: this is a metric-name prefix constant and must not be reassigned.
    public static final String METRIC_ROOT = "titus.relocation.gc.";

    private static final Logger logger = LoggerFactory.getLogger(JooqTaskRelocationGC.class);

    private final JooqRelocationConfiguration configuration;
    private final DSLContext dslContext;
    private final JooqTaskRelocationResultStore relocationResultStore;
    private final TitusRuntime titusRuntime;

    // Handle to the periodic GC schedule; set on activate(), cancelled on shutdown().
    private ScheduleReference scheduleRef;

    private final Gauge allRowsGauge;
    private final Gauge expiredRowsGauge;
    private final Counter gcCounter;

    @Inject
    public JooqTaskRelocationGC(JooqRelocationConfiguration configuration,
                                DSLContext dslContext,
                                TaskRelocationResultStore relocationResultStore,
                                TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.dslContext = dslContext;
        this.relocationResultStore = (JooqTaskRelocationResultStore) relocationResultStore;
        this.titusRuntime = titusRuntime;
        this.allRowsGauge = titusRuntime.getRegistry().gauge(METRIC_ROOT + "allRows", "table", "relocation_status");
        this.expiredRowsGauge = titusRuntime.getRegistry().gauge(METRIC_ROOT + "expiredRows", "table", "relocation_status");
        this.gcCounter = titusRuntime.getRegistry().counter(METRIC_ROOT + "removedCount", "table", "relocation_status");
    }

    /**
     * Starts the periodic GC schedule. Invoked when this node becomes the leader.
     */
    @Override
    public void activate() {
        this.scheduleRef = titusRuntime.getLocalScheduler().schedule(ScheduleDescriptor.newBuilder()
                        .withName(JooqTaskRelocationGC.class.getSimpleName())
                        .withDescription("Titus relocation data GC")
                        .withInitialDelay(Duration.ZERO)
                        .withInterval(configuration.getGcInterval())
                        .withTimeout(Duration.ofMinutes(30))
                        .build(),
                context -> doGC(),
                true
        );
    }

    @PreDestroy
    public void shutdown() {
        // scheduleRef is null if activate() was never called (node never became leader).
        Evaluators.acceptNotNull(scheduleRef, ScheduleReference::cancel);
    }

    /** One GC cycle: removes rows older than the configured retention time. */
    private void doGC() {
        Stopwatch stopwatch = Stopwatch.createStarted();
        logger.info("Starting new relocation status table GC cycle...");
        removeExpiredData(titusRuntime.getClock().wallTime() - configuration.getRetentionTime().toMillis());
        logger.info("The relocation status table GC cycle finished: elapsedMs={}", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }

    /**
     * Removes all entries older than the given time threshold, bounded per cycle by the configured
     * GC row limit (plus any extra rows sharing the boundary timestamp).
     *
     * @param timeThreshold epoch millis; rows with an execution time before this are expired
     * @return the number of rows actually deleted from the database
     */
    @VisibleForTesting
    int removeExpiredData(long timeThreshold) {
        // Count all items (for the gauge and log visibility).
        int allCount = dslContext.fetchCount(Relocation.RELOCATION.RELOCATION_STATUS);
        logger.info("All rows in 'relocation_status' table: {}", allCount);
        allRowsGauge.set(allCount);

        int expiredCount = dslContext.fetchCount(
                Relocation.RELOCATION.RELOCATION_STATUS,
                Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.lt(Timestamp.from(Instant.ofEpochMilli(timeThreshold)))
        );
        logger.info("Expired rows in 'relocation_status' table: {}", expiredCount);
        expiredRowsGauge.set(expiredCount);
        if (expiredCount <= 0) {
            return 0;
        }

        // Locate the timestamp up to which to remove (oldest rows first, capped by the GC row limit).
        Result<Record2<String, Timestamp>> timestampRow = dslContext.select(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID, Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME)
                .from(Relocation.RELOCATION.RELOCATION_STATUS)
                .where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.lt(Timestamp.from(Instant.ofEpochMilli(timeThreshold))))
                .orderBy(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.asc())
                .limit(configuration.getGcRowLimit())
                .fetch();
        if (timestampRow.isEmpty()) {
            logger.info("No expired data found");
            return 0;
        }
        Timestamp lastToRemove = timestampRow.get(timestampRow.size() - 1).getValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME);

        // Load all data up to the given timestamp. This could be more data than above (and more than
        // the GC limit) when there are extra records with the lastToRemove timestamp that were not
        // returned due to the limit constraint. This is fine, as we do not expect many of those.
        Result<Record2<String, Timestamp>> toRemoveRows = dslContext.select(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID, Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME)
                .from(Relocation.RELOCATION.RELOCATION_STATUS)
                .where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.le(lastToRemove))
                .orderBy(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.asc())
                .fetch();

        List<Pair<String, Long>> toRemoveSet = toRemoveRows.stream()
                .map(r -> Pair.of(r.get(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID), r.get(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME).getTime()))
                .collect(Collectors.toList());
        logger.info("Records to remove: {}", toRemoveSet);

        int removedFromDb = dslContext.delete(Relocation.RELOCATION.RELOCATION_STATUS).where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.le(lastToRemove)).execute();
        logger.info("Removed expired rows from 'relocation_status' table: {}", removedFromDb);
        gcCounter.increment(removedFromDb);

        // Keep the store's cache consistent with the rows just deleted.
        relocationResultStore.removeFromCache(toRemoveSet);

        return removedFromDb;
    }
}
| 1,348 |
0 | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq | Create_ds/titus-control-plane/titus-ext/jooq-relocation/src/main/java/com/netflix/titus/ext/jooq/relocation/JooqRelocationContextComponent.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.jooq.relocation;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.ext.jooq.JooqConfiguration;
import com.netflix.titus.ext.jooq.JooqContext;
import com.netflix.titus.ext.jooq.ProductionJooqContext;
import com.netflix.titus.ext.jooq.embedded.EmbeddedJooqContext;
import org.jooq.DSLContext;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
@ConditionalOnProperty(name = "titus.ext.jooq.relocation.enabled", havingValue = "true", matchIfMissing = true)
public class JooqRelocationContextComponent {

    /** JOOQ property configuration bound to the "titus.ext.jooq.relocation" prefix. */
    @Bean
    public JooqConfiguration getJooqPropertyConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(JooqConfiguration.class, "titus.ext.jooq.relocation", titusRuntime.getMyEnvironment());
    }

    /** Relocation-specific configuration (GC interval, retention time, row limit, etc.). */
    @Bean
    public JooqRelocationConfiguration getJooqRelocationConfiguration(TitusRuntime titusRuntime) {
        return Archaius2Ext.newConfiguration(JooqRelocationConfiguration.class, titusRuntime.getMyEnvironment());
    }

    /**
     * Chooses between an embedded in-memory database context (tests/local runs) and the
     * production JOOQ context, based on the configuration flag.
     */
    @Bean
    public JooqContext getJooqContext(JooqConfiguration jooqConfiguration,
                                      ConfigurableApplicationContext applicationContext,
                                      TitusRuntime titusRuntime) {
        return jooqConfiguration.isInMemoryDb()
                ? new EmbeddedJooqContext(applicationContext, "relocation", titusRuntime)
                : new ProductionJooqContext(jooqConfiguration, titusRuntime);
    }

    /** Exposes the DSLContext of the selected JOOQ context as a bean. */
    @Bean
    public DSLContext getDSLContext(JooqContext relocationContext) {
        return relocationContext.getDslContext();
    }

    /** Schema manager bean other store beans depend on (see @DependsOn in JooqRelocationComponent). */
    @Bean(name = "relocationSchemaManager")
    public RelocationSchemaManager getRelocationSchemaManager(JooqConfiguration jooqConfiguration, JooqContext relocationContext) {
        return new RelocationSchemaManager(jooqConfiguration, relocationContext);
    }
}
| 1,349 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper/CuratorServiceResource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.ext.zookeeper.connector.CuratorService;
import com.netflix.titus.ext.zookeeper.connector.CuratorServiceImpl;
import com.netflix.titus.ext.zookeeper.connector.CuratorUtils;
import com.netflix.titus.ext.zookeeper.connector.DefaultZookeeperClusterResolver;
import com.netflix.titus.ext.zookeeper.connector.ZookeeperClusterResolver;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.test.TestingServer;
import org.junit.rules.ExternalResource;
import org.mockito.Mockito;
/**
 * JUnit rule that starts an embedded Zookeeper testing server and a {@link CuratorService}
 * connected to it, and tears both down after the test (class).
 */
public class CuratorServiceResource extends ExternalResource {

    private final TitusRuntime titusRuntime;

    private ZookeeperPaths zkPaths;
    private CuratorServiceImpl curatorService;
    private TestingServer zkServer;

    public CuratorServiceResource(TitusRuntime titusRuntime) {
        this.titusRuntime = titusRuntime;
    }

    @Override
    protected void before() throws Throwable {
        zkServer = new TestingServer(true);
        ZookeeperConfiguration zookeeperConfiguration = ZookeeperTestUtils.withEmbeddedZookeeper(Mockito.mock(ZookeeperConfiguration.class), zkServer.getConnectString());
        zkPaths = new ZookeeperPaths(zookeeperConfiguration);
        ZookeeperClusterResolver clusterResolver = new DefaultZookeeperClusterResolver(zookeeperConfiguration);
        curatorService = new CuratorServiceImpl(zookeeperConfiguration, clusterResolver, titusRuntime.getRegistry());
        curatorService.start();
    }

    @Override
    protected void after() {
        // Guard against partial initialization: if before() failed midway, some fields are
        // still null and the previous implementation would throw NPE during cleanup.
        if (curatorService != null) {
            curatorService.shutdown();
        }
        if (zkServer != null) {
            ExceptionExt.silent(zkServer::close);
        }
    }

    /** Pre-creates the leader election and announcement paths used by the supervisor tests. */
    public void createAllPaths() {
        CuratorFramework curator = curatorService.getCurator();
        CuratorUtils.createPathIfNotExist(curator, zkPaths.getLeaderElectionPath(), true);
        CuratorUtils.createPathIfNotExist(curator, zkPaths.getLeaderAnnouncementPath(), true);
    }

    public CuratorService getCuratorService() {
        return curatorService;
    }

    public ZookeeperPaths getZkPaths() {
        return zkPaths;
    }
}
| 1,350 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper/ZookeeperTestUtils.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import com.netflix.titus.api.supervisor.model.MasterInstance;
import com.netflix.titus.api.supervisor.model.MasterState;
import com.netflix.titus.api.supervisor.model.MasterStatus;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import static org.mockito.Mockito.when;
/**
 * Static helpers for Zookeeper-based tests: mock configuration wiring and test fixtures
 * for master description/instance models.
 */
public class ZookeeperTestUtils {

    // Utility class; prevent instantiation.
    private ZookeeperTestUtils() {
    }

    /**
     * Configures the given {@link ZookeeperConfiguration} mock with short timeouts/retries and the
     * connection string of an embedded Zookeeper server. Returns the same mock for chaining.
     */
    public static ZookeeperConfiguration withEmbeddedZookeeper(ZookeeperConfiguration configurationMock, String zkConnectStr) {
        when(configurationMock.getZkConnectionTimeoutMs()).thenReturn(1000);
        when(configurationMock.getZkConnectionRetrySleepMs()).thenReturn(100);
        when(configurationMock.getZkConnectionMaxRetries()).thenReturn(3);
        when(configurationMock.getZkConnectionString()).thenReturn(zkConnectStr);
        return configurationMock;
    }

    /** Builds a {@link MasterDescription} for the local host with the given API port. */
    public static MasterDescription newMasterDescription(int apiPort) {
        return new MasterDescription(
                getHost(),
                getHostIP(),
                apiPort,
                "http://myFakeStatusURI",
                System.currentTimeMillis()
        );
    }

    /** Builds a {@link MasterInstance} fixture with the given id and initial state. */
    public static MasterInstance newMasterInstance(String instanceId, MasterState initialState) {
        return MasterInstance.newBuilder()
                .withInstanceId(instanceId)
                .withInstanceGroupId(instanceId + "Group")
                .withIpAddress("1.2.3.4")
                .withStatus(MasterStatus.newBuilder()
                        .withState(initialState)
                        .withMessage("Initial MasterInstance version")
                        .build()
                )
                .withStatusHistory(Collections.emptyList())
                .build();
    }

    private static String getHost() {
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }

    private static String getHostIP() {
        try {
            return InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }
}
| 1,351 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper/supervisor/ZookeeperLeaderElectorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.zookeeper.CuratorServiceResource;
import com.netflix.titus.ext.zookeeper.ZookeeperConfiguration;
import com.netflix.titus.ext.zookeeper.ZookeeperPaths;
import com.netflix.titus.ext.zookeeper.connector.CuratorServiceImpl;
import com.netflix.titus.ext.zookeeper.connector.CuratorUtils;
import com.netflix.titus.ext.zookeeper.connector.DefaultZookeeperClusterResolver;
import com.netflix.titus.ext.zookeeper.connector.ZookeeperClusterResolver;
import com.netflix.titus.api.supervisor.service.LeaderActivator;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import com.netflix.titus.testkit.junit.resource.CloseableExternalResource;
import org.apache.curator.CuratorConnectionLossException;
import org.apache.curator.framework.CuratorFramework;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@Category(IntegrationNotParallelizableTest.class)
@Category(IntegrationNotParallelizableTest.class)
public class ZookeeperLeaderElectorTest {

    private static final TitusRuntime titusRuntime = TitusRuntimes.internal();

    @ClassRule
    public static final CuratorServiceResource curatorServiceResource = new CuratorServiceResource(titusRuntime);

    @Rule
    public final CloseableExternalResource closeable = new CloseableExternalResource();

    private final LeaderActivator leaderActivator = mock(LeaderActivator.class);

    private final MasterDescription masterDescription = new MasterDescription(
            "myHost", "1.1.1.1", 8080, "/api/status/uri", System.currentTimeMillis()
    );

    private final CuratorFramework curator = curatorServiceResource.getCuratorService().getCurator();
    private final ZookeeperPaths zkPaths = curatorServiceResource.getZkPaths();

    @Test
    public void testConnectionLossWillLeadToStartupFailure() {
        // Point the elector at a port where no Zookeeper is listening.
        ZookeeperConfiguration config = mock(ZookeeperConfiguration.class);
        when(config.getZkConnectionString()).thenReturn("127.0.0.1:44444");
        ZookeeperClusterResolver clusterResolver = new DefaultZookeeperClusterResolver(config);
        try {
            CuratorServiceImpl cs = closeable.autoCloseable(new CuratorServiceImpl(config, clusterResolver, new DefaultRegistry()), CuratorServiceImpl::shutdown);
            cs.start();
            ZookeeperLeaderElector elector = new ZookeeperLeaderElector(leaderActivator, cs, zkPaths, masterDescription, titusRuntime);
            elector.join();
            fail("The elector should fail fast");
        } catch (IllegalStateException e) {
            assertEquals("The cause should be from ZK connection failure", CuratorConnectionLossException.class, e.getCause().getClass());
            assertTrue("The error message is unexpected: " + e.getMessage(), e.getCause().getMessage().contains("ConnectionLoss"));
        }
    }

    @Test
    public void testLeaderCanHandleExistingPath() throws Exception {
        CuratorUtils.createPathIfNotExist(curator, zkPaths.getLeaderElectionPath(), true);
        CuratorUtils.createPathIfNotExist(curator, zkPaths.getLeaderAnnouncementPath(), true);

        ZookeeperLeaderElector elector = newZookeeperLeaderElector();
        assertThat(elector.join()).isTrue();
        awaitLeaderActivation();
    }

    @Test
    public void testLeaveIfNotLeader() throws Exception {
        ZookeeperLeaderElector firstElector = newZookeeperLeaderElector();
        ZookeeperLeaderElector secondElector = newZookeeperLeaderElector();

        // Make the first elector a leader.
        assertThat(firstElector.join()).isTrue();
        awaitLeaderActivation();
        assertThat(firstElector.leaveIfNotLeader()).isFalse();

        // Check that second elector can join and leave the leader election process.
        assertThat(secondElector.join()).isTrue();
        assertThat(secondElector.leaveIfNotLeader()).isTrue();
    }

    private ZookeeperLeaderElector newZookeeperLeaderElector() {
        return closeable.autoCloseable(
                new ZookeeperLeaderElector(leaderActivator, curatorServiceResource.getCuratorService(), zkPaths, masterDescription, titusRuntime),
                ZookeeperLeaderElector::shutdown
        );
    }

    /** Waits until becomeLeader() is invoked, failing the test if it does not happen in time. */
    private void awaitLeaderActivation() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        doAnswer(invocation -> {
            latch.countDown();
            return null;
        }).when(leaderActivator).becomeLeader();
        // Fixed: the await() result was previously ignored, silently swallowing a timeout.
        assertTrue("Leader was not activated within 5s", latch.await(5, TimeUnit.SECONDS));
        verify(leaderActivator, times(1)).becomeLeader();
    }
}
| 1,352 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/test/java/com/netflix/titus/ext/zookeeper/supervisor/ZookeeperMasterMonitorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.ext.zookeeper.CuratorServiceResource;
import com.netflix.titus.ext.zookeeper.ZookeeperPaths;
import com.netflix.titus.ext.zookeeper.ZookeeperTestUtils;
import com.netflix.titus.ext.zookeeper.connector.CuratorUtils;
import com.netflix.titus.master.supervisor.endpoint.grpc.SupervisorGrpcModelConverters;
import com.netflix.titus.api.supervisor.model.MasterInstance;
import com.netflix.titus.api.supervisor.model.MasterInstanceFunctions;
import com.netflix.titus.api.supervisor.model.MasterState;
import com.netflix.titus.api.supervisor.model.MasterStatus;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.apache.curator.framework.CuratorFramework;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static com.netflix.titus.ext.zookeeper.ZookeeperTestUtils.newMasterDescription;
import static com.netflix.titus.master.supervisor.endpoint.grpc.SupervisorGrpcModelConverters.toCoreMasterInstance;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
@Category(IntegrationNotParallelizableTest.class)
public class ZookeeperMasterMonitorTest {
private static final TitusRuntime titusRuntime = TitusRuntimes.internal();
private static final MasterInstance DEFAULT_MASTER_INSTANCE = toCoreMasterInstance(com.netflix.titus.grpc.protogen.MasterInstance.getDefaultInstance());
@ClassRule
public static CuratorServiceResource curatorServiceResource = new CuratorServiceResource(titusRuntime);
private static CuratorFramework curator;
private static ZookeeperPaths zkPaths;
private ZookeeperMasterMonitor masterMonitor;
    @BeforeClass
    public static void setUpClass() {
        // Pre-create all ZK paths once, so individual tests do not race with path creation.
        curatorServiceResource.createAllPaths();
        curator = curatorServiceResource.getCuratorService().getCurator();
        zkPaths = curatorServiceResource.getZkPaths();
    }
    @Before
    public void setUp() {
        // Fresh monitor per test; started here and shut down in tearDown().
        masterMonitor = new ZookeeperMasterMonitor(zkPaths, curatorServiceResource.getCuratorService(), titusRuntime);
        masterMonitor.start();
    }
    @After
    public void tearDown() {
        // Stops the ZK node/tree caches and the own-instance refresh subscription.
        masterMonitor.shutdown();
    }
@Test(timeout = 30_000)
public void testMonitorWorksForMultipleLeaderUpdates() throws Exception {
// Note we intentionally didn't set the initial value of master description because we'd like to make sure
// that the monitor will work property even if it fails occasionally (in this case, it will fail to deserialize
// the master description in the very beginning
ExtTestSubscriber<MasterDescription> leaderSubscriber = new ExtTestSubscriber<>();
masterMonitor.getLeaderObservable().filter(Objects::nonNull).subscribe(leaderSubscriber);
for (int i = 0; i < 5; i++) {
curator.setData()
.forPath(zkPaths.getLeaderAnnouncementPath(), ObjectMappers.defaultMapper().writeValueAsBytes(newMasterDescription(i)));
// Try a few times, as we can get update for the same entity more than once.
for (int j = 0; j < 3; j++) {
MasterDescription newLeader = leaderSubscriber.takeNext(5, TimeUnit.SECONDS);
if (newLeader != null && newLeader.getApiPort() == i) {
return;
}
}
fail("Did not received TitusMaster update for iteration " + i);
}
}
    /**
     * Verifies that updates of the local master instance are written to Zookeeper, and that additions and
     * removals of other masters are observable through the monitor's master-list stream.
     */
    @Test(timeout = 30_000)
    public void testLocalMasterInstanceUpdates() throws Exception {
        // Should get dummy version first.
        assertThat(masterMonitor.getCurrentMasterInstance()).isEqualTo(ZookeeperMasterMonitor.UNKNOWN_MASTER_INSTANCE);
        ExtTestSubscriber<List<MasterInstance>> mastersSubscriber = new ExtTestSubscriber<>();
        masterMonitor.observeMasters().subscribe(mastersSubscriber);
        assertThat(mastersSubscriber.takeNext()).isEmpty();
        // Update information about itself
        MasterInstance initial = ZookeeperTestUtils.newMasterInstance("selfId", MasterState.Inactive);
        assertThat(masterMonitor.updateOwnMasterInstance(initial).get()).isNull();
        expectMasters(mastersSubscriber, initial);
        // Change state
        MasterInstance updated = MasterInstanceFunctions.moveTo(initial, MasterStatus.newBuilder()
                .withState(MasterState.NonLeader)
                .withMessage("testing")
                .build()
        );
        assertThat(masterMonitor.updateOwnMasterInstance(updated).get()).isNull();
        expectMasters(mastersSubscriber, updated);
        // Now add second master (written to ZK directly, simulating another TitusMaster process)
        MasterInstance second = ZookeeperTestUtils.newMasterInstance("secondId", MasterState.Inactive);
        addMasterInstanceToZookeeper(second);
        expectMasters(mastersSubscriber, updated, second);
        // And remove it
        removeMasterInstanceFromZookeeper(second.getInstanceId());
        expectMasters(mastersSubscriber, updated);
    }
/**
* Zookeeper sends multiple events for the same master update, so we have to be able to filter this out.
*/
private void expectMasters(ExtTestSubscriber<List<MasterInstance>> mastersSubscriber, MasterInstance... masters) throws Exception {
long deadline = System.currentTimeMillis() + 5_000;
List<MasterInstance> last = null;
while (true) {
long delayMs = deadline - System.currentTimeMillis();
if (delayMs <= 0) {
fail("Did not received expected update. Last observed update=" + last);
}
List<MasterInstance> next = mastersSubscriber.takeNext(delayMs, TimeUnit.MILLISECONDS);
assertThat(next).isNotNull();
// Due to race condition we may get here default protobuf MasterInstance value (empty ZK node), followed
// by the initial value set after the ZK path is created.
long emptyNodeCount = next.stream().filter(m -> m.equals(DEFAULT_MASTER_INSTANCE)).count();
if (emptyNodeCount == 0) {
assertThat(next).hasSize(masters.length).contains(masters);
return;
}
}
}
private String buildInstancePath(String instanceId) {
return zkPaths.getAllMastersPath() + "/" + instanceId;
}
    // Simulates another master process by writing its GRPC-serialized representation under an
    // ephemeral ZK node (persisted=false) below the allMasters path.
    private void addMasterInstanceToZookeeper(MasterInstance instance) {
        String path = buildInstancePath(instance.getInstanceId());
        CuratorUtils.createPathIfNotExist(curator, path, false);
        byte[] bytes = SupervisorGrpcModelConverters.toGrpcMasterInstance(instance).toByteArray();
        // setData returns a Completable; a null result from get() means it completed without error.
        assertThat(CuratorUtils.setData(curator, path, bytes).get()).isNull();
    }
    // Asynchronous delete; completion is only printed, the test observes the removal indirectly
    // through the master monitor's update stream.
    private void removeMasterInstanceFromZookeeper(String instanceId) throws Exception {
        curator.delete().inBackground((client, event) -> {
            System.out.printf("Received Curator notification after the instance node %s was removed: %s", instanceId, event);
        }).forPath(buildInstancePath(instanceId));
    }
} | 1,353 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/ZookeeperConstants.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper;
/**
 * Shared constants of the Zookeeper extension module.
 */
public final class ZookeeperConstants {

    /** Common prefix of all metric names emitted by the Zookeeper extension. */
    public static final String METRICS_ROOT = "titusMaster.ext.zookeeper.";

    // Static-only holder; never instantiated.
    private ZookeeperConstants() {
    }
}
| 1,354 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/ZookeeperConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Zookeeper connectivity configuration, backed by Archaius properties with the
 * {@code titusMaster.ext.zookeeper} prefix.
 */
@Configuration(prefix = "titusMaster.ext.zookeeper")
public interface ZookeeperConfiguration {

    /** Zookeeper ensemble connection string ({@code host:port[,host:port...]}). */
    @DefaultValue("localhost:2181")
    String getZkConnectionString();

    /** Curator connection timeout in milliseconds. */
    @DefaultValue("10000")
    int getZkConnectionTimeoutMs();

    /** Base sleep time in milliseconds between connection retries (exponential backoff). */
    @DefaultValue("500")
    int getZkConnectionRetrySleepMs();

    /** Maximum number of connection retries before giving up. */
    @DefaultValue("5")
    int getZkConnectionMaxRetries();

    /** Root ZK path under which all Titus nodes are created. */
    @DefaultValue("/titus")
    String getZkRoot();
}
| 1,355 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/ZookeeperPaths.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.curator.utils.ZKPaths;
/**
 * Single source of truth for every Zookeeper path used by Titus. All paths live under the
 * {@code <zkRoot>/master} sub-tree, where {@code zkRoot} comes from {@link ZookeeperConfiguration}.
 */
@Singleton
public class ZookeeperPaths {

    public static final String ALL_MASTERS = "/allMasters";
    public static final String LEADER_ANNOUNCEMENT_NODE = "/leader";
    public static final String LEADER_ELECTION_NODE = "/hosts";

    private final String allMastersPath;
    private final String leaderElectionPath;
    private final String leaderAnnouncementPath;

    @Inject
    public ZookeeperPaths(ZookeeperConfiguration configuration) {
        // All paths hang off the "<zkRoot>/master" sub-tree.
        String masterRoot = ZKPaths.makePath(configuration.getZkRoot(), "master");
        this.allMastersPath = ZKPaths.makePath(masterRoot, ALL_MASTERS);
        this.leaderElectionPath = ZKPaths.makePath(masterRoot, LEADER_ELECTION_NODE);
        this.leaderAnnouncementPath = ZKPaths.makePath(masterRoot, LEADER_ANNOUNCEMENT_NODE);
    }

    /** Path of the parent node under which each master registers its own instance node. */
    public String getAllMastersPath() {
        return allMastersPath;
    }

    /** Path used by the Curator leader latch for leader election. */
    public String getLeaderElectionPath() {
        return leaderElectionPath;
    }

    /** Path of the node where the elected leader announces itself. */
    public String getLeaderAnnouncementPath() {
        return leaderAnnouncementPath;
    }

    @Override
    public String toString() {
        return String.format(
                "ZookeeperPaths{allMastersPath='%s', leaderElectionPath='%s', leaderAnnouncementPath='%s'}",
                allMastersPath, leaderElectionPath, leaderAnnouncementPath
        );
    }
}
| 1,356 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/CuratorUtils.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import java.io.IOException;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import rx.Completable;
import static org.apache.zookeeper.KeeperException.Code.OK;
/**
 * Static helper methods for common Curator/Zookeeper operations.
 */
public class CuratorUtils {

    // Static-only utility; never instantiated.
    private CuratorUtils() {
    }

    /**
     * Creates the given path (including any missing parents) if it does not exist yet.
     *
     * @param persisted if true the node is created with PERSISTENT mode, otherwise EPHEMERAL
     * @return true if the path was created, false if it already existed
     * @throws IllegalStateException if the Zookeeper interaction fails
     */
    public static boolean createPathIfNotExist(CuratorFramework curator, String fullPath, boolean persisted) {
        try {
            Stat pathStat = curator.checkExists().forPath(fullPath);
            // Create the path only if the path does not exist
            if (pathStat != null) {
                return false;
            }
            curator.create()
                    .creatingParentsIfNeeded()
                    .withMode(persisted ? CreateMode.PERSISTENT : CreateMode.EPHEMERAL)
                    .forPath(fullPath);
            return true;
        } catch (Exception e) {
            throw new IllegalStateException("Cannot create Zookeeper path: " + fullPath, e);
        }
    }

    /**
     * Writes {@code data} to the given ZK node asynchronously.
     *
     * @return a {@link Completable} that completes when the write succeeds, or emits an
     * {@link IOException} if Zookeeper reports a non-OK result or the request cannot be issued
     */
    public static Completable setData(CuratorFramework curator, String path, byte[] data) {
        return Completable.fromEmitter(emitter -> {
            try {
                curator
                        .setData()
                        .inBackground((client, event) -> {
                            // The background callback reports the outcome via the event's result code.
                            if (event.getResultCode() == OK.intValue()) {
                                emitter.onCompleted();
                            } else {
                                emitter.onError(new IOException(String.format("Failed to store data in zookeeper node: path=%s, event=%s", path, event)));
                            }
                        }).forPath(path, data);
            } catch (Exception e) {
                emitter.onError(new IOException(String.format("Unexpected error when storing data in zookeeper node: path=%s, error=%s", path, e.getMessage()), e));
            }
        });
    }
}
| 1,357 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/ZookeeperClusterResolver.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import java.util.Optional;
/**
 * Resolves the Zookeeper connection string.
 */
public interface ZookeeperClusterResolver {

    /**
     * @return the Zookeeper connection string if available, or {@code Optional.empty()} otherwise
     */
    Optional<String> resolve();
}
| 1,358 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/DefaultZookeeperClusterResolver.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import java.util.Optional;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.ext.zookeeper.ZookeeperConfiguration;
/**
 * Default implementation reads Zookeeper connection string from configuration.
 * More sophisticated implementations might resolve this information from DNS or other name service (Eureka).
 */
@Singleton
public class DefaultZookeeperClusterResolver implements ZookeeperClusterResolver {

    // Read once at construction time; the value is immutable afterwards.
    private final String zkConnectionString;

    @Inject
    public DefaultZookeeperClusterResolver(ZookeeperConfiguration config) {
        this.zkConnectionString = config.getZkConnectionString();
    }

    @Override
    public Optional<String> resolve() {
        return Optional.of(zkConnectionString);
    }
}
| 1,359 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/ZookeeperHealthIndicator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.runtime.health.api.Health;
import com.netflix.runtime.health.api.HealthIndicator;
import com.netflix.runtime.health.api.HealthIndicatorCallback;
import org.apache.curator.framework.imps.CuratorFrameworkState;
/**
 * Health indicator that reports healthy only while the managed Curator framework
 * is in the {@code STARTED} state; any other state is reported as unhealthy with a detail entry.
 */
@Singleton
public class ZookeeperHealthIndicator implements HealthIndicator {

    private final CuratorService curatorService;

    @Inject
    public ZookeeperHealthIndicator(CuratorService curatorService) {
        this.curatorService = curatorService;
    }

    @Override
    public void check(HealthIndicatorCallback healthCallback) {
        CuratorFrameworkState currentState = curatorService.getCurator().getState();
        Health health = currentState == CuratorFrameworkState.STARTED
                ? Health.healthy().build()
                : Health.unhealthy().withDetail("state", currentState).build();
        healthCallback.inform(health);
    }
}
| 1,360 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/CuratorService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import org.apache.curator.framework.CuratorFramework;
/**
 * Creates and manages lifecycle of {@link CuratorFramework}.
 */
public interface CuratorService {

    /**
     * @return the managed {@link CuratorFramework} instance
     */
    CuratorFramework getCurator();
}
| 1,361 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/CuratorServiceImpl.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.titus.ext.zookeeper.ZookeeperConfiguration;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.imps.GzipCompressionProvider;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This service implementation is responsible for managing the lifecycle of a {@link org.apache.curator.framework.CuratorFramework}
* instance.
*/
@Singleton
public class CuratorServiceImpl implements CuratorService {

    private static final Logger LOG = LoggerFactory.getLogger(CuratorServiceImpl.class);

    private final CuratorFramework curator;

    // 1 while the Curator client is connected to ZK, 0 otherwise; published as a polled gauge.
    private final AtomicInteger isConnectedGauge;

    @Inject
    public CuratorServiceImpl(ZookeeperConfiguration configs, ZookeeperClusterResolver clusterResolver, Registry registry) {
        isConnectedGauge = PolledMeter.using(registry).withName("titusMaster.curator.isConnected").monitorValue(new AtomicInteger());

        Optional<String> connectString = clusterResolver.resolve();
        if (!connectString.isPresent()) {
            // Fail early if connection to zookeeper not defined
            LOG.error("Zookeeper connectivity details not found");
            throw new IllegalStateException("Zookeeper connectivity details not found");
        }
        // Connection timeout and retry policy are driven entirely by ZookeeperConfiguration.
        curator = CuratorFrameworkFactory.builder()
                .compressionProvider(new GzipCompressionProvider())
                .connectionTimeoutMs(configs.getZkConnectionTimeoutMs())
                .retryPolicy(new ExponentialBackoffRetry(configs.getZkConnectionRetrySleepMs(), configs.getZkConnectionMaxRetries()))
                .connectString(connectString.get())
                .build();
    }

    // Mirrors Curator connection state transitions into the isConnected gauge.
    private void setupCuratorListener() {
        LOG.info("Setting up curator state change listener");
        curator.getConnectionStateListenable().addListener((client, newState) -> {
            if (newState.isConnected()) {
                LOG.info("Curator connected");
                isConnectedGauge.set(1);
            } else {
                // ToDo: determine if it is safe to restart our service instead of committing suicide
                LOG.error("Curator connection lost");
                isConnectedGauge.set(0);
            }
        });
    }

    // Lifecycle entry point: listener must be registered before the client is started so no
    // state transition is missed.
    @PostConstruct
    public void start() {
        isConnectedGauge.set(0);
        setupCuratorListener();
        curator.start();
    }

    @PreDestroy
    public void shutdown() {
        try {
            curator.close();
        } catch (Exception e) {
            // A shutdown failure should not affect the subsequent shutdowns, so we just warn here
            LOG.warn("Failed to shut down the curator service: {}", e.getMessage(), e);
        } finally {
            isConnectedGauge.set(0);
        }
    }

    public CuratorFramework getCurator() {
        return curator;
    }
}
| 1,362 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/connector/ZookeeperConnectorModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.connector;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.runtime.health.guice.HealthModule;
import com.netflix.titus.ext.zookeeper.ZookeeperConfiguration;
/**
 * Guice bindings for the Zookeeper connector: Curator lifecycle management, connection-string
 * resolution, configuration proxy and a health indicator.
 */
public class ZookeeperConnectorModule extends AbstractModule {

    @Override
    protected void configure() {
        bind(CuratorService.class).to(CuratorServiceImpl.class);
        bind(ZookeeperClusterResolver.class).to(DefaultZookeeperClusterResolver.class);
        // Expose ZK connection state through the runtime health endpoint.
        install(new HealthModule() {
            @Override
            protected void configureHealth() {
                bindAdditionalHealthIndicator().to(ZookeeperHealthIndicator.class);
            }
        });
    }

    @Provides
    @Singleton
    public ZookeeperConfiguration getZookeeperConfiguration(ConfigProxyFactory factory) {
        // Archaius-backed proxy bound to the "titusMaster.ext.zookeeper" property prefix.
        return factory.newProxy(ZookeeperConfiguration.class);
    }
}
| 1,363 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/supervisor/ZkLeaderVerificator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.supervisor.service.LeaderActivator;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import com.netflix.titus.api.supervisor.service.MasterMonitor;
import com.netflix.titus.common.util.SystemExt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * 12/09/2014
 * This is being done as a 'big hammer' kind of workaround for a ZooKeeper error we encountered
 * for leader election. We have two callback routes from ZooKeeper: LeaderLatch and NodeCache.
 * Master uses LeaderLatch to be notified of when it needs to become leader and when to stop being one.
 * The node cache is used by clients to be notified of Leader changes. We make sure that the two match.
 * That is, we get node cache notifications and make sure that its view of whether or not we are the
 * leader matches LeaderLatch's view. If not, we panic and exit. This was observed to be inconsistent in
 * prod at least once.
 * We publish public hostname in master description, that's how we will know if ZK thinks we are the leader.
 */
@Singleton
public class ZkLeaderVerificator {

    private static final Logger logger = LoggerFactory.getLogger(ZkLeaderVerificator.class);

    /** Number of consecutive verification failures tolerated before the process exits. */
    private static final int MAX_FALSE_COUNTS = 10;

    /** Initial delay and period (seconds) of the periodic verification task. */
    private static final long VERIFICATION_DELAY_SEC = 20;

    private final MasterMonitor masterMonitor;
    private final LeaderActivator leaderActivator;

    @Inject
    public ZkLeaderVerificator(MasterMonitor masterMonitor, LeaderActivator leaderActivator) {
        this.masterMonitor = masterMonitor;
        this.leaderActivator = leaderActivator;
    }

    /**
     * Starts the periodic verification. If the EC2 hostname/IP environment variables are not present
     * we assume we are not running in the cloud and skip verification entirely.
     */
    @PostConstruct
    void setupZKLeaderVerification() {
        final String myHostname = System.getenv("EC2_PUBLIC_HOSTNAME");
        final String myLocalIP = System.getenv("EC2_LOCAL_IPV4");
        if (myHostname == null || myHostname.isEmpty()) {
            logger.warn("Did not find public hostname variable, OK if not running cloud");
            return;
        }
        if (myLocalIP == null || myLocalIP.isEmpty()) {
            logger.warn("Did not find local IP variable, OK if not running cloud");
            return;
        }
        logger.info("Setting up ZK leader verification with myHostname={}, localIP={}", myHostname, myLocalIP);

        // Track the most recently announced leader, as seen through the ZK node cache.
        final AtomicReference<MasterDescription> ref = new AtomicReference<>();
        masterMonitor.getLeaderObservable()
                .doOnNext(ref::set)
                .subscribe();

        final AtomicInteger falseCount = new AtomicInteger(0);
        new ScheduledThreadPoolExecutor(1).scheduleWithFixedDelay(() -> {
            boolean foundFault = false;
            try {
                if (leaderActivator.isLeader()) {
                    // Snapshot once, so a concurrent update of 'ref' cannot change between the
                    // null check and the getHostname() reads (the original read ref.get() repeatedly).
                    MasterDescription announced = ref.get();
                    logger.info("I'm leader, masterDescription={}", announced);
                    if (announced != null && !myHostname.equals(announced.getHostname()) && !myLocalIP.equals(announced.getHostname())) {
                        foundFault = true;
                        logger.warn("ZK says leader is {}, not us ({})", announced.getHostname(), myHostname);
                        if (falseCount.incrementAndGet() > MAX_FALSE_COUNTS) {
                            logger.error("Too many attempts failed to verify ZK leader status, exiting!");
                            SystemExt.forcedProcessExit(5);
                        }
                    }
                }
            } catch (Exception e) {
                logger.warn("Error verifying leader status: {}", e.getMessage(), e);
            }
            // Any clean pass resets the counter; only consecutive faults accumulate.
            if (!foundFault) {
                falseCount.set(0);
            }
        }, VERIFICATION_DELAY_SEC, VERIFICATION_DELAY_SEC, TimeUnit.SECONDS);
    }
}
| 1,364 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/supervisor/ZookeeperMasterMonitor.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.runtime.SystemLogEvent;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.Evaluators;
import com.netflix.titus.common.util.rx.ObservableExt;
import com.netflix.titus.ext.zookeeper.ZookeeperConstants;
import com.netflix.titus.ext.zookeeper.ZookeeperPaths;
import com.netflix.titus.ext.zookeeper.connector.CuratorService;
import com.netflix.titus.ext.zookeeper.connector.CuratorUtils;
import com.netflix.titus.api.supervisor.model.MasterInstance;
import com.netflix.titus.api.supervisor.model.MasterState;
import com.netflix.titus.api.supervisor.model.MasterStatus;
import com.netflix.titus.api.supervisor.service.LeaderActivator;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import com.netflix.titus.api.supervisor.service.MasterMonitor;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.NodeCache;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Observable;
import rx.Scheduler;
import rx.Subscription;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import static com.netflix.titus.master.supervisor.endpoint.grpc.SupervisorGrpcModelConverters.toCoreMasterInstance;
import static com.netflix.titus.master.supervisor.endpoint.grpc.SupervisorGrpcModelConverters.toGrpcMasterInstance;
/**
* A monitor that monitors the status of Titus masters.
*/
@Singleton
public class ZookeeperMasterMonitor implements MasterMonitor {
    private static final Logger logger = LoggerFactory.getLogger(ZookeeperMasterMonitor.class);

    /**
     * During the system start time the full information about the local instance state is not known, so
     * instead we assign a default value which indicates this fact. Before the bootstrap completes this default
     * should be replaced with a correct value, or the JVM process should terminate.
     */
    @VisibleForTesting
    static final MasterInstance UNKNOWN_MASTER_INSTANCE = MasterInstance.newBuilder()
            .withInstanceId("unknownId")
            .withInstanceGroupId("unknownId")
            .withIpAddress("0.0.0.0")
            .withStatus(MasterStatus.newBuilder()
                    .withState(MasterState.Starting)
                    .withMessage("TitusMaster instance state not known yet")
                    .build()
            )
            .withServerPorts(Collections.emptyList())
            .withLabels(Collections.emptyMap())
            .build();

    // How often this instance's own record is re-written to ZK (acts as a repair/heartbeat loop).
    private static final long OWN_MASTER_REFRESH_INTERVAL_MS = 30_000;

    private final CuratorFramework curator;
    private final TitusRuntime titusRuntime;

    // ZK node on which the elected leader announces itself.
    private final String leaderPath;

    // Replays the latest known leader to every new subscriber.
    private final SerializedSubject<MasterDescription, MasterDescription> leaderSubject;
    private final AtomicReference<MasterDescription> latestLeader = new AtomicReference<>();
    private final NodeCache leaderMonitor;

    // ZK parent node under which each master registers its own MasterInstance record.
    private final String allMastersPath;
    private final TreeCache masterMonitor;

    // Last emitted full master list; compared against fresh reads to suppress duplicate updates.
    private volatile List<MasterInstance> knownMasterInstances = Collections.emptyList();
    private final Subject<List<MasterInstance>, List<MasterInstance>> masterUpdates = new SerializedSubject<>(
            BehaviorSubject.create(Collections.emptyList())
    );
    private final Observable<List<MasterInstance>> masterUpdatesObserver = ObservableExt.protectFromMissingExceptionHandlers(
            masterUpdates.asObservable(),
            logger
    );

    // This instance's own record; UNKNOWN_MASTER_INSTANCE until bootstrap provides the real value.
    private volatile MasterInstance ownMasterInstance = UNKNOWN_MASTER_INSTANCE;
    private final Subscription refreshOwnMasterInstanceSubscriber;
    @Inject
    public ZookeeperMasterMonitor(ZookeeperPaths zkPaths, CuratorService curatorService, TitusRuntime titusRuntime) {
        this(zkPaths, curatorService.getCurator(), null, titusRuntime, Schedulers.computation());
    }

    /**
     * @param initValue initially assumed leader; may be null when the leader is not known yet
     * @param scheduler scheduler driving the periodic re-registration of the local MasterInstance record
     */
    public ZookeeperMasterMonitor(ZookeeperPaths zkPaths,
                                  CuratorFramework curator,
                                  MasterDescription initValue,
                                  TitusRuntime titusRuntime,
                                  Scheduler scheduler) {
        this.curator = curator;
        this.titusRuntime = titusRuntime;
        this.leaderPath = zkPaths.getLeaderAnnouncementPath();
        this.leaderSubject = BehaviorSubject.create(initValue).toSerialized();
        this.leaderMonitor = new NodeCache(curator, leaderPath);
        this.latestLeader.set(initValue);
        this.allMastersPath = zkPaths.getAllMastersPath();
        this.masterMonitor = new TreeCache(curator, allMastersPath);

        // Periodically re-writes the current ownMasterInstance value to ZK so that
        // transient write failures self-heal on the next tick.
        this.refreshOwnMasterInstanceSubscriber = ObservableExt.schedule(
                ZookeeperConstants.METRICS_ROOT + "masterMonitor.ownInstanceRefreshScheduler",
                titusRuntime.getRegistry(),
                "reRegisterOwnMasterInstanceInZookeeper",
                registerOwnMasterInstance(() -> ownMasterInstance),
                OWN_MASTER_REFRESH_INTERVAL_MS, OWN_MASTER_REFRESH_INTERVAL_MS, TimeUnit.MILLISECONDS,
                scheduler
        ).subscribe();
    }
    /**
     * Attaches change listeners and starts both ZK caches. Listeners are registered before the
     * caches start so no event is missed.
     *
     * @throws IllegalStateException if either cache fails to start
     */
    @PostConstruct
    public void start() {
        leaderMonitor.getListenable().addListener(this::retrieveLeader);
        masterMonitor.getListenable().addListener(this::retrieveAllMasters);

        try {
            leaderMonitor.start();
            masterMonitor.start();
        } catch (Exception e) {
            throw new IllegalStateException("Failed to start master node monitor: " + e.getMessage(), e);
        }

        logger.info("The ZK master monitor is started");
    }
@PreDestroy
public void shutdown() {
ObservableExt.safeUnsubscribe(refreshOwnMasterInstanceSubscriber);
try {
leaderMonitor.close();
masterMonitor.close();
logger.info("ZK master monitor is shut down");
} catch (IOException e) {
throw new RuntimeException("Failed to close the ZK node monitor: " + e.getMessage(), e);
}
}
    /**
     * Emits the current leader on subscription (if known) and every subsequent leader change.
     */
    @Override
    public Observable<MasterDescription> getLeaderObservable() {
        return leaderSubject.asObservable();
    }

    /**
     * Returns the most recently observed leader; may be null until the first announcement is seen.
     */
    @Override
    public MasterDescription getLatestLeader() {
        return latestLeader.get();
    }

    /**
     * Returns this instance's own record; {@link #UNKNOWN_MASTER_INSTANCE} until bootstrap updates it.
     */
    @Override
    public MasterInstance getCurrentMasterInstance() {
        return ownMasterInstance;
    }
    /**
     * Records the new local MasterInstance state and writes it to ZK. The in-memory field is set
     * on subscription (before the ZK write completes), so the periodic refresh loop will retry
     * the write even if this attempt fails.
     */
    @Override
    public Completable updateOwnMasterInstance(MasterInstance self) {
        return registerOwnMasterInstance(() -> self)
                .doOnSubscribe(s -> this.ownMasterInstance = self)
                .doOnCompleted(() -> logger.info("Updated own MasterInstance state to: {}", self));
    }

    /**
     * Lazily writes the supplied MasterInstance record to its per-instance ZK node, creating the
     * node first if needed. Errors are logged and propagated to the subscriber.
     */
    private Completable registerOwnMasterInstance(Supplier<MasterInstance> source) {
        return Completable
                .defer(() -> {
                    MasterInstance self = source.get();
                    String fullPath = allMastersPath + '/' + self.getInstanceId();
                    CuratorUtils.createPathIfNotExist(curator, fullPath, false);
                    return CuratorUtils.setData(curator, fullPath, toGrpcMasterInstance(self).toByteArray());
                })
                .doOnError(e -> logger.warn("Couldn't update own MasterInstance data in Zookeeper", e));
    }

    /**
     * Emits the last known full master list on subscription and on every detected change.
     */
    @Override
    public Observable<List<MasterInstance>> observeMasters() {
        return masterUpdatesObserver;
    }
    /**
     * NodeCache listener callback: re-reads the leader announcement node (after an explicit ZK
     * sync, to avoid stale reads) and publishes the parsed leader. Payload parse failures are only
     * logged; sync/read failures are additionally reported to the system log.
     */
    private void retrieveLeader() {
        try {
            curator
                    .sync()  // sync with ZK before reading
                    .inBackground(
                            curator
                                    .getData()
                                    .inBackground((client, event) -> {
                                        try {
                                            MasterDescription description = ObjectMappers.defaultMapper().readValue(event.getData(), MasterDescription.class);
                                            logger.info("New master retrieved: {}", description);
                                            latestLeader.set(description);
                                            leaderSubject.onNext(description);
                                        } catch (Exception e) {
                                            logger.error("Bad value in the leader path: {}", e.getMessage());
                                        }
                                    })
                                    .forPath(leaderPath)
                    )
                    .forPath(leaderPath);
        } catch (Exception e) {
            String errorMessage = "Failed to retrieve updated master information: " + e.getMessage();
            titusRuntime.getSystemLogService().submit(SystemLogEvent.newBuilder()
                    .withComponent(LeaderActivator.COMPONENT)
                    .withPriority(SystemLogEvent.Priority.Warn)
                    .withCategory(SystemLogEvent.Category.Transient)
                    .withMessage(errorMessage)
                    .build()
            );
            logger.error(errorMessage, e);
        }
    }

    /**
     * TreeCache listener callback: rebuilds the master list from the cache's current children and
     * emits it only when it differs from the last published list.
     * NOTE(review): the read/compare/assign of knownMasterInstances is not atomic; this assumes
     * TreeCache delivers events on a single thread — confirm before calling from elsewhere.
     */
    private void retrieveAllMasters(CuratorFramework curator, TreeCacheEvent cacheEvent) {
        logger.debug("Received TreeCacheEvent: {}", cacheEvent);

        Map<String, ChildData> currentChildren = Evaluators.getOrDefault(
                masterMonitor.getCurrentChildren(allMastersPath), Collections.emptyMap()
        );
        List<MasterInstance> updatedMasterList = new ArrayList<>();
        for (Map.Entry<String, ChildData> entry : currentChildren.entrySet()) {
            parseMasterInstanceData(entry.getValue()).ifPresent(updatedMasterList::add);
        }

        if (!knownMasterInstances.equals(updatedMasterList)) {
            logger.info("Detected change in TitusMaster state and/or topology: {}", updatedMasterList);
            knownMasterInstances = updatedMasterList;
            masterUpdates.onNext(Collections.unmodifiableList(updatedMasterList));
        }
    }

    /**
     * Decodes one ZK child node into a MasterInstance. Invalid payloads are reported as code
     * invariant violations and skipped (empty result).
     */
    private Optional<MasterInstance> parseMasterInstanceData(ChildData childData) {
        try {
            com.netflix.titus.grpc.protogen.MasterInstance grpcMasterInstance = com.netflix.titus.grpc.protogen.MasterInstance.parseFrom(childData.getData());
            return Optional.of(toCoreMasterInstance(grpcMasterInstance));
        } catch (Exception e) {
            titusRuntime.getCodeInvariants().unexpectedError("Found invalid MasterInstance protobuf error at: " + childData.getPath(), e);
            return Optional.empty();
        }
    }
}
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/supervisor/ZookeeperSupervisorModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import com.google.inject.AbstractModule;
import com.netflix.titus.api.supervisor.service.LeaderElector;
import com.netflix.titus.api.supervisor.service.MasterMonitor;
/**
 * Guice bindings for the Zookeeper-based supervisor stack: leader election, master monitoring,
 * and the leadership self-verification watchdog.
 */
public final class ZookeeperSupervisorModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(LeaderElector.class).to(ZookeeperLeaderElector.class).asEagerSingleton();
        bind(MasterMonitor.class).to(ZookeeperMasterMonitor.class);
        bind(ZkLeaderVerificator.class).asEagerSingleton();
    }
}
| 1,366 |
0 | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper | Create_ds/titus-control-plane/titus-ext/zookeeper/src/main/java/com/netflix/titus/ext/zookeeper/supervisor/ZookeeperLeaderElector.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.zookeeper.supervisor;
import java.util.Collections;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.runtime.SystemLogEvent;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.SystemExt;
import com.netflix.titus.common.util.rx.ObservableExt;
import com.netflix.titus.ext.zookeeper.ZookeeperPaths;
import com.netflix.titus.ext.zookeeper.connector.CuratorService;
import com.netflix.titus.ext.zookeeper.connector.CuratorUtils;
import com.netflix.titus.api.supervisor.model.MasterState;
import com.netflix.titus.api.supervisor.service.LeaderActivator;
import com.netflix.titus.api.supervisor.service.LeaderElector;
import com.netflix.titus.api.supervisor.service.MasterDescription;
import io.netty.util.concurrent.DefaultThreadFactory;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.framework.recipes.leader.LeaderLatchListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.subjects.BehaviorSubject;
import static org.apache.zookeeper.KeeperException.Code.OK;
@Singleton
public class ZookeeperLeaderElector implements LeaderElector {
    private static final Logger logger = LoggerFactory.getLogger(ZookeeperLeaderElector.class);

    private final LeaderActivator leaderActivator;
    private final ZookeeperPaths zookeeperPaths;
    private final TitusRuntime titusRuntime;
    private final ObjectMapper jsonMapper;

    // Descriptor announced on the leader path when this instance wins an election.
    private final MasterDescription masterDescription;
    private final CuratorFramework curator;

    // The path where a selected leader announces itself.
    private final String leaderPath;

    // Replays the latest election state to new subscribers.
    private final BehaviorSubject<MasterState> electionSubject = BehaviorSubject.create();
    private final Observable<MasterState> electionObserver = ObservableExt.protectFromMissingExceptionHandlers(electionSubject, logger);

    // Non-null while this instance participates in the election; mutated under synchronization on the reference itself.
    private final AtomicReference<LeaderElectionProcess> leaderElectionProcessRef = new AtomicReference<>();
    @Inject
    public ZookeeperLeaderElector(LeaderActivator leaderActivator,
                                  CuratorService curatorService,
                                  ZookeeperPaths zookeeperPaths,
                                  MasterDescription masterDescription,
                                  TitusRuntime titusRuntime) {
        this.leaderActivator = leaderActivator;
        this.zookeeperPaths = zookeeperPaths;
        this.titusRuntime = titusRuntime;
        this.jsonMapper = ObjectMappers.defaultMapper();
        this.masterDescription = masterDescription;
        this.curator = curatorService.getCurator();
        this.leaderPath = zookeeperPaths.getLeaderAnnouncementPath();

        initialize();
    }

    // Ensures the leader announcement path exists before any election round starts.
    private void initialize() {
        CuratorUtils.createPathIfNotExist(curator, leaderPath, true);
    }
@PreDestroy
public void shutdown() {
LeaderElectionProcess process = leaderElectionProcessRef.getAndSet(null);
if (process != null) {
process.close();
}
}
    /**
     * Joins the leader election. Returns true when a new election process was started,
     * false when this instance is already participating.
     */
    @Override
    public boolean join() {
        synchronized (leaderElectionProcessRef) {
            if (leaderElectionProcessRef.get() == null) {
                leaderElectionProcessRef.set(new LeaderElectionProcess());
                return true;
            }
        }
        return false;
    }

    /**
     * Withdraws from the election, but only when this instance has not won it. Returns true when
     * the election process was abandoned, false when there was none or leaving was refused
     * (because leadership was already acquired).
     */
    @Override
    public boolean leaveIfNotLeader() {
        synchronized (leaderElectionProcessRef) {
            LeaderElectionProcess process = leaderElectionProcessRef.get();
            if (process == null) {
                return false;
            }
            if (process.leaveIfNotLeader()) {
                leaderElectionProcessRef.set(null);
                return true;
            }
        }
        return false;
    }

    /**
     * Emits this instance's election state transitions.
     */
    @Override
    public Observable<MasterState> awaitElection() {
        return electionObserver;
    }
    /**
     * One round of participation in the ZK leader election. A new instance immediately starts a
     * {@link LeaderLatch}; when elected, it announces this master on the leader path and activates
     * leadership. Instances are single-use: once closed they are replaced, never restarted.
     */
    private class LeaderElectionProcess {
        private final LeaderLatch leaderLatch;

        // Set under 'this' lock once leadership is won; prevents leaveIfNotLeader from succeeding afterwards.
        private volatile boolean leaderFlag;
        private volatile boolean closed;

        private LeaderElectionProcess() {
            this.leaderLatch = createNewLeaderLatch(zookeeperPaths.getLeaderElectionPath());
            try {
                leaderLatch.start();
            } catch (Exception e) {
                String errorMessage = "Failed to create a leader elector for master: " + e.getMessage();
                titusRuntime.getSystemLogService().submit(SystemLogEvent.newBuilder()
                        .withComponent(LeaderActivator.COMPONENT)
                        .withCategory(SystemLogEvent.Category.Transient)
                        .withPriority(SystemLogEvent.Priority.Fatal)
                        .withMessage(errorMessage)
                        .withContext(Collections.singletonMap("error", e.getMessage()))
                        .build()
                );
                throw new IllegalStateException(errorMessage, e);
            }
        }

        /**
         * Leaves the election unless leadership was already won. If, despite the check, the
         * activator reports this instance as leader, the JVM is terminated as a safety measure.
         */
        private boolean leaveIfNotLeader() {
            synchronized (this) {
                if (leaderFlag) {
                    return false;
                }
                // Not a leader yet, so it is safe to leave the leader election process.
                close();
            }
            if (leaderActivator.isLeader()) {
                logger.error("Unexpected to be a leader. Terminating the JVM process");
                SystemExt.forcedProcessExit(-1);
            }
            return true;
        }

        // Marks the process withdrawn and closes the latch; close failures are logged only.
        private void close() {
            closed = true;
            try {
                leaderLatch.close();
            } catch (Exception e) {
                logger.warn("Error when leaving the leader election process", e);
            }
        }

        // NOTE(review): the single-thread executor created below is never shut down explicitly;
        // confirm whether LeaderLatch.close releases it, otherwise each election round may leak a thread.
        private LeaderLatch createNewLeaderLatch(String leaderPath) {
            final LeaderLatch newLeaderLatch = new LeaderLatch(curator, leaderPath, "127.0.0.1");

            newLeaderLatch.addListener(
                    new LeaderLatchListener() {
                        @Override
                        public void isLeader() {
                            announceLeader();
                        }

                        @Override
                        public void notLeader() {
                            leaderActivator.stopBeingLeader();
                        }
                    }, Executors.newSingleThreadExecutor(new DefaultThreadFactory("LeaderLatchListener-%s")));

            return newLeaderLatch;
        }

        /**
         * Writes this master's description to the leader path; on write success, activates
         * leadership and publishes the LeaderActivating/LeaderActivated transitions.
         */
        private void announceLeader() {
            try {
                logger.info("Announcing leader");
                byte[] masterDescriptionBytes = jsonMapper.writeValueAsBytes(masterDescription);

                // There is no need to lock anything because we ensure only leader will write to the leader path
                curator
                        .setData()
                        .inBackground((client, event) -> {
                            if (event.getResultCode() == OK.intValue()) {
                                synchronized (LeaderElectionProcess.this) {
                                    terminateIfClosed();
                                    leaderFlag = true;
                                    electionSubject.onNext(MasterState.LeaderActivating);
                                    leaderActivator.becomeLeader();
                                    electionSubject.onNext(MasterState.LeaderActivated);
                                }
                            } else {
                                logger.warn("Failed to elect leader from path {} with event {}", leaderPath, event);
                            }
                        }).forPath(leaderPath, masterDescriptionBytes);
            } catch (Exception e) {
                throw new RuntimeException("Failed to announce leader: " + e.getMessage(), e);
            }
        }

        // Safety valve: leader activation after withdrawal means inconsistent state, so kill the process.
        private void terminateIfClosed() {
            if (closed) {
                logger.error("Received leader activation request after initiating withdrawal from the leader election process. Terminating the JVM process");
                SystemExt.forcedProcessExit(-1);
            }
        }
    }
}
| 1,367 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra/store/CassAppScalePolicyStoreTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.stream.Collectors;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.appscale.model.AlarmConfiguration;
import com.netflix.titus.api.appscale.model.AutoScalingPolicy;
import com.netflix.titus.api.appscale.model.ComparisonOperator;
import com.netflix.titus.api.appscale.model.MetricAggregationType;
import com.netflix.titus.api.appscale.model.PolicyConfiguration;
import com.netflix.titus.api.appscale.model.PolicyStatus;
import com.netflix.titus.api.appscale.model.PolicyType;
import com.netflix.titus.api.appscale.model.Statistic;
import com.netflix.titus.api.appscale.model.StepAdjustment;
import com.netflix.titus.api.appscale.model.StepAdjustmentType;
import com.netflix.titus.api.appscale.model.StepScalingPolicyConfiguration;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import org.assertj.core.api.Assertions;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Category(IntegrationNotParallelizableTest.class)
public class CassAppScalePolicyStoreTest {
    private static Logger log = LoggerFactory.getLogger(CassAppScalePolicyStoreTest.class);

    private static final long STARTUP_TIMEOUT = 60_000L;
    private static final String CONFIGURATION_FILE_NAME = "relocated-cassandra.yaml";

    private CassandraStoreConfiguration configuration;

    // Embedded Cassandra instance loaded with the production schema, recreated for every test.
    @Rule
    public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(
            new ClassPathCQLDataSet("tables.cql", "titus_integration_tests"),
            CONFIGURATION_FILE_NAME,
            STARTUP_TIMEOUT
    );

    // Fixed policy ids shared between the test-data loader and the assertions.
    private static String POLICY_1_ID = UUID.randomUUID().toString();
    private static String POLICY_2_ID = UUID.randomUUID().toString();
    private static String POLICY_3_ID = UUID.randomUUID().toString();

    @Before
    public void setup() {
        this.configuration = mock(CassandraStoreConfiguration.class);
        when(configuration.getConcurrencyLimit()).thenReturn(10);
    }
    /**
     * Inserts three pending policies (two for job-1, one for job-2) plus the matching
     * job-to-policy index rows, writing directly to Cassandra and bypassing the store under test.
     */
    private void loadTestData() throws Exception {
        Session session = cassandraCQLUnit.getSession();
        String insertStmt = "INSERT INTO app_scale_policy(ref_id, job_id, status, value) VALUES(?, ?, ?, ?);";
        PreparedStatement stmt = session.prepare(insertStmt);

        // record 1
        String jobId = "job-1";
        String serializedValue = ObjectMappers.appScalePolicyMapper().writeValueAsString(buildAutoScalingPolicy(jobId).getPolicyConfiguration());
        BoundStatement boundStatement = stmt.bind(UUID.fromString(POLICY_1_ID), jobId, PolicyStatus.Pending.name(), serializedValue);
        session.execute(boundStatement);

        // record 2
        String jobIdTwo = "job-2";
        String serializedValueTwo = ObjectMappers.appScalePolicyMapper().writeValueAsString(buildAutoScalingPolicy(jobIdTwo).getPolicyConfiguration());
        boundStatement = stmt.bind(UUID.fromString(POLICY_2_ID), jobIdTwo, PolicyStatus.Pending.name(), serializedValueTwo);
        session.execute(boundStatement);

        // record 3 (second policy for job-1, reusing its serialized configuration)
        boundStatement = stmt.bind(UUID.fromString(POLICY_3_ID), jobId, PolicyStatus.Pending.name(), serializedValue);
        session.execute(boundStatement);

        // insert job-policy relationship
        insertStmt = "INSERT INTO app_scale_jobs(job_id, ref_id) VALUES(?, ?);";
        stmt = session.prepare(insertStmt);
        boundStatement = stmt.bind("job-1", UUID.fromString(POLICY_1_ID));
        session.execute(boundStatement);
        boundStatement = stmt.bind("job-1", UUID.fromString(POLICY_3_ID));
        session.execute(boundStatement);
        boundStatement = stmt.bind("job-2", UUID.fromString(POLICY_2_ID));
        session.execute(boundStatement);
    }
@Test
public void verifyStoreInit() throws Exception {
Session session = cassandraCQLUnit.getSession();
loadTestData();
CassAppScalePolicyStore store = new CassAppScalePolicyStore(session, configuration, new DefaultRegistry());
store.init().await();
List<AutoScalingPolicy> allPolicies = store.retrievePolicies(false).toList().toBlocking().first();
Assertions.assertThat(allPolicies.size()).isEqualTo(3);
List<AutoScalingPolicy> jobOnePolicies = store.retrievePoliciesForJob("job-1").toList().toBlocking().first();
Assertions.assertThat(jobOnePolicies.size()).isEqualTo(2);
List<String> refIdList = jobOnePolicies.stream().map(as -> as.getRefId()).collect(Collectors.toList());
Assertions.assertThat(refIdList).containsOnly(POLICY_1_ID, POLICY_3_ID);
List<AutoScalingPolicy> jobTwoPolicies = store.retrievePoliciesForJob("job-2").toList().toBlocking().first();
Assertions.assertThat(jobTwoPolicies.size()).isEqualTo(1);
List<String> jobTwoRefIdList = jobTwoPolicies.stream().map(as -> as.getRefId()).collect(Collectors.toList());
Assertions.assertThat(jobTwoRefIdList).isEqualTo(Arrays.asList(POLICY_2_ID));
// verify metric lower/upper bounds
List<StepAdjustment> stepAdjustments = jobTwoPolicies.stream()
.flatMap(as -> as.getPolicyConfiguration().getStepScalingPolicyConfiguration().getSteps().stream())
.collect(Collectors.toList());
Assertions.assertThat(stepAdjustments.size()).isEqualTo(1);
Assertions.assertThat(stepAdjustments.get(0).getMetricIntervalUpperBound()).isEqualTo(Optional.empty());
Assertions.assertThat(stepAdjustments.get(0).getMetricIntervalLowerBound().get()).isEqualTo(Double.valueOf(0));
}
    /**
     * Stores two policies for one job through the store API and verifies retrieval by ref id,
     * by job id, and for an unknown job.
     */
    @Test
    public void checkStoreAndRetrieve() throws Exception {
        Session session = cassandraCQLUnit.getSession();
        CassAppScalePolicyStore store = new CassAppScalePolicyStore(session, configuration, new DefaultRegistry());
        String jobId = UUID.randomUUID().toString();

        Observable<String> respRefId = store.storePolicy(buildAutoScalingPolicy(jobId));
        String refId = respRefId.toBlocking().first();
        Assertions.assertThat(refId).isNotNull().isNotEmpty();

        // Newly stored policies come back in Pending state.
        Observable<AutoScalingPolicy> autoScalingPolicyObservable = store.retrievePolicyForRefId(refId);
        AutoScalingPolicy autoScalingPolicy = autoScalingPolicyObservable.toBlocking().first();
        Assertions.assertThat(autoScalingPolicy.getRefId()).isEqualTo(refId);
        Assertions.assertThat(autoScalingPolicy.getStatus()).isEqualTo(PolicyStatus.Pending);

        Observable<String> respRefIdTwo = store.storePolicy(buildAutoScalingPolicy(jobId));
        String refIdTwo = respRefIdTwo.toBlocking().first();
        Assertions.assertThat(refIdTwo).isNotNull().isNotEmpty();

        // Both policies must come back for the job, in insertion order.
        autoScalingPolicyObservable = store.retrievePoliciesForJob(jobId);
        List<AutoScalingPolicy> autoScalingPolicies = autoScalingPolicyObservable.toList().toBlocking().first();
        Assertions.assertThat(autoScalingPolicies.size()).isEqualTo(2);
        List<String> refIdList = autoScalingPolicies.stream().map(ap -> ap.getRefId()).collect(Collectors.toList());
        Assertions.assertThat(refIdList).isEqualTo(Arrays.asList(refId, refIdTwo));
        Assertions.assertThat(autoScalingPolicies.get(1).getStatus()).isEqualTo(PolicyStatus.Pending);

        // Unknown job ids yield an empty result, not an error.
        autoScalingPolicyObservable = store.retrievePoliciesForJob("invalidJobId");
        List<AutoScalingPolicy> emptyPolicies = autoScalingPolicyObservable.toList().toBlocking().first();
        Assertions.assertThat(emptyPolicies.size()).isEqualTo(0);
    }
    /**
     * Exercises every update path of the store (policy id, alarm id, status, status message,
     * policy configuration) and verifies each write both via raw CQL reads and via the store's
     * own retrieval API.
     */
    @Test
    public void checkUpdates() throws Exception {
        Session session = cassandraCQLUnit.getSession();
        CassAppScalePolicyStore store = new CassAppScalePolicyStore(session, configuration, new DefaultRegistry());
        String jobId = UUID.randomUUID().toString();
        Observable<String> respRefId = store.storePolicy(buildAutoScalingPolicy(jobId));
        String refId = respRefId.toBlocking().first();
        Assertions.assertThat(refId).isNotNull().isNotEmpty();

        // update policyId
        String policyId = "PolicyARN";
        store.updatePolicyId(refId, policyId).await();
        String getPolicyIdQuery = "SELECT policy_id from app_scale_policy where ref_id = ?;";
        BoundStatement stmt = session.prepare(getPolicyIdQuery).bind(UUID.fromString(refId));
        ResultSet rs = session.execute(stmt);
        List<Row> rows = rs.all();
        Assertions.assertThat(rows.size()).isEqualTo(1);
        String policyIdStored = rows.get(0).getString(CassAppScalePolicyStore.COLUMN_POLICY_ID);
        Assertions.assertThat(policyIdStored).isEqualTo(policyId);

        // update alarmId
        String alarmId = "AlarmARM";
        store.updateAlarmId(refId, alarmId).await();
        String getAlarmIdQuery = "SELECT alarm_id from app_scale_policy where ref_id = ?;";
        stmt = session.prepare(getAlarmIdQuery).bind(UUID.fromString(refId));
        rs = session.execute(stmt);
        rows = rs.all();
        Assertions.assertThat(rows.size()).isEqualTo(1);
        String alarmIdStored = rows.get(0).getString(CassAppScalePolicyStore.COLUMN_ALARM_ID);
        Assertions.assertThat(alarmIdStored).isEqualTo(alarmId);

        // update policy status
        PolicyStatus status = PolicyStatus.Applied;
        store.updatePolicyStatus(refId, status).await();
        String getPolicyStatusQuery = "SELECT status from app_scale_policy where ref_id = ?;";
        stmt = session.prepare(getPolicyStatusQuery).bind(UUID.fromString(refId));
        rs = session.execute(stmt);
        rows = rs.all();
        Assertions.assertThat(rows.size()).isEqualTo(1);
        PolicyStatus updatedStatus = PolicyStatus.valueOf(rows.get(0).getString(CassAppScalePolicyStore.COLUMN_STATUS));
        Assertions.assertThat(updatedStatus).isEqualTo(status);

        // update status message
        String errorStatusMessage = "Got Trumped";
        store.updateStatusMessage(refId, errorStatusMessage).await();
        String getStatusMessageQuery = "SELECT status_message from app_scale_policy where ref_id = ?;";
        stmt = session.prepare(getStatusMessageQuery).bind(UUID.fromString(refId));
        rs = session.execute(stmt);
        rows = rs.all();
        Assertions.assertThat(rows.size()).isEqualTo(1);
        String statusMessage = rows.get(0).getString(CassAppScalePolicyStore.COLUMN_STATUS_MESSAGE);
        Assertions.assertThat(statusMessage).isEqualTo(errorStatusMessage);

        // read policy by refId and verify all previous updates are visible through the store API
        Observable<AutoScalingPolicy> autoScalingPolicyObservable = store.retrievePolicyForRefId(refId);
        AutoScalingPolicy autoScalingPolicy = autoScalingPolicyObservable.toBlocking().first();
        Assertions.assertThat(autoScalingPolicy.getRefId()).isEqualTo(refId);
        Assertions.assertThat(autoScalingPolicy.getStatus()).isEqualTo(status);
        Assertions.assertThat(autoScalingPolicy.getAlarmId()).isEqualTo(alarmId);
        Assertions.assertThat(autoScalingPolicy.getPolicyId()).isEqualTo(policyId);

        // update alarm threshold (rebuild the immutable configuration with a bumped threshold)
        AlarmConfiguration alarmConfiguration = autoScalingPolicy.getPolicyConfiguration().getAlarmConfiguration();
        double currentThreshold = alarmConfiguration.getThreshold();
        int thresholdIncrement = 10;
        AlarmConfiguration newAlarmConfiguration = AlarmConfiguration.newBuilder()
                .withStatistic(alarmConfiguration.getStatistic())
                .withName(alarmConfiguration.getName())
                .withMetricNamespace(alarmConfiguration.getMetricNamespace())
                .withMetricName(alarmConfiguration.getMetricName())
                .withComparisonOperator(alarmConfiguration.getComparisonOperator())
                .withThreshold(alarmConfiguration.getThreshold() + thresholdIncrement)
                .withPeriodSec(alarmConfiguration.getPeriodSec())
                .withEvaluationPeriods(alarmConfiguration.getEvaluationPeriods())
                .withActionsEnabled(alarmConfiguration.getActionsEnabled().get()).build();

        PolicyConfiguration newPolicyConfig = PolicyConfiguration.newBuilder()
                .withPolicyType(autoScalingPolicy.getPolicyConfiguration().getPolicyType())
                .withStepScalingPolicyConfiguration(autoScalingPolicy.getPolicyConfiguration().getStepScalingPolicyConfiguration())
                .withName(autoScalingPolicy.getPolicyConfiguration().getName())
                .withPolicyType(autoScalingPolicy.getPolicyConfiguration().getPolicyType())
                .withAlarmConfiguration(newAlarmConfiguration).build();

        AutoScalingPolicy newPolicy = AutoScalingPolicy.newBuilder()
                .withAutoScalingPolicy(autoScalingPolicy)
                .withPolicyConfiguration(newPolicyConfig).build();
        store.updatePolicyConfiguration(newPolicy).await();

        autoScalingPolicyObservable = store.retrievePolicyForRefId(refId);
        AutoScalingPolicy updatedPolicy = autoScalingPolicyObservable.toBlocking().first();
        Assertions.assertThat(updatedPolicy.getPolicyConfiguration().getAlarmConfiguration().getName())
                .isEqualTo(newAlarmConfiguration.getName());
        Assertions.assertThat(updatedPolicy.getPolicyConfiguration().getAlarmConfiguration().getThreshold())
                .isEqualTo(currentThreshold + thresholdIncrement);
    }
@Test
public void checkSerialization() throws Exception {
AlarmConfiguration alarmConfiguration = AlarmConfiguration.newBuilder()
.withActionsEnabled(true)
.withComparisonOperator(ComparisonOperator.GreaterThanThreshold)
.withEvaluationPeriods(1)
.withPeriodSec(60)
.withThreshold(2.5)
.withMetricName("CPUUtilization")
.withMetricNamespace("AWS/EC2")
.withName("job-1")
.withStatistic(Statistic.Average)
.build();
String serializedValue = ObjectMappers.appScalePolicyMapper().writeValueAsString(alarmConfiguration);
alarmConfiguration = ObjectMappers.appScalePolicyMapper().readValue(serializedValue.getBytes(), AlarmConfiguration.class);
Assertions.assertThat(alarmConfiguration.getName()).isEqualTo("job-1");
}
private AutoScalingPolicy buildAutoScalingPolicy(String jobId) {
AlarmConfiguration alarmConfiguration = AlarmConfiguration.newBuilder()
.withActionsEnabled(true)
.withComparisonOperator(ComparisonOperator.GreaterThanThreshold)
.withEvaluationPeriods(1)
.withPeriodSec(60)
.withMetricName("CPUUtilization")
.withMetricNamespace("AWS/EC2")
.withName(jobId)
.withStatistic(Statistic.Average)
.build();
StepAdjustment stepAdjustment = StepAdjustment.newBuilder()
.withMetricIntervalLowerBound(0)
.withScalingAdjustment(1)
.build();
StepScalingPolicyConfiguration stepScalingPolicyConfiguration = StepScalingPolicyConfiguration.newBuilder()
.withAdjustmentType(StepAdjustmentType.ChangeInCapacity)
.withCoolDownSec(60)
.withMetricAggregatorType(MetricAggregationType.Average)
.withMinAdjustmentMagnitude(1)
.withSteps(Arrays.asList(stepAdjustment))
.build();
PolicyConfiguration policyConfiguration = PolicyConfiguration.newBuilder()
.withAlarmConfiguration(alarmConfiguration)
.withStepScalingPolicyConfiguration(stepScalingPolicyConfiguration)
.withPolicyType(PolicyType.StepScaling)
.withName(jobId)
.build();
AutoScalingPolicy autoScalingPolicy = AutoScalingPolicy.newBuilder()
.withPolicyConfiguration(policyConfiguration)
.withStatus(PolicyStatus.Pending)
.withStatusMessage("ICE-ed by AWS")
.withJobId(jobId)
.build();
return autoScalingPolicy;
}
}
| 1,368 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra/store/CassandraLoadBalancerStoreTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancerState;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTarget;
import com.netflix.titus.api.loadbalancer.model.LoadBalancerTargetState;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerSanitizerBuilder;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import org.assertj.core.util.IterableUtil;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.titus.api.loadbalancer.model.JobLoadBalancer.State.ASSOCIATED;
import static com.netflix.titus.api.loadbalancer.model.JobLoadBalancer.State.DISSOCIATED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Category(IntegrationNotParallelizableTest.class)
public class CassandraLoadBalancerStoreTest {
    private static Logger logger = LoggerFactory.getLogger(CassandraLoadBalancerStoreTest.class);

    private static final long STARTUP_TIMEOUT = 60_000L;
    private static final String CONFIGURATION_FILE_NAME = "relocated-cassandra.yaml";

    // Upper bound for blocking waits on store operations.
    private static final long TIMEOUT_MS = 30_000;

    // Embedded Cassandra instance loaded with the production schema, recreated for every test.
    @Rule
    public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(
            new ClassPathCQLDataSet("tables.cql", "titus_integration_tests"),
            CONFIGURATION_FILE_NAME,
            STARTUP_TIMEOUT
    );
/**
* Tests loading existing Cassandra data on store init.
*
* @throws Exception
*/
@Test
public void verifyStoreInit() throws Exception {
loadTestData(generateTestData(5, 10, 1));
assertThat(getInitdStore()).isNotNull();
}
/**
* Tests retrieval of a data set loaded on initialization.
*
* @throws Exception
*/
@Test
public void testRetrieveLoadBalancers() throws Exception {
TestData data = generateTestData(10, 20, 1);
loadTestData(data);
CassandraLoadBalancerStore store = getInitdStore();
// Check that all expected data was loaded
checkDataSetExists(store, data.getAssociations());
}
/**
* Adds load balancers to jobs and checks that they were added properly.
*
* @throws Exception
*/
@Test
public void testAddLoadBalancers() throws Exception {
Map<JobLoadBalancer, JobLoadBalancer.State> testData = generateTestData(10, 20, 1).getAssociations();
CassandraLoadBalancerStore store = getInitdStore();
// Apply the testData to the store
testData.forEach((jobLoadBalancer, state) -> {
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, state).await(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isTrue();
});
// Check that all expected data was loaded
checkDataSetExists(store, testData);
}
@Test
public void testAssociationStateIsCaseInsensitive() throws Exception {
Session session = cassandraCQLUnit.getSession();
PreparedStatement insertStmt = session.prepare("INSERT INTO load_balancer_jobs(job_id, load_balancer_id, state) VALUES(?, ?, ?);");
ResultSet rs1 = session.execute(insertStmt.bind("job-1", "lb-1", "Associated"));
assertThat(rs1.isExhausted()).isTrue();
assertThat(rs1.wasApplied()).isTrue();
ResultSet rs2 = session.execute(insertStmt.bind("job-2", "lb-2", "Dissociated"));
assertThat(rs2.isExhausted()).isTrue();
assertThat(rs2.wasApplied()).isTrue();
ResultSet rs3 = session.execute(insertStmt.bind("job-3", "lb-3", "aSsOcIaTeD"));
assertThat(rs3.isExhausted()).isTrue();
assertThat(rs3.wasApplied()).isTrue();
CassandraLoadBalancerStore store = getInitdStore();
assertThat(store.getAssociations()).containsExactlyInAnyOrder(
new JobLoadBalancerState(new JobLoadBalancer("job-1", "lb-1"), ASSOCIATED),
new JobLoadBalancerState(new JobLoadBalancer("job-2", "lb-2"), DISSOCIATED),
new JobLoadBalancerState(new JobLoadBalancer("job-3", "lb-3"), ASSOCIATED)
);
}
@Test
public void testRemoveLoadBalancer() throws Exception {
Map<JobLoadBalancer, JobLoadBalancer.State> testData = generateTestData(10, 20, 1).getAssociations();
CassandraLoadBalancerStore store = getInitdStore();
testData.forEach((jobLoadBalancer, state) ->
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, state).await(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isTrue()
);
testData.forEach((jobLoadBalancer, state) ->
assertThat(store.removeLoadBalancer(jobLoadBalancer).await(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isTrue()
);
Map<String, List<JobLoadBalancerState>> byJobId = store.getAssociations().stream()
.collect(Collectors.groupingBy(JobLoadBalancerState::getJobId));
testData.forEach((jobLoadBalancer, state) -> {
assertThat(byJobId.get(jobLoadBalancer.getJobId())).isNullOrEmpty();
});
}
@Test
public void testDissociateLoadBalancer() throws Exception {
Map<JobLoadBalancer, JobLoadBalancer.State> testData = generateTestData(10, 20, 1).getAssociations();
CassandraLoadBalancerStore store = getInitdStore();
testData.forEach((jobLoadBalancer, state) -> {
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, ASSOCIATED).await(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isTrue();
});
testData.forEach((jobLoadBalancer, state) -> {
assertThat(store.addOrUpdateLoadBalancer(jobLoadBalancer, DISSOCIATED).await(TIMEOUT_MS, TimeUnit.MILLISECONDS)).isTrue();
});
Map<String, List<JobLoadBalancerState>> byJobId = store.getAssociations().stream()
.collect(Collectors.groupingBy(JobLoadBalancerState::getJobId));
testData.forEach((jobLoadBalancer, state) ->
byJobId.get(jobLoadBalancer.getJobId()).forEach(loadBalancerState -> {
assertThat(testData).containsKey(new JobLoadBalancer(jobLoadBalancer.getJobId(), loadBalancerState.getLoadBalancerId()));
assertThat(loadBalancerState.getState()).isEqualTo(DISSOCIATED);
})
);
}
@Test(timeout = TIMEOUT_MS)
public void testAddTargets() throws Exception {
Map<LoadBalancerTarget, LoadBalancerTarget.State> testData = generateTestData(10, 20, 1).getTargets();
Map<String, List<LoadBalancerTargetState>> expectedTargetsByLoadBalancer = testData.entrySet().stream()
.map(entry -> new LoadBalancerTargetState(entry.getKey(), entry.getValue()))
.collect(Collectors.groupingBy(t -> t.getLoadBalancerTarget().getLoadBalancerId()));
CassandraLoadBalancerStore store = getInitdStore();
store.addOrUpdateTargets(testData.entrySet().stream()
.map(LoadBalancerTargetState::from)
.collect(Collectors.toList())
).block();
expectedTargetsByLoadBalancer.forEach((loadBalancerId, expectedTargets) ->
assertThat(store.getLoadBalancerTargets(loadBalancerId).collectList().block())
.containsExactlyInAnyOrder(IterableUtil.toArray(expectedTargets))
);
int totalCount = expectedTargetsByLoadBalancer.values().stream().mapToInt(List::size).sum();
Session session = cassandraCQLUnit.getSession();
ResultSet resultSet = session.execute("SELECT COUNT(*) FROM load_balancer_targets;");
assertThat(resultSet.one().getLong(0)).isEqualTo(totalCount);
}
@Test(timeout = TIMEOUT_MS)
public void testUpdateTarget() throws Exception {
Session session = cassandraCQLUnit.getSession();
BoundStatement countStmt = session.prepare("SELECT COUNT(*) FROM load_balancer_targets;").bind();
PreparedStatement stateStmt = session.prepare("SELECT state FROM load_balancer_targets WHERE load_balancer_id = ? AND ip_address = ?;");
assertThat(session.execute(countStmt).one().getLong(0)).isEqualTo(0);
LoadBalancerTarget target = new LoadBalancerTarget("lb-1", "task-1", "1.1.1.1");
CassandraLoadBalancerStore store = getInitdStore();
store.addOrUpdateTargets(target.withState(LoadBalancerTarget.State.REGISTERED)).block();
assertThat(session.execute(countStmt).one().getLong(0)).isEqualTo(1);
Row registered = session.execute(stateStmt.bind("lb-1", "1.1.1.1")).one();
assertThat(registered.getString("state")).isEqualTo("REGISTERED");
store.addOrUpdateTargets(target.withState(LoadBalancerTarget.State.DEREGISTERED)).block();
assertThat(session.execute(countStmt).one().getLong(0)).isEqualTo(1);
Row deregistered = session.execute(stateStmt.bind("lb-1", "1.1.1.1")).one();
assertThat(deregistered.getString("state")).isEqualTo("DEREGISTERED");
}
@Test
public void testOnlyDeregisteredTargetsAreRemoved() throws Exception {
Map<LoadBalancerTarget, LoadBalancerTarget.State> targets = ImmutableMap.of(
new LoadBalancerTarget("lb-1", "task1", "1.1.1.1"), LoadBalancerTarget.State.REGISTERED,
new LoadBalancerTarget("lb-1", "task2", "2.2.2.2"), LoadBalancerTarget.State.DEREGISTERED,
new LoadBalancerTarget("lb-2", "task1", "1.1.1.1"), LoadBalancerTarget.State.DEREGISTERED,
new LoadBalancerTarget("lb-2", "task3", "3.3.3.3"), LoadBalancerTarget.State.DEREGISTERED
);
loadTestData(new TestData(Collections.emptyMap(), targets));
CassandraLoadBalancerStore store = getInitdStore();
store.removeDeregisteredTargets(targets.keySet()).block(Duration.ofSeconds(10));
List<LoadBalancerTargetState> targets1 = store.getLoadBalancerTargets("lb-1").collectList().block(Duration.ofSeconds(5));
assertThat(targets1).hasSize(1);
assertThat(targets1.get(0).getIpAddress()).isEqualTo("1.1.1.1");
List<LoadBalancerTargetState> targets2 = store.getLoadBalancerTargets("lb-2").collectList().block(Duration.ofSeconds(5));
assertThat(targets2).isEmpty();
}
@Test
public void testParallelUpdates() throws Exception {
Map<JobLoadBalancer, JobLoadBalancer.State> testData = generateTestData(100, 20, 1).getAssociations();
CassandraLoadBalancerStore store = getInitdStore();
// Create an thread pool to generate concurrent updates
ExecutorService executorService = Executors.newFixedThreadPool(10);
testData.forEach((jobLoadBalancer, state) ->
executorService.execute(() -> store.addOrUpdateLoadBalancer(jobLoadBalancer, state)
.await(TIMEOUT_MS, TimeUnit.MILLISECONDS))
);
// Wait till all jobs were submitted
executorService.shutdown();
assertThat(executorService.awaitTermination(10, TimeUnit.SECONDS)).isTrue();
// Verify data is consistent
checkDataSetExists(store, testData);
}
/**
* Tests that all data is returned across multiple pages.
*
* @throws Exception
*/
@Test
public void testGetPage() throws Exception {
int numTestJobs = 100;
int numTestLbs = 20;
TestData testData = generateTestData(numTestJobs, numTestLbs, 1);
Map<JobLoadBalancer, JobLoadBalancer.State> associations = testData.getAssociations();
HashSet<JobLoadBalancer> unverifiedData = new HashSet<>(associations.keySet());
// Load data on init
loadTestData(testData);
CassandraLoadBalancerStore store = getInitdStore();
// Read little pages at a time until we're told we've read everything.
int pageSize = 7;
int currentPageOffset = 0;
List<JobLoadBalancer> jobLoadBalancerPage;
do {
jobLoadBalancerPage = store.getAssociationsPage(currentPageOffset, pageSize);
jobLoadBalancerPage.forEach(jobLoadBalancer -> {
assertThat(unverifiedData.remove(jobLoadBalancer)).isTrue();
});
// Make sure all but the last page is full size
if ((numTestJobs * numTestLbs) - currentPageOffset >= pageSize) {
assertThat(jobLoadBalancerPage.size()).isEqualTo(pageSize);
} else {
assertThat(jobLoadBalancerPage.size()).isEqualTo((numTestJobs * numTestLbs) - currentPageOffset);
}
currentPageOffset += jobLoadBalancerPage.size();
// Make sure we've stopped before reading beyond the data set size
assertThat(currentPageOffset <= numTestJobs * numTestLbs).isTrue();
} while (jobLoadBalancerPage.size() > 0);
// Make sure all of the data was checked
assertThat(unverifiedData.isEmpty()).isTrue();
}
/**
* Tests that all data is returned in a single overly large page request.
*
* @throws Exception
*/
@Test
public void testGetFullPage() throws Exception {
loadTestData(generateTestData(10, 20, 1));
CassandraLoadBalancerStore store = getInitdStore();
List<JobLoadBalancer> jobLoadBalancerPage = store.getAssociationsPage(0, (10 * 20) + 1);
assertThat(jobLoadBalancerPage.size()).isEqualTo(10 * 20);
}
/**
* Returns a map of data to be inserted that can be used for later verification.
*/
private TestData generateTestData(int numJobs, int numLoadBalancersPerJob, int numTasksPerJob) {
Map<JobLoadBalancer, JobLoadBalancer.State> associations = new HashMap<>();
Map<LoadBalancerTarget, LoadBalancerTarget.State> targets = new HashMap<>();
for (int i = 0; i < numJobs; i++) {
String jobId = UUID.randomUUID().toString();
for (int j = 0; j < numLoadBalancersPerJob; j++) {
String loadBalancerId = UUID.randomUUID().toString();
JobLoadBalancer jobLoadBalancer = new JobLoadBalancer(jobId, jobId + "-" + loadBalancerId);
associations.put(jobLoadBalancer, ASSOCIATED);
for (int t = 0; t < numTasksPerJob; t++) {
targets.put(new LoadBalancerTarget(
loadBalancerId, "task-" + t, String.format("%s.%s.%s.%s", i, t, t, t)
), LoadBalancerTarget.State.REGISTERED);
}
}
}
assertThat(associations.size()).isEqualTo(numJobs * numLoadBalancersPerJob);
assertThat(targets.size()).isEqualTo(numJobs * numLoadBalancersPerJob * numTasksPerJob);
return new TestData(associations, targets);
}
private void loadTestData(Pair<Map<JobLoadBalancer, JobLoadBalancer.State>, Map<LoadBalancerTarget, LoadBalancerTarget.State>> data) {
loadTestData(data.getLeft(), data.getRight());
}
/**
* Inserts data in C* for use during tests.
*/
private void loadTestData(Map<JobLoadBalancer, JobLoadBalancer.State> associations, Map<LoadBalancerTarget,
LoadBalancerTarget.State> targets) {
Session session = cassandraCQLUnit.getSession();
PreparedStatement associationStmt = session.prepare("INSERT INTO load_balancer_jobs(job_id, load_balancer_id, state) VALUES(?, ?, ?);");
PreparedStatement targetStmt = session.prepare("INSERT INTO load_balancer_targets(load_balancer_id, ip_address, task_id, state) VALUES(?, ?, ?, ?);");
associations.forEach((jobLoadBalancer, state) -> {
BoundStatement boundStatement = associationStmt.bind(
jobLoadBalancer.getJobId(),
jobLoadBalancer.getLoadBalancerId(),
state.name());
ResultSet rs = session.execute(boundStatement);
assertThat(rs.isExhausted()).isTrue();
assertThat(rs.wasApplied()).isTrue();
});
targets.forEach((target, state) -> {
BoundStatement boundStatement = targetStmt.bind(
target.getLoadBalancerId(),
target.getIpAddress(),
target.getTaskId(),
state.name());
ResultSet rs = session.execute(boundStatement);
assertThat(rs.isExhausted()).isTrue();
assertThat(rs.wasApplied()).isTrue();
});
}
/**
* Creates, loads, and returns a store instance based on what was already in Cassandra.
*/
private CassandraLoadBalancerStore getInitdStore() {
Session session = cassandraCQLUnit.getSession();
CassandraStoreConfiguration configuration = mock(CassandraStoreConfiguration.class);
when(configuration.getLoadBalancerWriteConcurrencyLimit()).thenReturn(10);
when(configuration.getLoadBalancerDeleteConcurrencyLimit()).thenReturn(1);
EntitySanitizer entitySanitizer = new LoadBalancerSanitizerBuilder().build();
CassandraLoadBalancerStore store = new CassandraLoadBalancerStore(configuration, entitySanitizer, session);
store.init();
return store;
}
/**
* Returns the set of JobIds in a test data map.
*
* @return
*/
private Set<String> getJobIdsFromTestData(Map<JobLoadBalancer, JobLoadBalancer.State> testData) {
Set<String> jobIdSet = new HashSet<>();
testData.keySet()
.forEach(jobLoadBalancer -> jobIdSet.add(jobLoadBalancer.getJobId()));
return jobIdSet;
}
/**
* Checks if a provided data set fully exists in the store. The method is
* expected to assert if the check is false.
*
* @throws Exception
*/
private void checkDataSetExists(CassandraLoadBalancerStore store, Map<JobLoadBalancer, JobLoadBalancer.State> testData) throws Exception {
Set<JobLoadBalancer> observableVerificationSet = new HashSet<>(testData.keySet());
Set<JobLoadBalancer> listVerificationSet = new HashSet<>(observableVerificationSet);
Set<String> jobIdSet = getJobIdsFromTestData(testData);
Map<String, List<JobLoadBalancerState>> byJobId = store.getAssociations().stream()
.collect(Collectors.groupingBy(JobLoadBalancerState::getJobId));
jobIdSet.forEach(jobId -> {
// Verify we get the correct load balancers in the correct state
byJobId.get(jobId)
.forEach(loadBalancerState -> {
// Verify that all of the returned data was in the test data.
JobLoadBalancer jobLoadBalancer = loadBalancerState.getJobLoadBalancer();
assertThat(jobLoadBalancer.getJobId().equals(jobId)).isTrue();
assertThat(testData.containsKey(jobLoadBalancer)).isTrue();
assertThat(testData.get(jobLoadBalancer))
.isEqualTo(loadBalancerState.getState());
// Mark that this job/load balancer was checked
assertThat(observableVerificationSet.contains(jobLoadBalancer)).isTrue();
assertThat(observableVerificationSet.remove(jobLoadBalancer)).isTrue();
logger.debug("Verified job {} has load balancer id {} in state {}",
jobId,
loadBalancerState.getLoadBalancerId(),
loadBalancerState.getState());
});
// Verify the secondary indexes return the correct state
store.getAssociatedLoadBalancersSetForJob(jobId)
.forEach(jobLoadBalancer -> {
logger.info("Verifying jobLoadBalancer {}", jobLoadBalancer);
assertThat(jobLoadBalancer.getJobId().equals(jobId)).isTrue();
assertThat(testData.containsKey(jobLoadBalancer)).isTrue();
assertThat(testData.get(jobLoadBalancer))
.isEqualTo(ASSOCIATED);
// Mark that this job/load balancer was checked
assertThat(listVerificationSet.contains(jobLoadBalancer)).isTrue();
assertThat(listVerificationSet.remove(jobLoadBalancer)).isTrue();
logger.debug("Verified job load balancer {}", jobLoadBalancer);
});
});
// Verify that all of the test data was checked.
assertThat(observableVerificationSet.isEmpty()).isTrue();
assertThat(listVerificationSet.isEmpty()).isTrue();
}
/**
* Generics sanity
*/
private static class TestData extends Pair<Map<JobLoadBalancer, JobLoadBalancer.State>, Map<LoadBalancerTarget, LoadBalancerTarget.State>> {
public TestData(Map<JobLoadBalancer, JobLoadBalancer.State> associations, Map<LoadBalancerTarget, LoadBalancerTarget.State> targets) {
super(associations, targets);
}
public Map<JobLoadBalancer, JobLoadBalancer.State> getAssociations() {
return getLeft();
}
public Map<LoadBalancerTarget, LoadBalancerTarget.State> getTargets() {
return getRight();
}
}
}
| 1,369 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra/store/CassStoreHelperTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import rx.Observable;
import rx.schedulers.Schedulers;
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;
import static org.assertj.core.api.Assertions.assertThat;
@Category(IntegrationNotParallelizableTest.class)
public class CassStoreHelperTest {
private static final long STARTUP_TIMEOUT = 60_000L;
/**
* As Cassandra uses memory mapped files there are sometimes issues with virtual disks storing the project files.
* To solve this issue, we relocate the default embedded Cassandra folder to /var/tmp/embeddedCassandra.
*/
private static final String CONFIGURATION_FILE_NAME = "relocated-cassandra.yaml";
@Rule
public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(
new ClassPathCQLDataSet("tables.cql", "titus_integration_tests"),
CONFIGURATION_FILE_NAME,
STARTUP_TIMEOUT
);
@Test
public void paginatedResultSetCanBeFetchedAsync() {
int numberOfRecords = 500;
int numberOfPages = 10;
Session session = cassandraCQLUnit.getSession();
PreparedStatement insertStmt = session.prepare(insertInto("app_scale_jobs").values(
Arrays.asList("job_id", "ref_id"),
Arrays.asList(bindMarker(), bindMarker())
));
for (int i = 0; i < numberOfRecords; i++) {
ResultSet resultSet = session.execute(insertStmt.bind("job-" + i, UUID.randomUUID()));
assertThat(resultSet.wasApplied()).isTrue();
}
PreparedStatement loadStmt = session.prepare(select("job_id", "ref_id").from("app_scale_jobs"));
Observable<ResultSet> results = new CassStoreHelper(session, Schedulers.io()).execute(loadStmt.bind()
// force pagination, and pages to be fetched on demand as the ResultSet is iterated on
.setFetchSize(numberOfRecords / numberOfPages));
results.doOnNext(rows -> rows.forEach(row ->
assertThat(row.getString(0)).startsWith("job-"))
).toCompletable().await(1, TimeUnit.MINUTES);
}
@Test(expected = IllegalStateException.class /* the datastax driver complains that page fetching will cause a deadlock */ )
public void paginatedResultInCassandraThreadsThrowsException() {
int numberOfRecords = 500;
int numberOfPages = 10;
Session session = cassandraCQLUnit.getSession();
PreparedStatement insertStmt = session.prepare(insertInto("app_scale_jobs").values(
Arrays.asList("job_id", "ref_id"),
Arrays.asList(bindMarker(), bindMarker())
));
for (int i = 0; i < numberOfRecords; i++) {
ResultSet resultSet = session.execute(insertStmt.bind("job-" + i, UUID.randomUUID()));
assertThat(resultSet.wasApplied()).isTrue();
}
PreparedStatement loadStmt = session.prepare(select("job_id", "ref_id").from("app_scale_jobs"));
Observable<ResultSet> results = new CassStoreHelper(session, Schedulers.immediate()).execute(loadStmt.bind()
// force pagination, and pages to be fetched on demand as the ResultSet is iterated on
.setFetchSize(numberOfRecords / numberOfPages));
results.doOnNext(rows -> rows.forEach(row ->
assertThat(row.getString(0)).startsWith("job-"))
).toCompletable().await(1, TimeUnit.MINUTES);
}
} | 1,370 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra/store/TestCassandraStoreConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
/**
 * Fixed-value {@code CassandraStoreConfiguration} stub for integration tests: every
 * fail-on-inconsistent-data flag is enabled so bad test data surfaces immediately,
 * concurrency limits are small constants, and tracing is off.
 */
public class TestCassandraStoreConfiguration implements CassandraStoreConfiguration {

    @Override
    public String getV2KeySpace() {
        return "dev";
    }

    // Concurrency limits: small, deterministic values for test runs.

    @Override
    public int getConcurrencyLimit() {
        return 10;
    }

    @Override
    public int getLoadBalancerWriteConcurrencyLimit() {
        return 100;
    }

    @Override
    public int getLoadBalancerDeleteConcurrencyLimit() {
        return 10;
    }

    // Fail-fast flags: all enabled so inconsistent data aborts the test rather than being skipped.

    @Override
    public boolean isFailOnInconsistentCapacityGroupData() {
        return true;
    }

    @Override
    public boolean isFailOnInconsistentAgentData() {
        return true;
    }

    @Override
    public boolean isFailOnInconsistentLoadBalancerData() {
        return true;
    }

    @Override
    public boolean isFailOnInconsistentSchedulerData() {
        return true;
    }

    @Override
    public boolean isTracingEnabled() {
        return false;
    }
}
| 1,371 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/test/java/com/netflix/titus/ext/cassandra/store/CassandraJobStoreTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.google.protobuf.NullValue;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.JobModel;
import com.netflix.titus.api.jobmanager.model.job.JobState;
import com.netflix.titus.api.jobmanager.model.job.JobStatus;
import com.netflix.titus.api.jobmanager.model.job.PlatformSidecar;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.TaskStatus;
import com.netflix.titus.api.jobmanager.model.job.Version;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.retry.ExponentialBackoffRetryPolicy;
import com.netflix.titus.api.jobmanager.model.job.volume.SharedContainerVolumeSource;
import com.netflix.titus.api.jobmanager.model.job.volume.Volume;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.api.jobmanager.store.JobStoreException;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.testkit.junit.category.IntegrationNotParallelizableTest;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import rx.Completable;
import rx.Observable;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
@Category(IntegrationNotParallelizableTest.class)
public class CassandraJobStoreTest {
public static final int MAX_CONCURRENCY = 10;
private static final long STARTUP_TIMEOUT_MS = 30_000L;
private static final int INITIAL_BUCKET_COUNT = 1;
private static final int MAX_BUCKET_SIZE = 10;
/**
* As Cassandra uses memory mapped files there are sometimes issues with virtual disks storing the project files.
* To solve this issue, we relocate the default embedded Cassandra folder to /var/tmp/embeddedCassandra.
*/
private static final String CONFIGURATION_FILE_NAME = "relocated-cassandra.yaml";
private static final CassandraStoreConfiguration CONFIGURATION = new TestCassandraStoreConfiguration();
@Rule
public CassandraCQLUnit cassandraCqlUnit = new CassandraCQLUnit(
new ClassPathCQLDataSet("tables.cql", "titus_integration_tests"),
CONFIGURATION_FILE_NAME,
STARTUP_TIMEOUT_MS
);
    /**
     * Stores a job through one store instance, then verifies that a second store
     * instance loads it on init() with no errors, and that the still-active job is
     * not visible through the archived-job API.
     */
    @Test
    public void testRetrieveJobs() {
        Session session = cassandraCqlUnit.getSession();
        JobStore bootstrappingStore = getJobStore(session);
        Job<BatchJobExt> job = createBatchJobObject();
        bootstrappingStore.storeJob(job).await();
        JobStore store = getJobStore(session);
        store.init().await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        assertThat(jobsAndErrors.getLeft()).hasSize(1);
        // Right side of the pair is the count of records that failed to deserialize
        assertThat(jobsAndErrors.getRight()).isEqualTo(0);
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
        // Check that archive access does not return anything.
        try {
            store.retrieveArchivedJob(job.getId()).toBlocking().first();
            fail("Should not return active job");
        } catch (JobStoreException e) {
            assertThat(e.getErrorCode()).isEqualTo(JobStoreException.ErrorCode.JOB_DOES_NOT_EXIST);
        }
    }
    /** Round-trips a batch job through the store. */
    @Test
    public void testRetrieveBatchJob() {
        doRetrieveJob(createBatchJobObject());
    }
    /** Round-trips a service job through the store. */
    @Test
    public void testRetrieveServiceJob() {
        doRetrieveJob(createServiceJobObject());
    }
    /** Round-trips a service job that declares shared-container volumes. */
    @Test
    public void testRetrieveJobWithVolumes() {
        doRetrieveJob(createServiceJobWithVolumesObject());
    }
    /** Round-trips a service job that declares platform sidecars. */
    @Test
    public void testPlatformSidecarJob() {
        doRetrieveJob(createServiceJobWithPlatformSidecarsObject());
    }
    /**
     * Stores the given job, initializes the store (which reloads from Cassandra),
     * and verifies the job read back matches the one written.
     */
    private <E extends JobDescriptor.JobDescriptorExt> void doRetrieveJob(Job<E> job) {
        JobStore store = getJobStore();
        store.storeJob(job).await();
        store.init().await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
    }
    /** Stores a batch job on an initialized store and verifies it is retrievable. */
    @Test
    public void testStoreJob() {
        JobStore store = getJobStore();
        Job<BatchJobExt> job = createBatchJobObject();
        store.init().await();
        store.storeJob(job).await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
    }
    /**
     * Create enough jobs to evenly be bucketed across multiple rows. Delete 1 job per bucket. Add back enough jobs to fill
     * in the deleted jobs plus an extra bucket worth as a new bucket was created when reaching the max of all the original buckets.
     */
    @Test
    public void testActiveJobIdDistribution() {
        int numberOfJobsToCreate = 100;
        int numberOfBuckets = numberOfJobsToCreate / MAX_BUCKET_SIZE;
        Session session = cassandraCqlUnit.getSession();
        JobStore store = getJobStore(session);
        store.init().await();
        // Phase 1: fill all buckets exactly to MAX_BUCKET_SIZE
        List<Job<?>> createdJobs = new ArrayList<>();
        List<Completable> completables = new ArrayList<>();
        for (int i = 0; i < numberOfJobsToCreate; i++) {
            Job<BatchJobExt> job = createBatchJobObject();
            createdJobs.add(job);
            completables.add(store.storeJob(job));
        }
        Completable.merge(Observable.from(completables), MAX_CONCURRENCY).await();
        Pair<List<Job<?>>, Integer> retrievedJobsAndErrors = store.retrieveJobs().toBlocking().first();
        assertThat(retrievedJobsAndErrors.getLeft()).hasSize(numberOfJobsToCreate);
        assertItemsPerBucket(session, numberOfBuckets, MAX_BUCKET_SIZE);
        // Phase 2: delete one job per bucket (stride of MAX_BUCKET_SIZE over the creation order)
        int j = 0;
        int jobsRemoved = 0;
        completables = new ArrayList<>();
        while (j < numberOfJobsToCreate) {
            Job<?> jobToRemove = createdJobs.get(j);
            completables.add(store.deleteJob(jobToRemove));
            j += MAX_BUCKET_SIZE;
            jobsRemoved++;
        }
        Completable.merge(Observable.from(completables), MAX_CONCURRENCY).await();
        assertItemsPerBucket(session, numberOfBuckets, MAX_BUCKET_SIZE - 1);
        // Phase 3: refill the holes plus one extra bucket's worth; a new bucket should appear
        completables = new ArrayList<>();
        for (int i = 0; i < jobsRemoved + MAX_BUCKET_SIZE; i++) {
            Job<BatchJobExt> job = createBatchJobObject();
            completables.add(store.storeJob(job));
        }
        Completable.merge(Observable.from(completables), MAX_CONCURRENCY).await();
        retrievedJobsAndErrors = store.retrieveJobs().toBlocking().first();
        assertThat(retrievedJobsAndErrors.getLeft()).hasSize(numberOfJobsToCreate + MAX_BUCKET_SIZE);
        assertItemsPerBucket(session, numberOfBuckets + 1, MAX_BUCKET_SIZE);
    }
    /**
     * Asserts via raw CQL that each of the first {@code numberOfBuckets} active_job_ids
     * buckets holds exactly {@code expectedNumberOfItemsPerBucket} job ids.
     */
    private void assertItemsPerBucket(Session session, int numberOfBuckets, int expectedNumberOfItemsPerBucket) {
        for (int i = 0; i < numberOfBuckets; i++) {
            ResultSet resultSet = session.execute("SELECT COUNT(*) FROM active_job_ids WHERE bucket = " + i);
            long numberOfItemsInBucket = resultSet.one().getLong(0);
            assertThat(numberOfItemsInBucket).isEqualTo(expectedNumberOfItemsPerBucket);
        }
    }
    /**
     * Stores a job, updates its status to Finished, and verifies the updated
     * version is what the store returns.
     */
    @Test
    public void testUpdateJob() {
        JobStore store = getJobStore();
        Job<BatchJobExt> job = createBatchJobObject();
        store.init().await();
        store.storeJob(job).await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
        Job<BatchJobExt> newJob = job.toBuilder()
                .withStatus(JobStatus.newBuilder().withState(JobState.Finished).build())
                .build();
        store.updateJob(newJob).await();
        Pair<List<Job<?>>, Integer> newJobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(newJob, newJobsAndErrors.getLeft().get(0));
    }
    /** Stores a job, deletes it, and verifies the store no longer returns it. */
    @Test
    public void testDeleteJob() {
        JobStore store = getJobStore();
        Job<BatchJobExt> job = createBatchJobObject();
        store.init().await();
        store.storeJob(job).await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
        store.deleteJob(job).await();
        jobsAndErrors = store.retrieveJobs().toBlocking().first();
        assertThat(jobsAndErrors.getLeft()).isEmpty();
    }
    /**
     * Stores a task for a job and verifies it is returned by retrieveTasksForJob,
     * and that the still-active task is not visible through the archived-task API.
     */
    @Test
    public void testRetrieveTasksForJob() {
        JobStore store = getJobStore();
        Job<BatchJobExt> job = createBatchJobObject();
        store.init().await();
        store.storeJob(job).await();
        Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
        checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
        Task task = createTaskObject(job);
        store.storeTask(task).await();
        Pair<List<Task>, Integer> tasks = store.retrieveTasksForJob(job.getId()).toBlocking().first();
        checkRetrievedTask(task, tasks.getLeft().get(0));
        // Check that archive access does not return anything.
        Task archivedTask = store.retrieveArchivedTasksForJob(job.getId()).toBlocking().firstOrDefault(null);
        assertThat(archivedTask).isNull();
    }
@Test
public void testRetrieveTask() {
// An active task must be retrievable by id; the archive accessor must fail with TASK_DOES_NOT_EXIST.
JobStore store = getJobStore();
Job<BatchJobExt> job = createBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createTaskObject(job);
store.storeTask(task).await();
Task retrievedTask = store.retrieveTask(task.getId()).toBlocking().first();
checkRetrievedTask(task, retrievedTask);
// Check that archive access does not return anything.
try {
store.retrieveArchivedTask(task.getId()).toBlocking().first();
fail("Should not return active task");
} catch (JobStoreException e) {
assertThat(e.getErrorCode()).isEqualTo(JobStoreException.ErrorCode.TASK_DOES_NOT_EXIST);
}
}
@Test
public void testStoreTask() {
// A stored task must round-trip unchanged through retrieveTask.
JobStore store = getJobStore();
Job<BatchJobExt> job = createBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createTaskObject(job);
store.storeTask(task).await();
Task retrievedTask = store.retrieveTask(task.getId()).toBlocking().first();
checkRetrievedTask(task, retrievedTask);
}
@Test
public void testUpdateTask() {
// Store a task, then persist a Finished copy and verify the update is what is read back.
JobStore store = getJobStore();
Job<BatchJobExt> job = createBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createTaskObject(job);
store.storeTask(task).await();
Task retrievedTask = store.retrieveTask(task.getId()).toBlocking().first();
checkRetrievedTask(task, retrievedTask);
BatchJobTask newTask = BatchJobTask.newBuilder((BatchJobTask) task)
.withStatus(TaskStatus.newBuilder().withState(TaskState.Finished).build())
.build();
store.updateTask(newTask).await();
Task newRetrievedTask = store.retrieveTask(newTask.getId()).toBlocking().first();
checkRetrievedTask(newTask, newRetrievedTask);
}
@Test
public void testReplaceTask() {
// replaceTask must atomically swap the first task for the second, leaving exactly one task.
JobStore store = getJobStore();
Job<BatchJobExt> job = createBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task firstTask = createTaskObject(job);
store.storeTask(firstTask).await();
Task retrievedTask = store.retrieveTask(firstTask.getId()).toBlocking().first();
checkRetrievedTask(firstTask, retrievedTask);
Task secondTask = createTaskObject(job);
store.replaceTask(firstTask, secondTask).await();
Pair<List<Task>, Integer> tasks = store.retrieveTasksForJob(job.getId()).toBlocking().first();
assertThat(tasks.getLeft()).hasSize(1);
// Right side of the pair is the error count; expect none.
assertThat(tasks.getRight()).isEqualTo(0);
checkRetrievedTask(secondTask, tasks.getLeft().get(0));
}
@Test
public void testDeleteTask() {
// Store a task, delete it, and verify it is gone from the job's active task list.
JobStore store = getJobStore();
Job<BatchJobExt> job = createBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createTaskObject(job);
store.storeTask(task).await();
Task retrievedTask = store.retrieveTask(task.getId()).toBlocking().first();
checkRetrievedTask(task, retrievedTask);
store.deleteTask(task).await();
Pair<List<Task>, Integer> tasks = store.retrieveTasksForJob(job.getId()).toBlocking().first();
assertThat(tasks.getLeft()).isEmpty();
}
@Test
public void testRetrieveArchivedJob() {
// Archived (deleted) job must be readable through the archive accessor.
testRetrieveArchivedJob(true);
}
@Test
public void testRetrieveArchivedJobFromActiveTable() {
// The archive accessor must also find a job that is still in the active table.
testRetrieveArchivedJob(false);
}
// Shared scenario: store a finished job, optionally delete (archive) it, then
// verify retrieveArchivedJob finds it either way.
private void testRetrieveArchivedJob(boolean archive) {
JobStore store = getJobStore();
Job<BatchJobExt> job = createFinishedBatchJobObject();
store.init().await();
store.storeJob(job).await();
if (archive) {
store.deleteJob(job).await();
}
Job archivedJob = store.retrieveArchivedJob(job.getId()).toBlocking().first();
checkRetrievedJob(job, archivedJob);
}
@Test
public void testRetrieveArchivedTasksForJob() {
// Archived (deleted) task must be readable through the per-job archive accessor.
testRetrieveArchivedTasksForJob(true);
}
@Test
public void testRetrieveArchivedTasksForJobFromActiveTable() {
// The per-job archive accessor must also find a task still in the active table.
testRetrieveArchivedTasksForJob(false);
}
// Shared scenario: store a finished job + task, optionally delete (archive) the
// task, then verify retrieveArchivedTasksForJob finds it either way.
private void testRetrieveArchivedTasksForJob(boolean archive) {
JobStore store = getJobStore();
Job<BatchJobExt> job = createFinishedBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createFinishedTaskObject(job);
store.storeTask(task).await();
if (archive) {
store.deleteTask(task).await();
}
Task archivedTask = store.retrieveArchivedTasksForJob(job.getId()).toBlocking().first();
checkRetrievedTask(task, archivedTask);
}
@Test
public void testRetrieveArchivedTask() {
// Archived (deleted) task must be readable by id through the archive accessor.
testRetrieveArchivedTask(true);
}
@Test
public void testRetrieveArchivedTaskFromActiveTable() {
// The archive accessor must also find a task that is still in the active table.
testRetrieveArchivedTask(false);
}
// Shared scenario: store a finished job + task, optionally delete (archive) the
// task, then verify retrieveArchivedTask finds it by id either way.
private void testRetrieveArchivedTask(boolean archive) {
JobStore store = getJobStore();
Job<BatchJobExt> job = createFinishedBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createFinishedTaskObject(job);
store.storeTask(task).await();
if (archive) {
store.deleteTask(task).await();
}
Task archivedTask = store.retrieveArchivedTask(task.getId()).toBlocking().first();
checkRetrievedTask(task, archivedTask);
}
@Test
public void testMoveTask() {
// moveTask must atomically shrink the source job, grow the target job, and
// re-home the task; verify all three by re-reading from the store.
JobStore store = getJobStore();
store.init().await();
Job<ServiceJobExt> jobFrom = createServiceJobObject();
store.storeJob(jobFrom).await();
Job<ServiceJobExt> jobTo = createServiceJobObject();
store.storeJob(jobTo).await();
Task task = createServiceTaskObject(jobFrom);
store.storeTask(task).await();
Job<ServiceJobExt> updatedFromJob = JobFunctions.incrementJobSize(jobFrom, -1);
Job<ServiceJobExt> updatedToJob = JobFunctions.incrementJobSize(jobTo, 1);
Task updatedTask = task.toBuilder().withJobId(updatedToJob.getId()).build();
store.moveTask(updatedFromJob, updatedToJob, updatedTask).await();
// Load jobFrom from store
Job<?> jobFromLoaded = store.retrieveJob(jobFrom.getId()).toBlocking().first();
assertThat(JobFunctions.getJobDesiredSize(jobFromLoaded)).isEqualTo(0);
Pair<List<Task>, Integer> jobFromTasksLoaded = store.retrieveTasksForJob(jobFrom.getId()).toBlocking().first();
assertThat(jobFromTasksLoaded.getLeft()).hasSize(0);
// Load jobTo from store
Job<?> jobToLoaded = store.retrieveJob(jobTo.getId()).toBlocking().first();
assertThat(JobFunctions.getJobDesiredSize(jobToLoaded)).isEqualTo(2);
Pair<List<Task>, Integer> jobToTasksLoaded = store.retrieveTasksForJob(jobTo.getId()).toBlocking().first();
assertThat(jobToTasksLoaded.getLeft()).hasSize(1);
jobToTasksLoaded.getLeft().forEach(t -> assertThat(t.getJobId()).isEqualTo(jobTo.getId()));
}
@Test
public void testRetrieveArchivedTaskCountForJob() {
// After archiving one task, the archived-task count for the job must be 1.
JobStore store = getJobStore();
Job<BatchJobExt> job = createFinishedBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createFinishedTaskObject(job);
store.storeTask(task).await();
store.deleteTask(task).await();
Long count = store.retrieveArchivedTaskCountForJob(job.getId()).toBlocking().first();
assertThat(count).isEqualTo(1);
}
@Test
public void testDeleteArchivedTask() {
// Deleting an archived task must drop the job's archived-task count from 1 to 0.
JobStore store = getJobStore();
Job<BatchJobExt> job = createFinishedBatchJobObject();
store.init().await();
store.storeJob(job).await();
Pair<List<Job<?>>, Integer> jobsAndErrors = store.retrieveJobs().toBlocking().first();
checkRetrievedJob(job, jobsAndErrors.getLeft().get(0));
Task task = createFinishedTaskObject(job);
store.storeTask(task).await();
store.deleteTask(task).await();
Long count = store.retrieveArchivedTaskCountForJob(job.getId()).toBlocking().first();
assertThat(count).isEqualTo(1);
store.deleteArchivedTask(job.getId(), task.getId()).await();
Long count2 = store.retrieveArchivedTaskCountForJob(job.getId()).toBlocking().first();
assertThat(count2).isEqualTo(0);
}
// Convenience overload: build a store against the embedded Cassandra session.
private JobStore getJobStore() {
return getJobStore(null);
}
/**
 * Creates a {@link CassandraJobStore} backed by the given session, or by the
 * embedded Cassandra unit's session when {@code session} is null.
 */
private JobStore getJobStore(Session session) {
    Session effectiveSession = session != null ? session : cassandraCqlUnit.getSession();
    return new CassandraJobStore(CONFIGURATION, effectiveSession, TitusRuntimes.internal(),
            ObjectMappers.storeMapper(), INITIAL_BUCKET_COUNT, MAX_BUCKET_SIZE);
}
// Generates a single-task batch job fixture.
private Job<BatchJobExt> createBatchJobObject() {
return JobGenerator.batchJobs(JobDescriptorGenerator.oneTaskBatchJobDescriptor()).getValue();
}
// Generates a batch job fixture already in the Finished state (eligible for archiving).
private Job createFinishedBatchJobObject() {
return JobFunctions.changeJobStatus(
createBatchJobObject(),
JobStatus.newBuilder().withState(JobState.Finished).build()
);
}
/**
 * Generates a single-task service job fixture with an exponential backoff retry
 * policy (10ms initial delay, 100ms max delay, 5 retries).
 */
private Job<ServiceJobExt> createServiceJobObject() {
    ExponentialBackoffRetryPolicy retryPolicy = JobModel.newExponentialBackoffRetryPolicy()
            .withInitialDelayMs(10)
            .withMaxDelayMs(100)
            .withRetries(5)
            .build();
    JobDescriptor<ServiceJobExt> descriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor()
            .but(jd -> jd.getExtensions().toBuilder().withRetryPolicy(retryPolicy).build());
    return JobGenerator.serviceJobs(descriptor).getValue();
}
// Generates a service job fixture carrying one shared-container volume, to
// exercise volume (de)serialization in the store.
private Job<ServiceJobExt> createServiceJobWithVolumesObject() {
List<Volume> volumes = Collections.singletonList(createTestVolume());
JobDescriptor<ServiceJobExt> jobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor().but(jd ->
jd.toBuilder().withVolumes(volumes).build());
return JobGenerator.serviceJobs(jobDescriptor).getValue();
}
/**
 * createServiceJobWithPlatformSidecarsObject is an extra strenuous test for the CassandraJobStore
 * suite, as it exercises all the things needed to ensure that the complex arguments field
 * is properly serialized correctly.
 *
 * <p>The sidecar arguments are supplied as a raw JSON string (that is how
 * {@code PlatformSidecar.withArguments} is invoked here). A leftover, never-referenced
 * {@code Struct.Builder} that used to model the same arguments has been removed as dead code.
 */
private Job<ServiceJobExt> createServiceJobWithPlatformSidecarsObject() {
    PlatformSidecar ps1 = PlatformSidecar.newBuilder()
            .withName("testSidecar")
            .withChannel("testChannel")
            .withArguments("{\"foo\":true,\"bar\":3.0}")
            .build();
    List<PlatformSidecar> platformSidecars = Collections.singletonList(ps1);
    JobDescriptor<ServiceJobExt> jobDescriptor = JobDescriptorGenerator.oneTaskServiceJobDescriptor().but(jd ->
            jd.toBuilder().withPlatformSidecars(platformSidecars).build()
    );
    return JobGenerator.serviceJobs(jobDescriptor).getValue();
}
/** Builds a volume fixture backed by a shared-container source. */
private Volume createTestVolume() {
    return new Volume("testVolume", new SharedContainerVolumeSource("sourceContainer", "sourcePath"));
}
// Generates a batch task fixture bound to the given job.
private Task createTaskObject(Job<BatchJobExt> job) {
return JobGenerator.oneBatchTask().toBuilder().withJobId(job.getId()).build();
}
// Generates a batch task fixture already in the Finished state (eligible for archiving).
private Task createFinishedTaskObject(Job<BatchJobExt> job) {
return JobFunctions.changeTaskStatus(createTaskObject(job), TaskStatus.newBuilder().withState(TaskState.Finished).build());
}
/**
 * Builds an Accepted service task bound to the given job, with a random id
 * and the current time as the status timestamp.
 */
private Task createServiceTaskObject(Job<ServiceJobExt> job) {
    TaskStatus acceptedStatus = TaskStatus.newBuilder()
            .withState(TaskState.Accepted)
            .withTimestamp(System.currentTimeMillis())
            .build();
    return ServiceJobTask.newBuilder()
            .withId(UUID.randomUUID().toString())
            .withJobId(job.getId())
            .withStatus(acceptedStatus)
            .build();
}
// Asserts job equality, tolerating the store stamping a version on jobs that were
// written with Version.undefined(): in that case the retrieved version's timestamp
// is expected to equal the job status timestamp, and the rest must match exactly.
private void checkRetrievedJob(Job<?> job, Job<?> retrievedJob) {
if (job.getVersion().equals(Version.undefined())) {
assertThat(retrievedJob.getVersion().getTimestamp()).isEqualTo(job.getStatus().getTimestamp());
assertThat(retrievedJob.toBuilder().withVersion(Version.undefined()).build()).isEqualTo(job);
} else {
assertThat(retrievedJob).isEqualTo(job);
}
}
// Asserts task equality, tolerating the store stamping a version on tasks that were
// written with Version.undefined() (mirrors checkRetrievedJob).
private void checkRetrievedTask(Task task, Task retrievedTask) {
if (task.getVersion().equals(Version.undefined())) {
assertThat(retrievedTask.getVersion().getTimestamp()).isEqualTo(task.getStatus().getTimestamp());
assertThat(retrievedTask.toBuilder().withVersion(Version.undefined()).build()).isEqualTo(task);
} else {
assertThat(retrievedTask).isEqualTo(task);
}
}
} | 1,372 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/util/StoreTransactionLogger.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.util;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Statement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Help class to write store (CRUD) transaction log.
*/
/**
 * Helper class that writes a transaction log entry for every store (CRUD) operation
 * executed against Cassandra. Entries go to the dedicated "CassandraStoreTransactionLogger"
 * logger so they can be routed/filtered independently of application logs.
 *
 * <p>Keys and summaries for known entity types are produced by functions registered via
 * {@link Builder}; unknown types fall back to identity-hash keys and a truncated
 * {@code toString()} summary.
 */
public class StoreTransactionLogger {

    private static final Logger logger = LoggerFactory.getLogger("CassandraStoreTransactionLogger");

    /** Maximum number of characters of an entity summary included in a log line. */
    private static final int SUMMARY_MAX_SIZE = 128;

    // Per-entity-type functions extracting a log-friendly key from an entity.
    private final Map<Class<?>, Function<Object, String>> entityKeySelectors;
    // Per-entity-type functions rendering an entity summary for the log line.
    private final Map<Class<?>, Function<Object, String>> entityFormatters;

    private StoreTransactionLogger(Map<Class<?>, Function<Object, String>> entityKeySelectors,
                                   Map<Class<?>, Function<Object, String>> entityFormatters) {
        this.entityKeySelectors = entityKeySelectors;
        this.entityFormatters = entityFormatters;
    }

    public <T> void logBeforeCreate(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "BeforeCreate");
    }

    public <T> void logAfterCreate(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "AfterCreate");
    }

    public <T> void logBeforeUpdate(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "BeforeUpdate");
    }

    public <T> void logAfterUpdate(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "AfterUpdate");
    }

    /** Read operations take an explicit key, as there is no entity yet to derive one from. */
    public void logBeforeRead(PreparedStatement statement, String key, String method) {
        tryLog(() -> doFormat(key, statement.getQueryKeyspace(), method, "BeforeRead", statement.getConsistencyLevel(), "<N/A>"));
    }

    public <T> void logAfterRead(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "AfterRead");
    }

    public <T> void logBeforeDelete(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "BeforeDelete");
    }

    public <T> void logAfterDelete(PreparedStatement statement, String method, T entity) {
        log(statement, method, entity, "AfterDelete");
    }

    private <T> void log(PreparedStatement statement, String operation, T entity, String crudAction) {
        tryLog(() -> doFormat(doFormatKey(entity), statement.getQueryKeyspace(), operation, crudAction, statement.getConsistencyLevel(), doFormatEntity(entity)));
    }

    public <T> void logBeforeCreate(Statement statement, String method, T entity) {
        log(statement, method, entity, "BeforeCreate");
    }

    public <T> void logAfterCreate(Statement statement, String method, T entity) {
        log(statement, method, entity, "AfterCreate");
    }

    public <T> void logBeforeUpdate(Statement statement, String method, T entity) {
        log(statement, method, entity, "BeforeUpdate");
    }

    public <T> void logAfterUpdate(Statement statement, String method, T entity) {
        log(statement, method, entity, "AfterUpdate");
    }

    public void logBeforeRead(Statement statement, String key, String method) {
        tryLog(() -> doFormat(key, statement.getKeyspace(), method, "BeforeRead", statement.getConsistencyLevel(), "<N/A>"));
    }

    public <T> void logAfterRead(Statement statement, String method, T entity) {
        log(statement, method, entity, "AfterRead");
    }

    public <T> void logBeforeDelete(Statement statement, String method, T entity) {
        log(statement, method, entity, "BeforeDelete");
    }

    public <T> void logAfterDelete(Statement statement, String method, T entity) {
        log(statement, method, entity, "AfterDelete");
    }

    private <T> void log(Statement statement, String operation, T entity, String crudAction) {
        tryLog(() -> doFormat(doFormatKey(entity), statement.getKeyspace(), operation, crudAction, statement.getConsistencyLevel(), doFormatEntity(entity)));
    }

    /**
     * Evaluates and emits a log line, swallowing formatting errors so that a broken
     * formatter can never break the store operation being logged.
     */
    private void tryLog(Supplier<String> messageProducer) {
        try {
            logger.info(messageProducer.get());
        } catch (Exception e) {
            logger.error("Logging error: {}", e.getMessage());
            if (logger.isDebugEnabled()) {
                logger.debug("Stack trace", e);
            }
        }
    }

    /** Resolves the log key for an entity; falls back to type name + identity hash. */
    private String doFormatKey(Object entity) {
        if (entity == null) {
            return "<null>";
        }
        Function<Object, String> keySelector = entityKeySelectors.get(entity.getClass());
        if (keySelector != null) {
            return keySelector.apply(entity);
        }
        return entity.getClass().getSimpleName() + '#' + Integer.toHexString(System.identityHashCode(entity));
    }

    /** Resolves the summary text for an entity; falls back to a truncated toString(). */
    private String doFormatEntity(Object entity) {
        if (entity == null) {
            return "<null>";
        }
        Function<Object, String> formatter = entityFormatters.get(entity.getClass());
        if (formatter != null) {
            return formatter.apply(entity);
        }
        // FIX: truncate to at most SUMMARY_MAX_SIZE characters. The previous
        // substring(SUMMARY_MAX_SIZE) dropped the first 128 characters and threw
        // StringIndexOutOfBoundsException for shorter summaries (which tryLog then
        // reported as a logging error on every such entry).
        String summary = entity.toString();
        return summary.length() <= SUMMARY_MAX_SIZE ? summary : summary.substring(0, SUMMARY_MAX_SIZE);
    }

    private String doFormat(String key,
                            String table,
                            String method,
                            String crudAction,
                            ConsistencyLevel consistencyLevel,
                            String summary) {
        return String.format(
                "key=%s, table=%-15s method=%-20s crudAction=%-8s consistencyLevel=%-5s summary=%s",
                key,
                table,
                method,
                crudAction,
                consistencyLevel,
                summary
        );
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    /** Registers per-entity-type key selectors and formatters, then builds the logger. */
    public static class Builder {

        private final Map<Class<?>, Function<Object, String>> entityKeySelectors = new HashMap<>();
        private final Map<Class<?>, Function<Object, String>> entityFormatters = new HashMap<>();

        public <T> Builder withEntityKeySelectors(Class<T> entityType, Function<T, String> entityKeySelector) {
            entityKeySelectors.put(entityType, (Function) entityKeySelector);
            return this;
        }

        public <T> Builder withEntityFormatter(Class<T> type, Function<T, String> formatter) {
            entityFormatters.put(type, (Function) formatter);
            return this;
        }

        public StoreTransactionLogger build() {
            return new StoreTransactionLogger(entityKeySelectors, entityFormatters);
        }
    }
}
| 1,373 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/FuturePaginatedQuery.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import rx.Observable;
import rx.Subscriber;
import rx.subscriptions.Subscriptions;
/**
 * Bridges a {@link ListenableFuture}-based paginated query into an Rx {@link Observable}.
 * The first page future is obtained from the {@code initial} supplier; each completed page
 * schedules the next fetch via {@code next} until {@code endStatusFetcher} reports the end.
 * Unsubscribing cancels the in-flight future and stops further fetches.
 */
public class FuturePaginatedQuery<PAGE, T> {
// Downstream subscriber receiving the items extracted from each page.
private final Subscriber<? super T> subscriber;
// Produces the future for the page following the given one.
private final Function<PAGE, ListenableFuture<PAGE>> next;
// Extracts the items of a page; the second argument is the running item count so far.
private final BiFunction<PAGE, Integer, List<T>> itemsFetcher;
// Returns true when the given page is the last one.
private final Function<PAGE, Boolean> endStatusFetcher;
// Set on unsubscribe; checked before emitting and before scheduling the next fetch.
private volatile boolean cancelled;
// The in-flight page future, kept so cancel() can interrupt it.
private volatile ListenableFuture<PAGE> currentFuture;
// Running total of items emitted; updated only from the (sequential) future callbacks.
private volatile int fetchedItemsCount;
public FuturePaginatedQuery(Subscriber<? super T> subscriber,
Supplier<ListenableFuture<PAGE>> initial,
Function<PAGE, ListenableFuture<PAGE>> next,
BiFunction<PAGE, Integer, List<T>> itemsFetcher,
Function<PAGE, Boolean> endStatusFetcher) {
this.subscriber = subscriber;
this.next = next;
this.itemsFetcher = itemsFetcher;
this.endStatusFetcher = endStatusFetcher;
// Wire cancellation into the subscription, then kick off the first page fetch.
subscriber.add(Subscriptions.create(this::cancel));
handleReply(initial.get());
}
private void cancel() {
cancelled = true;
if (currentFuture != null) {
currentFuture.cancel(true);
}
}
// Registers a callback on the page future; on success it emits the page's items and
// either completes or recursively schedules the next page.
private void handleReply(ListenableFuture<PAGE> pageFuture) {
currentFuture = pageFuture;
Futures.addCallback(pageFuture, new FutureCallback<PAGE>() {
@Override
public void onSuccess(PAGE page) {
if (!cancelled) {
List<T> pageItems = itemsFetcher.apply(page, fetchedItemsCount);
fetchedItemsCount += pageItems.size();
pageItems.forEach(subscriber::onNext);
if (endStatusFetcher.apply(page)) {
subscriber.onCompleted();
} else {
// Re-check cancellation: the subscriber may have unsubscribed while items were emitted.
if (!cancelled) {
handleReply(next.apply(page));
}
}
}
}
@Override
public void onFailure(Throwable error) {
if (!cancelled) {
subscriber.onError(error);
}
}
}, MoreExecutors.directExecutor());
}
/**
 * Converts futures based paginated query into an observable.
 */
public static <PAGE, T> Observable<T> paginatedQuery(Supplier<ListenableFuture<PAGE>> initial,
Function<PAGE, ListenableFuture<PAGE>> next,
BiFunction<PAGE, Integer, List<T>> itemsFetcher,
Function<PAGE, Boolean> endStatusFetcher) {
return Observable.create(subscriber -> new FuturePaginatedQuery<>(subscriber, initial, next, itemsFetcher, endStatusFetcher));
}
}
| 1,374 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/MetadataOperations.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.util.ArrayList;
import java.util.List;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TokenRange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Computes and caches the Cassandra token ranges used to parallelize full-table scans.
 * Each cluster token range is split evenly into {@code split} sub-ranges, and wrapping
 * ranges are unwrapped so every returned range is contiguous.
 */
class MetadataOperations {

    private static final Logger logger = LoggerFactory.getLogger(MetadataOperations.class);

    // Made final: both are assigned once in the constructor and never change.
    private final Session session;
    private final int split;

    private final List<TokenRange> ranges;

    public MetadataOperations(ExecutionContext context) {
        this.session = context.getSession();
        this.split = context.getSplit();
        this.ranges = buildTokenRanges();
    }

    public List<TokenRange> getRanges() {
        return ranges;
    }

    private List<TokenRange> buildTokenRanges() {
        List<TokenRange> result = new ArrayList<>();
        Metadata metadata = session.getCluster().getMetadata();
        for (TokenRange range : metadata.getTokenRanges()) {
            // Renamed loop variable (was 'split') to stop shadowing the field of the same name.
            for (TokenRange subRange : range.splitEvenly(split)) {
                result.addAll(subRange.unwrap());
            }
        }
        // FIX: log the number of cluster token ranges; the previous code passed the whole
        // collection, dumping every range into a message that reads as a count.
        logger.info("Configured with {} token ranges, and {} splits", metadata.getTokenRanges().size(), result.size());
        return result;
    }
}
| 1,375 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/ExecutionContext.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.util.List;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TokenRange;
/**
 * Shared state for Cassandra query execution: the driver session, paging configuration,
 * and the precomputed token ranges used to parallelize full-table scans.
 */
public class ExecutionContext {

    private final Session session;
    private final int pageSize;
    private final int split;
    private final MetadataOperations meta;

    public ExecutionContext(Session session, int pageSize, int split) {
        this.session = session;
        this.pageSize = pageSize;
        this.split = split;
        // Must be constructed last: MetadataOperations reads session/split back through this context.
        this.meta = new MetadataOperations(this);
    }

    public Session getSession() {
        return session;
    }

    public int getPageSize() {
        return pageSize;
    }

    public int getSplit() {
        return split;
    }

    public List<TokenRange> getTokenRanges() {
        return meta.getRanges();
    }
}
| 1,376 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/QueryOperations.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PagingIterable;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TokenRange;
import com.netflix.titus.common.util.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
class QueryOperations {
private static final Logger logger = LoggerFactory.getLogger(QueryOperations.class);
private final Session session;
private final List<TokenRange> tokenRanges;
private final int pageSize;
public QueryOperations(ExecutionContext context) {
this.session = context.getSession();
this.tokenRanges = context.getTokenRanges();
this.pageSize = context.getPageSize();
}
Observable<Map<String, Map<String, Object>>> executeQuery(BoundStatement statement, Function<String, Class<?>> entityTypeResolver) {
return executeQueryInternal(statement.bind(), entityTypeResolver)
.toList()
.map(all -> {
Map<String, Map<String, Object>> result = new HashMap<>();
all.forEach(cell -> {
Map<String, Object> rowResult = result.computeIfAbsent(cell.getRowId(), k -> new HashMap<>());
rowResult.put(cell.getColumn(), cell.getValue());
});
return result;
});
}
Map<String, Map<String, Object>> executeQueryBlocking(BoundStatement statement, Function<String, Class<?>> entityTypeResolver) {
return executeQuery(statement, entityTypeResolver).toBlocking().first();
}
Observable<Map<String, Object>> executeRowQuery(BoundStatement statement, String rowId, Function<String, Class<?>> entityTypeResolver) {
return executeQueryInternal(statement, entityTypeResolver)
.toList()
.map(all -> {
Map<String, Object> result = new HashMap<>();
all.forEach(cell -> {
if (!cell.getRowId().equals(rowId)) {
logger.warn("Read inconsistency requested - expected all columns for row {}, but found result for row {}", rowId, cell.getRowId());
} else {
result.put(cell.getColumn(), cell.getValue());
}
});
return result;
});
}
Optional<Map<String, Object>> executeRowQueryBlocking(BoundStatement statement, String rowId, Function<String, Class<?>> entityTypeResolver) {
return Optional.ofNullable(
executeRowQuery(statement, rowId, entityTypeResolver).toBlocking().firstOrDefault(null)
);
}
@Deprecated
public Observable<Cell> executeRawRangeQuery(PreparedStatement statement, Function<String, Class<?>> entityTypeResolver) {
List<Observable<Cell>> allQueries = tokenRanges.stream()
.map(range -> statement.bind().setToken("min", range.getStart()).setToken("max", range.getEnd()))
.map(boundStatement -> executeQueryInternal(boundStatement, entityTypeResolver).onBackpressureBuffer())
.collect(Collectors.toList());
return Observable.merge(allQueries);
}
public Observable<Pair<Object, Object>> executeRawRangeQuery2(String keyName, String valueName, PreparedStatement statement, Optional<Class<?>> entityType) {
List<Observable<Pair<Object, Object>>> allQueries = tokenRanges.stream()
.map(range -> statement.bind().setToken("min", range.getStart()).setToken("max", range.getEnd()))
.map(boundStatement -> executeQueryInternal2(keyName, valueName, boundStatement, entityType).onBackpressureBuffer())
.collect(Collectors.toList());
return Observable.merge(allQueries);
}
public Observable<Map<String, Map<String, Object>>> executeRangeQuery(PreparedStatement statement, Function<String, Class<?>> entityTypeResolver) {
return executeRawRangeQuery(statement, entityTypeResolver).toList()
.map(all -> {
Map<String, Map<String, Object>> result = new HashMap<>();
all.forEach(cell -> {
Map<String, Object> rowResult = result.computeIfAbsent(cell.getRowId(), k -> new HashMap<>());
rowResult.put(cell.getColumn(), cell.getValue());
});
return result;
});
}
/**
 * Runs a single bound statement as a paginated query over the dynamic column family
 * layout (key/column1/value), emitting one {@link Cell} per fetched row.
 * <p>
 * When {@code entityTypeResolver} is null, cell values are returned as raw strings.
 * Otherwise the resolver maps a column name to the entity type used for deserialization;
 * columns it resolves to null are skipped.
 */
private Observable<Cell> executeQueryInternal(BoundStatement boundStatement, Function<String, Class<?>> entityTypeResolver) {
    boundStatement.setFetchSize(pageSize);
    return FuturePaginatedQuery.paginatedQuery(
            () -> session.executeAsync(boundStatement),
            ResultSet::fetchMoreResults,
            (rs, total) -> {
                // Consume only the rows already fetched for this page; presumably iterating
                // further would make the driver's iterator fetch the next page synchronously
                // (TODO confirm against the driver's paging semantics).
                int remaining = rs.getAvailableWithoutFetching();
                List<Cell> pageItems = new ArrayList<>(remaining);
                for (Row row : rs) {
                    String key = row.getString("key");
                    String column = row.getString("column1");
                    if (entityTypeResolver == null) {
                        // No resolver configured: keep the value as its raw string form
                        String value = row.getString("value");
                        pageItems.add(new Cell(key, column, value));
                    } else {
                        Class<?> entityType = entityTypeResolver.apply(column);
                        if (entityType == null) {
                            // Resolver does not recognize this column; drop it silently
                            logger.debug("Ignoring column {}", column);
                        } else {
                            Object value = row.get("value", entityType);
                            pageItems.add(new Cell(key, column, value));
                        }
                    }
                    if (--remaining == 0) {
                        break;
                    }
                }
                logger.debug("Got page with {} items (total {})", pageItems.size(), total + pageItems.size());
                return pageItems;
            },
            PagingIterable::isFullyFetched
    );
}
/**
 * Runs a single bound statement as a paginated query over a simple two-column
 * (primary key / value) table, emitting one key/value {@link Pair} per fetched row.
 * <p>
 * When {@code type} is absent the value is read as a raw string; otherwise it is
 * deserialized to the given entity type.
 */
private Observable<Pair<Object, Object>> executeQueryInternal2(String keyName, String valueName, BoundStatement boundStatement, Optional<Class<?>> type) {
    boundStatement.setFetchSize(pageSize);
    return FuturePaginatedQuery.paginatedQuery(
            () -> session.executeAsync(boundStatement),
            ResultSet::fetchMoreResults,
            (rs, total) -> {
                // Consume only the rows already fetched for this page; presumably iterating
                // further would make the driver's iterator fetch the next page synchronously
                // (TODO confirm against the driver's paging semantics).
                int remaining = rs.getAvailableWithoutFetching();
                List<Pair<Object, Object>> pageItems = new ArrayList<>(remaining);
                for (Row row : rs) {
                    Object key = row.getObject(keyName);
                    if (!type.isPresent()) {
                        // No target type: keep the value as its raw string form
                        String value = row.getString(valueName);
                        pageItems.add(Pair.of(key, value));
                    } else {
                        Class<?> entityType = type.get();
                        Object value = row.get(valueName, entityType);
                        pageItems.add(Pair.of(key, value));
                    }
                    if (--remaining == 0) {
                        break;
                    }
                }
                logger.debug("Got page with {} items (total {})", pageItems.size(), total + pageItems.size());
                return pageItems;
            },
            PagingIterable::isFullyFetched
    );
}
}
| 1,377 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/UpdateOperations.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.io.IOException;
import javax.annotation.Nullable;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import rx.Observable;
import rx.subscriptions.Subscriptions;
/**
 * RxJava wrapper around asynchronous Cassandra mutating statements
 * (INSERT/UPDATE/DELETE/TRUNCATE).
 */
public class UpdateOperations {

    private final Session session;

    public UpdateOperations(ExecutionContext context) {
        this.session = context.getSession();
    }

    /**
     * Executes a mutating statement asynchronously. The returned observable emits no items;
     * it completes when the statement finishes, and errors if execution fails or the server
     * unexpectedly returns a non-empty result set. Unsubscribing cancels the in-flight request
     * (best effort).
     */
    Observable<Void> executeUpdate(BoundStatement statement) {
        return Observable.create(subscriber -> {
            ResultSetFuture resultSetFuture;
            try {
                resultSetFuture = session.executeAsync(statement);
            } catch (Exception e) {
                // executeAsync may throw synchronously (e.g. session closed); surface as onError
                subscriber.onError(e);
                return;
            }
            Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() {
                @Override
                public void onSuccess(@Nullable ResultSet rows) {
                    // An update is expected to produce an empty result set; anything else
                    // indicates the statement was not actually a mutation.
                    if (rows != null && !rows.isExhausted()) {
                        subscriber.onError(new IOException("Update operation should return empty result set"));
                    } else {
                        subscriber.onCompleted();
                    }
                }

                @Override
                public void onFailure(Throwable e) {
                    subscriber.onError(e);
                }
            }, MoreExecutors.directExecutor());
            // Propagate unsubscription as a cancellation request on the Cassandra future
            subscriber.add(Subscriptions.create(() -> resultSetFuture.cancel(false)));
        });
    }

    /**
     * Blocking variant of {@link #executeUpdate(BoundStatement)}; returns when the update completes
     * and rethrows any execution error.
     */
    void executeBlockingUpdate(BoundStatement statement) {
        executeUpdate(statement).toBlocking().firstOrDefault(null);
    }
}
| 1,378 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/Cell.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;

import java.util.Objects;
/**
*/
public class Cell {
private final String rowId;
private final String column;
private final Object value;
public Cell(String rowId, String column, Object value) {
this.rowId = rowId;
this.column = column;
this.value = value;
}
public String getRowId() {
return rowId;
}
public String getColumn() {
return column;
}
public Object getValue() {
return value;
}
@Override
public String toString() {
return "Cell{" +
"rowId='" + rowId + '\'' +
", column='" + column + '\'' +
", value=" + value +
'}';
}
}
| 1,379 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/executor/AsyncCassandraExecutor.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.executor;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.netflix.titus.common.util.tuple.Pair;
import rx.Observable;
/**
 * RxJava enabled Cassandra statement executor for dynamic column family definitions (key/column1/value).
 * Thin facade over {@link QueryOperations} and {@link UpdateOperations} sharing one {@link ExecutionContext}.
 */
public class AsyncCassandraExecutor {

    private final ExecutionContext context;
    private final UpdateOperations updateOperations;
    private final QueryOperations queryOperations;

    /**
     * @param pageSize fetch size used for paginated queries
     * @param split    token-range split factor for full table scans
     *                 (NOTE(review): presumed from ExecutionContext's contract — confirm)
     */
    public AsyncCassandraExecutor(Session session, int pageSize, int split) {
        this.context = new ExecutionContext(session, pageSize, split);
        this.updateOperations = new UpdateOperations(context);
        this.queryOperations = new QueryOperations(context);
    }

    public Session getSession() {
        return context.getSession();
    }

    /**
     * Executes a mutating statement; the observable completes when the update finishes and
     * errors if it fails or unexpectedly returns rows.
     */
    public Observable<Void> executeUpdate(BoundStatement statement) {
        return updateOperations.executeUpdate(statement);
    }

    /**
     * Returns map of { rowId -> { columnId -> entity }}. Single row may contain many columns, and each column value is
     * a JSON document, that is mapped to its corresponding type.
     */
    public Observable<Map<String, Map<String, Object>>> query(BoundStatement statement, Function<String, Class<?>> entityTypeResolver) {
        return queryOperations.executeQuery(statement, entityTypeResolver);
    }

    /**
     * Returns map of { rowId -> { columnId -> value }} for a full table scan. As no entity type
     * resolver is supplied, values are returned as raw strings (no deserialization).
     */
    public Observable<Map<String, Map<String, Object>>> rangeQuery(PreparedStatement statement) {
        return queryOperations.executeRangeQuery(statement, null);
    }

    /**
     * Returns map of { rowId -> { columnId -> entity }}. Single row may contain many columns, and each column value is
     * a JSON document, that is mapped to its corresponding type by the resolver; unresolved columns are skipped.
     */
    public Observable<Map<String, Map<String, Object>>> rangeQuery(PreparedStatement statement, Function<String, Class<?>> entityTypeResolver) {
        return queryOperations.executeRangeQuery(statement, entityTypeResolver);
    }

    /**
     * Reads { rowId, columnId, value } entries from Cassandra. The value is not deserialized.
     */
    @Deprecated
    public Observable<Cell> rawRangeQuery(PreparedStatement statement) {
        return queryOperations.executeRawRangeQuery(statement, null);
    }

    /**
     * Reads { primary_key, value } entries from Cassandra. The value is not deserialized.
     */
    public Observable<Pair<Object, Object>> rawRangeQuery2(String keyName, String valueName, PreparedStatement statement) {
        return queryOperations.executeRawRangeQuery2(keyName, valueName, statement, Optional.empty());
    }

    /**
     * Reads { rowId, columnId, value } entries from Cassandra, deserializing values via the resolver.
     */
    public Observable<Cell> rawRangeQuery(PreparedStatement statement, Function<String, Class<?>> entityTypeResolver) {
        return queryOperations.executeRawRangeQuery(statement, entityTypeResolver);
    }

    /**
     * Blocking variant of {@link #query(BoundStatement, Function)}.
     */
    public Map<String, Map<String, Object>> queryBlocking(BoundStatement statement, Function<String, Class<?>> entityTypeResolver) {
        return query(statement, entityTypeResolver).toBlocking().first();
    }

    /**
     * Reads the columns of a single row as a { columnId -> value } map.
     */
    public Observable<Map<String, Object>> rowQuery(BoundStatement statement, String rowId, Function<String, Class<?>> entityTypeResolver) {
        return queryOperations.executeRowQuery(statement, rowId, entityTypeResolver);
    }

    /**
     * Blocking variant of {@link #rowQuery(BoundStatement, String, Function)}; empty when the row is absent.
     */
    public Optional<Map<String, Object>> rowQueryBlocking(BoundStatement statement, String rowId, Function<String, Class<?>> entityTypeResolver) {
        return Optional.ofNullable(
                rowQuery(statement, rowId, entityTypeResolver).toBlocking().firstOrDefault(null)
        );
    }
}
| 1,380 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/CassandraUtils.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.util.List;
import java.util.stream.Collectors;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TableMetadata;
import com.datastax.driver.core.exceptions.TruncateException;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.cassandra.executor.AsyncCassandraExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * Helpers shared by the Cassandra CLI commands: truncating, reading and copying
 * simple two-column (primary key / value) tables.
 */
public class CassandraUtils {

    private static final Logger logger = LoggerFactory.getLogger(CassandraUtils.class);

    private static final int MAX_CONCURRENCY = 1000;

    private static final int TRUNCATE_ATTEMPTS = 3;

    public static final int PAGE_SIZE = 1000;
    public static final int SPLIT = 2;

    // Utility class; not instantiable.
    private CassandraUtils() {
    }

    /**
     * Truncates the given table, retrying a few times to work around transient consistency
     * failures.
     *
     * @throws IllegalStateException if the table could not be truncated after all attempts
     *                               (previously this failure was silently ignored, which could
     *                               corrupt a subsequent copy into a non-empty table)
     */
    public static void truncateTable(CommandContext context, String table) {
        for (int i = 0; i < TRUNCATE_ATTEMPTS; i++) {
            if (truncateTableInternal(context, table)) {
                return;
            }
        }
        throw new IllegalStateException("Cannot truncate table " + table + " after " + TRUNCATE_ATTEMPTS + " attempts");
    }

    /**
     * Single truncate attempt. Returns true if the table was truncated (or is already empty),
     * false on a recoverable consistency failure; rethrows unrecoverable errors.
     */
    private static boolean truncateTableInternal(CommandContext context, String table) {
        PreparedStatement truncateStatement = context.getTargetSession().prepare(
                "TRUNCATE \"" + table + "\""
        );
        try {
            context.getTargetCassandraExecutor().executeUpdate(truncateStatement.bind()).toBlocking().firstOrDefault(null);
        } catch (TruncateException e) {
            // Check if the table is empty
            logger.info("Couldn't complete the truncate operation. Checking if the table is empty: {}", table);
            Pair<Object, Object> value = readTwoColumnTable(context.getTargetSession(), table).take(1).toBlocking().firstOrDefault(null);
            if (value == null) {
                // Truncate failed, but the table is empty. It is ok to move on.
                logger.info("Truncate deemed as successful, as the table is empty: {}", table);
                return true;
            }
            if (e.getMessage().contains("Cannot achieve consistency level ALL")) {
                logger.warn("Recoverable truncate operations for table {}. Cause: {}", table, e.getMessage());
                return false;
            }
            // Not recoverable error. Re-throw it.
            throw e;
        }
        logger.info("Truncated table {}.{}", truncateStatement.getQueryKeyspace(), table);
        return true;
    }

    /**
     * Inspects the table metadata and returns (primaryKeyColumnName, valueColumnName).
     * The table must have exactly one non-primary-key column.
     */
    public static Pair<String, String> resolveColumnNamesInTwoColumnTable(Session sourceSession, String table) {
        TableMetadata tableMetadata = sourceSession.getCluster().getMetadata()
                .getKeyspace(sourceSession.getLoggedKeyspace())
                .getTable(table);
        String primaryKey = tableMetadata.getPartitionKey().get(0).getName();
        List<String> valueColumns = tableMetadata.getColumns().stream()
                .map(ColumnMetadata::getName)
                .filter(c -> !c.equals(primaryKey))
                .collect(Collectors.toList());
        Preconditions.checkState(valueColumns.size() == 1, "Expected one non primary key column, and is: %s", valueColumns);
        String valueColumn = valueColumns.get(0);
        return Pair.of(primaryKey, valueColumn);
    }

    /**
     * Streams all (key, value) pairs of a two-column table using a token-range scan.
     */
    public static Observable<Pair<Object, Object>> readTwoColumnTable(Session sourceSession, String table) {
        Pair<String, String> columnNames = resolveColumnNamesInTwoColumnTable(sourceSession, table);
        String primaryKey = columnNames.getLeft();
        String valueColumn = columnNames.getRight();

        PreparedStatement queryAllStatement = sourceSession.prepare(
                String.format("SELECT * FROM %s WHERE token(%s) > :min AND token(%s) <= :max", table, primaryKey, primaryKey)
        );
        AsyncCassandraExecutor executor = new AsyncCassandraExecutor(sourceSession, PAGE_SIZE, SPLIT);
        return executor.rawRangeQuery2(primaryKey, valueColumn, queryAllStatement);
    }

    /**
     * Inserts all (key, value) pairs from the source stream into the given two-column table,
     * with bounded write concurrency. Returns the number of written records.
     */
    public static long writeIntoTwoColumnTable(Session targetSession, String table, Observable<Pair<Object, Object>> sourceData) {
        Pair<String, String> columnNames = resolveColumnNamesInTwoColumnTable(targetSession, table);
        String primaryKey = columnNames.getLeft();
        String valueColumn = columnNames.getRight();

        PreparedStatement insertStatement = targetSession.prepare(
                String.format("INSERT INTO %s (%s, %s) VALUES (?, ?)", table, primaryKey, valueColumn)
        );
        AsyncCassandraExecutor executor = new AsyncCassandraExecutor(targetSession, PAGE_SIZE, SPLIT);

        return sourceData
                .flatMap(pair -> {
                    BoundStatement boundStatement = insertStatement.bind(pair.getLeft(), pair.getRight());
                    // executeUpdate emits nothing; append 1 per completed insert so reduce can count
                    return executor
                            .executeUpdate(boundStatement)
                            .cast(Long.class)
                            .concatWith(Observable.just(1L));
                }, MAX_CONCURRENCY)
                .reduce(0L, Long::sum)
                .toBlocking().firstOrDefault(null);
    }

    /**
     * Copies all records of a two-column table from the source keyspace to the target keyspace.
     */
    public static void copyTable(CommandContext context, String table) {
        long recordCount = writeIntoTwoColumnTable(
                context.getTargetSession(),
                table,
                readTwoColumnTable(context.getSourceSession(), table)
        );
        logger.info("Copied {} records from table {}.{} to {}.{}", recordCount,
                context.getSourceKeySpace(), table,
                context.getTargetKeySpace(), table
        );
    }
}
| 1,381 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/CassTool.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.ext.cassandra.tool.command.CreateKeyspaceCommand;
import com.netflix.titus.ext.cassandra.tool.command.DeleteKeyspaceCommand;
import com.netflix.titus.ext.cassandra.tool.command.JobCopyCommand;
import com.netflix.titus.ext.cassandra.tool.command.JobReconcilerCommand;
import com.netflix.titus.ext.cassandra.tool.command.JobSnapshotDownloadCommand;
import com.netflix.titus.ext.cassandra.tool.command.JobSnapshotUploadCommand;
import com.netflix.titus.ext.cassandra.tool.command.JobTruncateCommand;
import com.netflix.titus.ext.cassandra.tool.command.TestStoreLoadCommand;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Command line entry point for the Cassandra administration tool. Registers all known
 * commands and adds the common host/port options required to connect to a cluster.
 */
public class CassTool extends AbstractCassTool {

    private static final Logger logger = LoggerFactory.getLogger(CassTool.class);

    /** Registry of all supported commands, keyed by the command name given on the command line. */
    public static final Map<String, Command> COMMANDS = ImmutableMap.<String, Command>builder()
            .put("jobReconciler", new JobReconcilerCommand())
            .put("jobCopy", new JobCopyCommand())
            .put("jobTruncate", new JobTruncateCommand())
            .put("jobSnapshotDownload", new JobSnapshotDownloadCommand())
            .put("jobSnapshotUpload", new JobSnapshotUploadCommand())
            .put("createKeyspace", new CreateKeyspaceCommand())
            .put("deleteKeyspace", new DeleteKeyspaceCommand())
            .put("testStoreLoad", new TestStoreLoadCommand())
            .build();

    public CassTool(String[] args) {
        super(args, COMMANDS);
    }

    @Override
    protected Options buildOptions(Command command) {
        Options options = super.buildOptions(command);
        // Connection options common to every command
        Option host = Option.builder("H")
                .longOpt("target")
                .desc("The cassandra target host")
                .hasArg()
                .required()
                .build();
        Option port = Option.builder("p")
                .longOpt("port")
                .desc("The cassandra port")
                .hasArg()
                .required()
                .build();
        options.addOption(host);
        options.addOption(port);
        return options;
    }

    @Override
    protected CommandContext newContext(CommandLine cli) {
        return DefaultCommandContext.newCommandContext(cli);
    }

    public static void main(String[] args) {
        try {
            // Exit code 0 on success, -2 on command failure, -1 on bad arguments
            System.exit(new CassTool(args).execute() ? 0 : -2);
        } catch (IllegalArgumentException e) {
            logger.error(e.getMessage(), e);
            System.exit(-1);
        }
    }
}
| 1,382 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/Command.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import org.apache.commons.cli.Options;
/**
 * A single named CLI command executed by the Cassandra tool.
 */
public interface Command {

    /** Declares which Cassandra keyspace connections a command requires. */
    enum CommandType {
        NoKeySpace, // A command does not interact with Cassandra
        TargetKeySpace, // A command operates on a single Cassandra keyspace (target)
        SourceTargetKeySpaces // A command operates on two Cassandra keyspaces (source and target)
    }

    /** Human readable description shown in the tool's help output. */
    String getDescription();

    /** Keyspace requirements, used to derive the mandatory command line options. */
    CommandType getCommandType();

    /** Command specific command line options, merged with the common ones. */
    Options getOptions();

    /** Runs the command with the parsed command line and session context. */
    void execute(CommandContext commandContext) throws Exception;
}
| 1,383 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/DefaultCommandContext.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.ext.cassandra.executor.AsyncCassandraExecutor;
import org.apache.commons.cli.CommandLine;
/**
 * Default {@link CommandContext} implementation. Holds the parsed command line plus lazily
 * created source/target keyspace sessions and their executors.
 */
public class DefaultCommandContext implements CommandContext {

    private static final int PAGE_SIZE = 1000;
    private static final int SPLIT = 2;

    private final CommandLine commandLine;
    private final Session defaultSession;
    private final Optional<CassSession> sourceSession;
    private final Optional<CassSession> targetSession;

    /**
     * @param sourceSessionFactory creates a session bound to the keyspace given by the -s option
     * @param targetSessionFactory creates a session bound to the keyspace given by the -t option
     */
    public DefaultCommandContext(CommandLine commandLine,
                                 Session defaultSession,
                                 Function<String, Session> sourceSessionFactory,
                                 Function<String, Session> targetSessionFactory) {
        this.commandLine = commandLine;
        this.defaultSession = defaultSession;
        // Only the lazy wrappers are created here; sessions connect on first use
        this.sourceSession = commandLine.hasOption('s')
                ? Optional.of(new CassSession(sourceSessionFactory, commandLine.getOptionValue('s')))
                : Optional.empty();
        this.targetSession = commandLine.hasOption('t')
                ? Optional.of(new CassSession(targetSessionFactory, commandLine.getOptionValue('t')))
                : Optional.empty();
    }

    @Override
    public void shutdown() {
    }

    @Override
    public CommandLine getCommandLine() {
        return commandLine;
    }

    @Override
    public Session getDefaultSession() {
        return defaultSession;
    }

    @Override
    public Session getSourceSession() {
        Preconditions.checkState(sourceSession.isPresent(), "Cassandra source keyspace not defined");
        return sourceSession.get().getOrCreateSession();
    }

    @Override
    public Session getTargetSession() {
        Preconditions.checkState(targetSession.isPresent(), "Cassandra target keyspace not defined");
        return targetSession.get().getOrCreateSession();
    }

    @Override
    public String getSourceKeySpace() {
        Preconditions.checkState(sourceSession.isPresent(), "Cassandra source keyspace not defined");
        return sourceSession.get().getKeySpace();
    }

    @Override
    public String getTargetKeySpace() {
        Preconditions.checkState(targetSession.isPresent(), "Cassandra target keyspace not defined");
        return targetSession.get().getKeySpace();
    }

    @Override
    public AsyncCassandraExecutor getSourceCassandraExecutor() {
        Preconditions.checkState(sourceSession.isPresent(), "Cassandra source keyspace not defined");
        return sourceSession.get().getOrCreateExecutor();
    }

    @Override
    public AsyncCassandraExecutor getTargetCassandraExecutor() {
        Preconditions.checkState(targetSession.isPresent(), "Cassandra target keyspace not defined");
        return targetSession.get().getOrCreateExecutor();
    }

    /**
     * Builds a context connected to the host(s)/port given by the -H and -p options.
     * The returned context owns the cluster and closes it on {@link #shutdown()}.
     */
    public static CommandContext newCommandContext(CommandLine commandLine) {
        List<String> ips = StringExt.splitByComma(commandLine.getOptionValue("H"));
        int sourcePort = Integer.parseInt(commandLine.getOptionValue("p"));

        QueryOptions queryOptions = new QueryOptions()
                .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);

        Cluster cluster = Cluster.builder()
                // FIX: List.toArray() returns Object[], so the previous (String[]) cast threw
                // ClassCastException at runtime; the typed toArray overload is required here.
                .addContactPoints(ips.toArray(new String[0]))
                .withPort(sourcePort)
                .withQueryOptions(queryOptions)
                .build();

        return new DefaultCommandContext(
                commandLine,
                cluster.newSession(),
                sourceKeySpace -> cluster.connect('"' + sourceKeySpace + '"'),
                targetKeySpace -> cluster.connect('"' + targetKeySpace + '"')
        ) {
            @Override
            public void shutdown() {
                cluster.close();
            }
        };
    }

    /**
     * Lazily created session/executor pair for one keyspace. Declared static because it does not
     * use the enclosing instance; this drops the hidden outer-class reference.
     */
    static class CassSession {

        private final Function<String, Session> sessionFactory;
        private final String keySpace;

        private Session session;
        private AsyncCassandraExecutor executor;

        CassSession(Function<String, Session> sessionFactory, String keySpace) {
            this.sessionFactory = sessionFactory;
            this.keySpace = keySpace;
        }

        String getKeySpace() {
            return keySpace;
        }

        Session getOrCreateSession() {
            if (session == null) {
                session = sessionFactory.apply(keySpace);
            }
            return session;
        }

        AsyncCassandraExecutor getOrCreateExecutor() {
            if (executor == null) {
                executor = createCassExecutor(getOrCreateSession());
            }
            return executor;
        }

        private AsyncCassandraExecutor createCassExecutor(Session session) {
            return new AsyncCassandraExecutor(session, PAGE_SIZE, SPLIT);
        }
    }
}
| 1,384 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/AbstractCassTool.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class for the Cassandra CLI tools: parses the command name and its options,
 * builds the {@link CommandContext} and runs the selected {@link Command}.
 */
public abstract class AbstractCassTool {

    private static final Logger logger = LoggerFactory.getLogger(AbstractCassTool.class);

    private final String cmdName;
    private final boolean helpRequested;
    private final CommandContext context;
    private final Map<String, Command> commands;

    /**
     * @param args     raw command line: first token is the command name, the rest are its options
     * @param commands registry of supported commands, keyed by name
     * @throws IllegalArgumentException if the command is unknown or its options are invalid
     */
    protected AbstractCassTool(String[] args, Map<String, Command> commands) {
        this.commands = commands;
        this.helpRequested = args.length == 0 || hasHelpOption(args);
        if (helpRequested) {
            this.context = null;
            this.cmdName = null;
        } else {
            this.cmdName = args[0];
            Command command = findCommand();
            String[] cmdOptions = Arrays.copyOfRange(args, 1, args.length);
            CommandLine cli = parseOptions(cmdOptions, buildOptions(command));
            this.context = newContext(cli);
        }
    }

    /** Creates the execution context from the parsed command line. */
    protected abstract CommandContext newContext(CommandLine cli);

    /**
     * Runs the selected command (or prints help). Returns true on success, false on failure.
     */
    public boolean execute() {
        if (helpRequested) {
            printHelp();
            return true;
        }
        Command cmdExec = findCommand();

        logger.info("Executing command {}...", cmdName);
        long startTime = System.currentTimeMillis();
        try {
            cmdExec.execute(context);
        } catch (Exception e) {
            logger.error("Command execution failure", e);
            return false;
        } finally {
            context.shutdown();
        }
        logger.info("Command {} executed in {}[ms]", cmdName, System.currentTimeMillis() - startTime);

        return true;
    }

    private Command findCommand() {
        Command cmd = commands.get(cmdName);
        if (cmd == null) {
            throw new IllegalArgumentException("Unrecognized command " + cmdName);
        }
        return cmd;
    }

    /**
     * Builds the option set for a command: common help/keyspace options derived from the
     * command type, merged with the command's own options.
     */
    protected Options buildOptions(Command command) {
        Options options = new Options();
        options.addOption(Option.builder("h").longOpt("help").desc("This help information").build());

        switch (command.getCommandType()) {
            case NoKeySpace:
                break;
            case TargetKeySpace:
                options.addOption(Option.builder("t").longOpt("target").hasArg().required().desc("Target keyspace name").build());
                break;
            case SourceTargetKeySpaces:
                options.addOption(Option.builder("s").longOpt("source").hasArg().required().desc("Source keyspace name").build());
                options.addOption(Option.builder("t").longOpt("target").hasArg().required().desc("Target keyspace name").build());
                break;
        }

        // Merge common options with command specific ones
        command.getOptions().getOptions().forEach(options::addOption);

        return options;
    }

    private boolean hasHelpOption(String[] args) {
        // Enhanced for-loop instead of the index-based scan (idiomatic; no index needed)
        for (String arg : args) {
            if (arg.equals("-h") || arg.equals("--help")) {
                return true;
            }
        }
        return false;
    }

    private CommandLine parseOptions(String[] args, Options options) {
        CommandLineParser parser = new DefaultParser();
        CommandLine cli;
        try {
            cli = parser.parse(options, args, true);
        } catch (ParseException e) {
            throw new IllegalArgumentException(e.getMessage());
        }
        if (!cli.getArgList().isEmpty()) {
            throw new IllegalArgumentException("Too many command line arguments: " + cli.getArgList());
        }
        return cli;
    }

    /** Prints usage, listing every registered command with its options. */
    private void printHelp() {
        PrintWriter writer = new PrintWriter(System.out);
        HelpFormatter formatter = new HelpFormatter();

        writer.println("Usage: CassTool <cmd> [<option1>... ]");
        writer.println();
        writer.println("Commands:");
        commands.forEach((name, cmd) -> {
            writer.println(name + ": " + cmd.getDescription());
            formatter.printOptions(writer, 128, buildOptions(cmd), 4, 4);
            writer.println();
        });
        writer.flush();
    }
}
| 1,385 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/StorePreconditions.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.util.regex.Pattern;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.StringExt;
/**
* A collection of invariant predicates/enforcers.
*/
/**
 * A collection of invariant predicates/enforcers.
 */
public final class StorePreconditions {

    // FIX: was a mutable static field; constant arrays of precompiled patterns must be final.
    private static final Pattern[] DEV_STACK_PATTERNS = new Pattern[]{
            Pattern.compile(".*dev.*"),
            Pattern.compile(".*(test|Testing).*"),
            Pattern.compile(".*backup.*"),
            Pattern.compile(".*BACKUP.*"),
    };

    private StorePreconditions() {
    }

    /**
     * Check if keyspace belongs to development or test stack. Certain commands are destructive,
     * and should be never allowed on production stacks.
     *
     * @return true if the keyspace name matches any dev/test/backup pattern
     * @throws IllegalArgumentException if the keyspace name is null or empty
     */
    public static boolean isDevOrBackupStack(String keySpaceName) {
        Preconditions.checkArgument(StringExt.isNotEmpty(keySpaceName), "Expected keyspace name not null");
        for (Pattern p : DEV_STACK_PATTERNS) {
            if (p.matcher(keySpaceName).matches()) {
                return true;
            }
        }
        return false;
    }
}
| 1,386 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/CommandContext.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import com.datastax.driver.core.Session;
import com.netflix.titus.ext.cassandra.executor.AsyncCassandraExecutor;
import org.apache.commons.cli.CommandLine;
/**
 * Execution context shared by Cassandra tool commands. Gives access to the parsed command line and to
 * Cassandra sessions/executors for the source (read-only) and target (mutated) keyspaces.
 */
public interface CommandContext {

    /**
     * Parsed command line options of the command being executed.
     */
    CommandLine getCommandLine();

    /**
     * Returns session not associated with any keyspace.
     */
    Session getDefaultSession();

    /**
     * Returns session associated with target (mutated) keyspace.
     */
    Session getTargetSession();

    /**
     * Name of the target (mutated) keyspace.
     */
    String getTargetKeySpace();

    /**
     * Async executor bound to the target keyspace.
     */
    AsyncCassandraExecutor getTargetCassandraExecutor();

    /**
     * Returns session associated with source (read-only) keyspace.
     */
    Session getSourceSession();

    /**
     * Name of the source (read-only) keyspace.
     */
    String getSourceKeySpace();

    /**
     * Async executor bound to the source keyspace.
     */
    AsyncCassandraExecutor getSourceCassandraExecutor();

    /**
     * Releases resources held by this context.
     */
    void shutdown();
}
| 1,387 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/CassandraSchemas.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool;
import java.util.List;
import com.netflix.titus.common.util.CollectionsExt;
import static java.util.Arrays.asList;
/**
 * Names of the Cassandra tables holding Titus job data, grouped into active and archived sets.
 * Constants-only holder; not instantiable.
 */
public final class CassandraSchemas {

    public static final String ACTIVE_JOB_IDS_TABLE = "active_job_ids";
    public static final String ACTIVE_JOBS_TABLE = "active_jobs";
    public static final String ACTIVE_TASK_IDS_TABLE = "active_task_ids";
    public static final String ACTIVE_TASKS_TABLE = "active_tasks";

    public static final String ARCHIVED_JOBS_TABLE = "archived_jobs";
    public static final String ARCHIVED_TASK_IDS_TABLE = "archived_task_ids";
    public static final String ARCHIVED_TASKS_TABLE = "archived_tasks";

    /** Tables holding data of active jobs and tasks. */
    public static final List<String> JOB_ACTIVE_TABLES = asList(
            ACTIVE_JOB_IDS_TABLE, ACTIVE_JOBS_TABLE, ACTIVE_TASK_IDS_TABLE, ACTIVE_TASKS_TABLE
    );

    /** Tables holding data of archived jobs and tasks. */
    public static final List<String> JOB_ARCHIVE_TABLES = asList(
            ARCHIVED_JOBS_TABLE, ARCHIVED_TASK_IDS_TABLE, ARCHIVED_TASKS_TABLE
    );

    /** All job related tables (active and archived). */
    public static final List<String> JOB_TABLES = CollectionsExt.merge(JOB_ACTIVE_TABLES, JOB_ARCHIVE_TABLES);

    // Utility class; prevent instantiation.
    private CassandraSchemas() {
    }
}
| 1,388 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/snapshot/JobSnapshotDownloader.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.snapshot;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import com.datastax.driver.core.Session;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.cassandra.tool.CassandraSchemas;
import com.netflix.titus.ext.cassandra.tool.CassandraUtils;
import rx.Observable;
/**
 * Downloads jobs active data from Cassandra database into set of files. A snapshot can be loaded back into
 * Cassandra using {@link JobSnapshotLoader}.
 */
public class JobSnapshotDownloader {

    private static final ObjectMapper MAPPER;

    static {
        MAPPER = new ObjectMapper();
        MAPPER.enable(SerializationFeature.INDENT_OUTPUT);
    }

    private final Session session;
    private final boolean includeArchived;
    private final File outputFolder;

    /**
     * @param session         session bound to the keyspace to read from
     * @param includeArchived when true, the archived job/task tables are downloaded as well
     * @param output          directory for the snapshot files; created on demand by {@link #download()}
     * @throws IllegalArgumentException when {@code output} exists but is not a directory
     */
    public JobSnapshotDownloader(Session session, boolean includeArchived, File output) {
        this.session = session;
        this.includeArchived = includeArchived;
        // Fix: the message template used "%" instead of "%s", so the offending path was never rendered.
        Preconditions.checkArgument(!output.exists() || output.isDirectory(), "%s exists and is not a directory", output);
        this.outputFolder = output;
    }

    /**
     * Writes each job table into a JSON file named {@code <table>.json} in the output folder. Archived
     * tables are included only when this downloader was created with {@code includeArchived == true}.
     */
    public void download() {
        if (!outputFolder.exists()) {
            Preconditions.checkState(outputFolder.mkdirs(), "Cannot create output folder: %s", outputFolder.getAbsolutePath());
        }
        writeIdBuckets(CassandraSchemas.ACTIVE_JOB_IDS_TABLE);
        writeDataTable(CassandraSchemas.ACTIVE_JOBS_TABLE);
        writeIdMappingTable(CassandraSchemas.ACTIVE_TASK_IDS_TABLE);
        writeDataTable(CassandraSchemas.ACTIVE_TASKS_TABLE);
        if (includeArchived) {
            writeDataTable(CassandraSchemas.ARCHIVED_JOBS_TABLE);
            writeIdMappingTable(CassandraSchemas.ARCHIVED_TASK_IDS_TABLE);
            writeDataTable(CassandraSchemas.ARCHIVED_TASKS_TABLE);
        }
    }

    // Entity tables: the second column holds a JSON document; all rows are written out as one JSON array.
    private void writeDataTable(String table) {
        File output = new File(outputFolder, table + ".json");
        List<JsonNode> allItems = CassandraUtils.readTwoColumnTable(session, table)
                .flatMap(p -> {
                    try {
                        return Observable.just(MAPPER.readTree((String) p.getRight()));
                    } catch (IOException e) {
                        return Observable.error(e);
                    }
                })
                .toList()
                .toBlocking().first();
        System.out.println(String.format("Writing %s rows from table %s to file: %s...", allItems.size(), table, output));
        try {
            MAPPER.writeValue(output, allItems);
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    /**
     * A table holding buckets, where a key is bucket id of type int, and the value is a string.
     */
    private void writeIdBuckets(String table) {
        writeMapping(table, allItems -> {
            Map<Integer, List<String>> buckets = new HashMap<>();
            allItems.forEach(pair ->
                    buckets.computeIfAbsent((Integer) pair.getLeft(), b -> new ArrayList<>()).add((String) pair.getRight())
            );
            return buckets;
        });
    }

    /**
     * A table holding ids has two columns of string type. We encode these values as map entries in output JSON document.
     */
    private void writeIdMappingTable(String table) {
        writeMapping(table, allItems -> {
            Map<String, List<String>> jobTaskMap = new HashMap<>();
            allItems.forEach(pair ->
                    jobTaskMap.computeIfAbsent((String) pair.getLeft(), b -> new ArrayList<>()).add((String) pair.getRight())
            );
            return jobTaskMap;
        });
    }

    // Reads all rows of a two-column table, converts them with 'mapper', and writes the result as one JSON document.
    private void writeMapping(String table, Function<List<Pair<Object, Object>>, Map<?, ?>> mapper) {
        File output = new File(outputFolder, table + ".json");
        List<Pair<Object, Object>> allItems = CassandraUtils.readTwoColumnTable(session, table)
                .toList()
                .toBlocking().first();
        System.out.println(String.format("Writing %s rows from table %s to file: %s...", allItems.size(), table, output));
        try {
            MAPPER.writeValue(output, mapper.apply(allItems));
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}
| 1,389 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/snapshot/JobSnapshotLoader.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.snapshot;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import com.datastax.driver.core.Session;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.cassandra.tool.CassandraSchemas;
import com.netflix.titus.ext.cassandra.tool.CassandraUtils;
import rx.Observable;
/**
 * Loads jobs active data from files into Cassandra database. A snapshot can be created using {@link JobSnapshotDownloader}.
 */
public class JobSnapshotLoader {

    private static final ObjectMapper MAPPER;

    static {
        MAPPER = new ObjectMapper();
        MAPPER.enable(SerializationFeature.INDENT_OUTPUT);
    }

    private final Session session;
    private final File inputFolder;

    /**
     * @param session     session bound to the keyspace to write into
     * @param inputFolder directory containing one {@code <table>.json} file per active job table
     * @throws IllegalArgumentException when any of the expected snapshot files is missing
     */
    public JobSnapshotLoader(Session session, File inputFolder) {
        checkAllFilesExist(inputFolder);
        this.session = session;
        this.inputFolder = inputFolder;
    }

    // Fail fast if any of the active-table snapshot files is absent, before any data is written.
    private void checkAllFilesExist(File inputFolder) {
        CassandraSchemas.JOB_ACTIVE_TABLES.forEach(table -> {
            File input = new File(inputFolder, table + ".json");
            Preconditions.checkArgument(input.isFile(), "File not found: %s", input);
        });
    }

    /**
     * Loads all active job tables from the snapshot files into Cassandra.
     */
    public void load() {
        readIdBuckets(CassandraSchemas.ACTIVE_JOB_IDS_TABLE);
        readDataTable(CassandraSchemas.ACTIVE_JOBS_TABLE);
        readIdMappingTable(CassandraSchemas.ACTIVE_TASK_IDS_TABLE);
        readDataTable(CassandraSchemas.ACTIVE_TASKS_TABLE);
    }

    // Entity tables: a JSON array of objects; each object's "id" field becomes the row key.
    private void readDataTable(String table) {
        ArrayNode jsonTree = (ArrayNode) readJsonTree(table);
        List<Pair<Object, Object>> items = new ArrayList<>();
        jsonTree.forEach(item -> {
            try {
                items.add(Pair.of(item.get("id").textValue(), MAPPER.writeValueAsString(item)));
            } catch (JsonProcessingException e) {
                throw new IllegalStateException(e);
            }
        });
        writeItems(table, items);
    }

    /**
     * A table holding buckets, where a key is bucket id of type int, and the value is a string.
     */
    private void readIdBuckets(String table) {
        ObjectNode jsonTree = (ObjectNode) readJsonTree(table);
        List<Pair<Object, Object>> items = new ArrayList<>();
        jsonTree.fieldNames().forEachRemaining(key -> {
            ArrayNode values = (ArrayNode) jsonTree.get(key);
            int bucketId = Integer.parseInt(key);
            values.forEach(value -> {
                items.add(Pair.of(bucketId, value.asText()));
            });
        });
        writeItems(table, items);
    }

    /**
     * A table holding ids has two columns of string type. We encode these values as map entries in output JSON document.
     */
    private void readIdMappingTable(String table) {
        ObjectNode jsonTree = (ObjectNode) readJsonTree(table);
        List<Pair<Object, Object>> items = new ArrayList<>();
        jsonTree.fieldNames().forEachRemaining(key -> {
            ArrayNode values = (ArrayNode) jsonTree.get(key);
            values.forEach(value -> items.add(Pair.of(key, value.asText())));
        });
        writeItems(table, items);
    }

    // Shared write + report step, previously duplicated in each read* method (also fixes the "writen" typo).
    private void writeItems(String table, List<Pair<Object, Object>> items) {
        long written = CassandraUtils.writeIntoTwoColumnTable(session, table, Observable.from(items));
        System.out.println(String.format("Successfully written %s entries into table %s", written, table));
    }

    private JsonNode readJsonTree(String table) {
        File input = new File(inputFolder, table + ".json");
        try {
            JsonNode jsonTree = MAPPER.readTree(input);
            // Message fixed: rows are loaded *from* the file, not written "to" it.
            System.out.println(String.format("Loading %s rows into table %s from file: %s...", jsonTree.size(), table, input));
            return jsonTree;
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }
}
| 1,390 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/CreateKeyspaceCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.datastax.driver.core.Session;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.netflix.titus.common.util.IOExt;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CreateKeyspaceCommand implements Command {

    private static final Logger logger = LoggerFactory.getLogger(CreateKeyspaceCommand.class);

    private static final String TABLES_CQL_FILE = "tables.cql";

    @Override
    public String getDescription() {
        // Fixed: the two concatenated literals previously produced "...tablesunder..." (missing space),
        // and the text now reflects that an existing keyspace causes a failure rather than being skipped.
        return "Creates keyspaces (which must not already exist) and all the tables under them";
    }

    @Override
    public CommandType getCommandType() {
        return CommandType.NoKeySpace;
    }

    @Override
    public Options getOptions() {
        Options options = new Options();
        options.addOption(Option.builder("k")
                .longOpt("keyspace")
                .desc("The name of the keyspace")
                .hasArg()
                .required()
                .build());
        options.addOption(Option.builder("r")
                .longOpt("replication")
                .desc("The replication configuration to use for keyspace creation")
                .hasArg()
                .required()
                .build());
        return options;
    }

    /**
     * Creates every keyspace given via the comma separated {@code -k} option, then executes the CQL
     * statements from the bundled {@value #TABLES_CQL_FILE} resource against each of them.
     *
     * @throws IllegalStateException when any of the requested keyspaces already exists
     */
    @Override
    public void execute(CommandContext commandContext) throws Exception {
        CommandLine commandLine = commandContext.getCommandLine();
        String keyspaceOption = commandLine.getOptionValue("keyspace");
        Set<String> keyspaces = new HashSet<>(StringExt.splitByComma(keyspaceOption));
        Session session = commandContext.getDefaultSession();

        Preconditions.checkArgument(!keyspaces.isEmpty(), "You must specify at least one keyspace.");

        logger.info("Verifying that keyspaces: {} do not exist.", keyspaces);
        for (String keyspace : keyspaces) {
            boolean keyspaceExists = session.getCluster().getMetadata().getKeyspace(keyspace) != null;
            if (keyspaceExists) {
                throw new IllegalStateException("Keyspace: " + keyspace + " already exists. You must first delete it.");
            }
        }

        String replication = commandLine.getOptionValue("replication");
        List<String> cqlStatements = convertFileToCqlQueries();
        for (String keyspace : keyspaces) {
            logger.info("Creating keyspace: {}", keyspace);
            session.execute("CREATE KEYSPACE " + keyspace + " " + replication);
            session.execute("USE " + keyspace);
            for (String cqlStatement : cqlStatements) {
                session.execute(cqlStatement);
            }
        }
    }

    /**
     * Reads the bundled CQL resource and splits it into individual statements. A statement ends on the
     * first line containing a ';'; blank lines are skipped.
     */
    private List<String> convertFileToCqlQueries() throws IOException {
        InputStream fileResourceAsStream = IOExt.getFileResourceAsStream(TABLES_CQL_FILE);
        Preconditions.checkNotNull(fileResourceAsStream, TABLES_CQL_FILE + " was not found");
        List<String> lines = IOExt.readLines(new InputStreamReader(fileResourceAsStream));
        List<String> cqlQueries = new ArrayList<>();
        StringBuilder cqlQuery = new StringBuilder();
        for (String line : lines) {
            if (Strings.isNullOrEmpty(line)) {
                continue;
            }
            boolean queryEndLine = line.contains(";");
            cqlQuery.append(line);
            if (queryEndLine) {
                cqlQueries.add(cqlQuery.toString());
                cqlQuery = new StringBuilder();
            } else {
                cqlQuery.append("\n");
            }
        }
        return cqlQueries;
    }
}
| 1,391 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/JobSnapshotDownloadCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.io.File;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import com.netflix.titus.ext.cassandra.tool.snapshot.JobSnapshotDownloader;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
public class JobSnapshotDownloadCommand implements Command {

    @Override
    public String getDescription() {
        return "Save job tables content to a disk in JSON format";
    }

    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }

    @Override
    public Options getOptions() {
        Options options = new Options();
        options.addOption(Option.builder("a").longOpt("archive").desc("Include archived tables").build());
        options.addOption(Option.builder("o")
                // Fixed: the long option name contained a space ("output folder"), which could never be
                // entered on a command line; "output-folder" is usable and the -o short form is unchanged.
                .longOpt("output-folder")
                .required()
                .hasArg()
                .desc("Output folder for snapshot files")
                .build()
        );
        return options;
    }

    /**
     * Downloads the job tables (with {@code -a} also the archived ones) into the folder given by {@code -o}.
     */
    @Override
    public void execute(CommandContext context) {
        File output = new File(context.getCommandLine().getOptionValue('o'));
        boolean includeArchived = context.getCommandLine().hasOption('a');
        JobSnapshotDownloader downloader = new JobSnapshotDownloader(context.getTargetSession(), includeArchived, output);
        downloader.download();
    }
}
| 1,392 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/DeleteKeyspaceCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import com.datastax.driver.core.Session;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import org.apache.commons.cli.Options;
public class DeleteKeyspaceCommand implements Command {

    @Override
    public String getDescription() {
        return "Deletes keyspace";
    }

    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }

    @Override
    public Options getOptions() {
        return new Options();
    }

    /**
     * Drops the target keyspace, refusing keyspaces whose name contains "main" and verifying that the
     * keyspace existed before the drop and is gone afterwards.
     */
    @Override
    public void execute(CommandContext commandContext) {
        Session session = commandContext.getTargetSession();
        String keyspace = commandContext.getTargetKeySpace();

        // Guard against dropping keyspaces whose name suggests they are primary ("main") keyspaces.
        if (keyspace.contains("main")) {
            throw new IllegalArgumentException("Cannot delete keyspaces that contain the word main");
        }
        if (!keyspaceExists(session, keyspace)) {
            throw new IllegalStateException("Keyspace: " + keyspace + " does not exist.");
        }

        session.execute("DROP KEYSPACE " + keyspace);

        // Post-condition: the drop must have actually removed the keyspace.
        if (keyspaceExists(session, keyspace)) {
            throw new IllegalStateException("Keyspace: " + keyspace + " exists after deletion.");
        }
    }

    // True when the cluster metadata knows about the given keyspace.
    private static boolean keyspaceExists(Session session, String keyspace) {
        return session.getCluster().getMetadata().getKeyspace(keyspace) != null;
    }
}
| 1,393 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/JobReconcilerCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.cassandra.tool.CassandraUtils;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import org.apache.commons.cli.Options;
import static com.netflix.titus.ext.cassandra.tool.CassandraSchemas.ACTIVE_JOBS_TABLE;
import static com.netflix.titus.ext.cassandra.tool.CassandraSchemas.ACTIVE_JOB_IDS_TABLE;
import static com.netflix.titus.ext.cassandra.tool.CassandraSchemas.ACTIVE_TASKS_TABLE;
import static com.netflix.titus.ext.cassandra.tool.CassandraSchemas.ACTIVE_TASK_IDS_TABLE;
public class JobReconcilerCommand implements Command {

    @Override
    public String getDescription() {
        return "Report inconsistencies between V3 job/task tables";
    }

    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }

    @Override
    public Options getOptions() {
        return new Options();
    }

    /**
     * Loads all job tables, cross-checks them for referential consistency and prints a report of all
     * violations found. Read-only; nothing is modified.
     */
    @Override
    public void execute(CommandContext context) {
        Reconciler reconciler = new Reconciler(context);
        reconciler.loadAllTables();
        reconciler.checkDataConsistency();
        reconciler.printReport();
    }

    /**
     * Single-use helper that accumulates loaded table contents and detected violations.
     * Declared static since it uses no state of the enclosing command instance (a non-static inner
     * class would carry a needless hidden reference to it).
     */
    private static class Reconciler {

        private final CommandContext context;

        // Row counts per table, included in the final report.
        private final Map<String, Integer> tableSizes = new HashMap<>();

        private int violationCounter;
        private final List<Pair<String, String>> violations = new ArrayList<>();

        private Set<String> jobIds;
        private List<Job<?>> jobs;
        private Map<String, String> taskIdToJobIdMap;
        private List<Task> tasks;

        private Reconciler(CommandContext context) {
            this.context = context;
        }

        private void loadAllTables() {
            jobIds = loadJobIds();
            System.out.println("Loaded jobIds: " + jobIds.size());
            jobs = loadJobs();
            System.out.println("Loaded job: " + jobs.size());
            taskIdToJobIdMap = loadTaskIdToJobIdMapping();
            System.out.println("Loaded taskIds: " + taskIdToJobIdMap.size());
            tasks = loadTasks();
            System.out.println("Loaded tasks: " + tasks.size());
        }

        private void checkDataConsistency() {
            checkJobIdToJobMapping();
            checkTaskToJobIdMapping();
            checkTaskReachabilityFromJob();
        }

        // Loads the bucket -> jobId table, recording a violation for any job mapped to multiple buckets.
        private Set<String> loadJobIds() {
            List<Pair<Object, Object>> bucketToJobIdList = CassandraUtils.readTwoColumnTable(context.getTargetSession(), ACTIVE_JOB_IDS_TABLE).toList().toBlocking().first();
            tableSizes.put(ACTIVE_JOB_IDS_TABLE, bucketToJobIdList.size());

            // Check that each job is associated with a unique bucket id.
            Map<String, Integer> jobIdToBucketMap = new HashMap<>();
            bucketToJobIdList.forEach(pair -> {
                int bucketId = (Integer) pair.getLeft();
                String jobId = (String) pair.getRight();
                Integer previousBucketId = jobIdToBucketMap.get(jobId);
                if (previousBucketId != null) {
                    recordViolation("multipleJobIdToBucketMappings", String.format("Job %s is mapped to buckets %s and %s", jobId, previousBucketId, bucketId), 1);
                } else {
                    jobIdToBucketMap.put(jobId, bucketId);
                }
            });
            return new HashSet<>(jobIdToBucketMap.keySet());
        }

        // Loads job records, recording a violation (and skipping the record) when deserialization fails.
        private List<Job<?>> loadJobs() {
            List<Pair<Object, Object>> jobIdToJobList = CassandraUtils.readTwoColumnTable(context.getTargetSession(), ACTIVE_JOBS_TABLE).toList().toBlocking().first();
            tableSizes.put(ACTIVE_JOBS_TABLE, jobIdToJobList.size());

            return jobIdToJobList.stream()
                    .map(pair -> {
                        String jobId = (String) pair.getLeft();
                        String value = (String) pair.getRight();
                        try {
                            return (Job<?>) ObjectMappers.storeMapper().readValue(value, Job.class);
                        } catch (Exception e) {
                            recordViolation("badJobRecord", String.format("Job %s cannot be mapped to Job object: %s", jobId, e.getMessage()), 1);
                            return null;
                        }
                    })
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
        }

        // Loads the jobId -> taskId table, recording a violation for any task mapped to multiple jobs.
        private Map<String, String> loadTaskIdToJobIdMapping() {
            List<Pair<Object, Object>> jobIdToTaskIdList = CassandraUtils.readTwoColumnTable(context.getTargetSession(), ACTIVE_TASK_IDS_TABLE).toList().toBlocking().first();
            tableSizes.put(ACTIVE_TASK_IDS_TABLE, jobIdToTaskIdList.size());

            // Check that each task id is associated with exactly one job id.
            Map<String, String> taskIdToJobIdMapping = new HashMap<>();
            jobIdToTaskIdList.forEach(pair -> {
                String jobId = (String) pair.getLeft();
                String taskId = (String) pair.getRight();
                String previousJobId = taskIdToJobIdMapping.get(taskId);
                if (previousJobId != null) {
                    recordViolation("multipleTaskIdToJobIdMappings", String.format("Task %s is mapped to job %s and %s", taskId, previousJobId, jobId), 1);
                } else {
                    taskIdToJobIdMapping.put(taskId, jobId);
                }
            });
            return taskIdToJobIdMapping;
        }

        // Loads task records, recording a violation (and skipping the record) when deserialization fails.
        private List<Task> loadTasks() {
            List<Pair<Object, Object>> taskIdToTaskList = CassandraUtils.readTwoColumnTable(context.getTargetSession(), ACTIVE_TASKS_TABLE).toList().toBlocking().first();
            tableSizes.put(ACTIVE_TASKS_TABLE, taskIdToTaskList.size());

            return taskIdToTaskList.stream()
                    .map(pair -> {
                        String taskId = (String) pair.getLeft();
                        String value = (String) pair.getRight();
                        try {
                            return ObjectMappers.storeMapper().readValue(value, Task.class);
                        } catch (Exception e) {
                            recordViolation("badTaskRecord", String.format("Task %s cannot be mapped to Task object: %s", taskId, e.getMessage()), 1);
                            return null;
                        }
                    })
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
        }

        // Verifies jobId set and job records reference each other exactly.
        private void checkJobIdToJobMapping() {
            Set<String> jobRecordIds = jobs.stream().map(Job::getId).collect(Collectors.toSet());

            Set<String> unusedJobIds = CollectionsExt.copyAndRemove(jobIds, jobRecordIds);
            if (!unusedJobIds.isEmpty()) {
                recordViolation("unusedJobIds", String.format("Found jobIds not associated with any job record: %s", unusedJobIds), unusedJobIds.size());
            }

            Set<String> jobsWithNoBucketMapping = CollectionsExt.copyAndRemove(jobRecordIds, jobIds);
            if (!jobsWithNoBucketMapping.isEmpty()) {
                recordViolation("jobsWithNoBucketMapping", String.format("Found job records not associated with any jobId bucket: %s", jobsWithNoBucketMapping), jobsWithNoBucketMapping.size());
            }
        }

        // Verifies taskId mapping and task records reference each other exactly.
        private void checkTaskToJobIdMapping() {
            Set<String> taskRecordIds = tasks.stream().map(Task::getId).collect(Collectors.toSet());

            Set<String> unusedTaskIds = CollectionsExt.copyAndRemove(taskIdToJobIdMap.keySet(), taskRecordIds);
            if (!unusedTaskIds.isEmpty()) {
                recordViolation("unusedTaskIds", String.format("Found taskIds not associated with any task record: %s", unusedTaskIds), unusedTaskIds.size());
            }

            Set<String> tasksWithNoJobIdMapping = CollectionsExt.copyAndRemove(taskRecordIds, taskIdToJobIdMap.keySet());
            if (!tasksWithNoJobIdMapping.isEmpty()) {
                recordViolation("tasksWithNoJobIdMapping", String.format("Found task records not associated with any jobId: %s", tasksWithNoJobIdMapping), tasksWithNoJobIdMapping.size());
            }
        }

        // Verifies every task record points at a job that was actually loadable.
        private void checkTaskReachabilityFromJob() {
            Map<String, List<Task>> jobIdToTaskMapping = new HashMap<>();
            tasks.forEach(task -> jobIdToTaskMapping.computeIfAbsent(task.getJobId(), jid -> new ArrayList<>()).add(task));

            Set<String> unknownJobs = CollectionsExt.copyAndRemove(jobIdToTaskMapping.keySet(), jobIds);
            if (!unknownJobs.isEmpty()) {
                Set<String> badTaskIds = tasks.stream()
                        .filter(t -> !jobIdToTaskMapping.containsKey(t.getJobId()) || unknownJobs.contains(t.getJobId()))
                        .map(Task::getId)
                        .collect(Collectors.toSet());
                recordViolation(
                        "tasksNotAssociatedWithLoadableJob",
                        String.format("Found task records not associated with any loadable job: invalidJobIds=%s, taskIds=%s", unknownJobs, badTaskIds),
                        badTaskIds.size()
                );
            }
        }

        private void printReport() {
            System.out.println("################################################################################");
            System.out.println();
            System.out.println("Loaded tables:");
            tableSizes.forEach((table, size) -> System.out.println(table + ": " + size));
            System.out.println();
            System.out.println("Violations:");
            if (violations.isEmpty()) {
                System.out.println("No violations");
            } else {
                System.out.println("Total: " + violationCounter);
                violations.forEach(pair -> System.out.println(pair.getLeft() + ": " + pair.getRight()));
            }
            System.out.println();
            System.out.println("################################################################################");
        }

        // 'count' is the number of offending entities covered by this single violation entry.
        private void recordViolation(String violationId, String text, int count) {
            violationCounter += count;
            violations.add(Pair.of(violationId, text));
        }
    }
}
| 1,394 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/JobTruncateCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.titus.ext.cassandra.tool.CassandraSchemas;
import com.netflix.titus.ext.cassandra.tool.CassandraUtils;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import com.netflix.titus.ext.cassandra.tool.StorePreconditions;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
/**
 * Command to truncate job tables. This command can be run only for development or backup keyspaces.
 */
public class JobTruncateCommand implements Command {

    @Override
    public String getDescription() {
        return "Truncate jobs tables";
    }

    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }

    @Override
    public Options getOptions() {
        Options options = new Options();
        options.addOption(Option.builder("a").longOpt("archive").desc("Truncate archive tables").build());
        return options;
    }

    /**
     * Truncates the active job tables, and with {@code -a} also the archive tables.
     *
     * @throws IllegalArgumentException when the target keyspace is not a dev/backup keyspace
     */
    @Override
    public void execute(CommandContext context) {
        String targetKeySpace = context.getTargetSession().getLoggedKeyspace();

        // Fixed error message: it previously read "Can truncate...", the opposite of what this guard enforces.
        Preconditions.checkArgument(
                StorePreconditions.isDevOrBackupStack(targetKeySpace),
                "Cannot truncate non development/backup keyspace: " + targetKeySpace
        );

        boolean includeArchive = context.getCommandLine().hasOption('a');
        List<String> titusTables = includeArchive ? CassandraSchemas.JOB_TABLES : CassandraSchemas.JOB_ACTIVE_TABLES;

        titusTables.forEach(table -> CassandraUtils.truncateTable(context, table));
    }
}
| 1,395 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/TestStoreLoadCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import com.datastax.driver.core.Session;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobStatus;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.Version;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.ext.cassandra.store.CassandraJobStore;
import com.netflix.titus.ext.cassandra.store.CassandraStoreConfiguration;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import static com.netflix.titus.api.jobmanager.model.job.JobState.Accepted;
/**
 * Load-test harness for the Cassandra job store. It first creates a configurable
 * number of jobs, each with a configurable number of tasks, then repeatedly loads
 * all jobs and their tasks back, logging the per-iteration and average load times.
 * Intended for development keyspaces only (the embedded configuration targets "dev").
 */
public class TestStoreLoadCommand implements Command {
    private static final Logger logger = LoggerFactory.getLogger(TestStoreLoadCommand.class);
    // Upper bound on concurrently running per-job task-retrieval observables.
    private static final int MAX_RETRIEVE_TASK_CONCURRENCY = 1_000;
    // Fixed, minimal store configuration for the test run; all consistency-failure
    // toggles are hard-coded and the keyspace name is ignored in favor of the
    // command-line target keyspace (see execute()).
    private static final CassandraStoreConfiguration CONFIGURATION = new CassandraStoreConfiguration() {
        @Override
        public String getV2KeySpace() {
            return "dev";
        }
        @Override
        public boolean isFailOnInconsistentCapacityGroupData() {
            return true;
        }
        @Override
        public boolean isFailOnInconsistentAgentData() {
            return true;
        }
        @Override
        public boolean isFailOnInconsistentLoadBalancerData() {
            return false;
        }
        @Override
        public boolean isFailOnInconsistentSchedulerData() {
            return false;
        }
        @Override
        public int getConcurrencyLimit() {
            return MAX_RETRIEVE_TASK_CONCURRENCY;
        }
        @Override
        public int getLoadBalancerWriteConcurrencyLimit() {
            return MAX_RETRIEVE_TASK_CONCURRENCY;
        }
        @Override
        public int getLoadBalancerDeleteConcurrencyLimit() {
            return 1;
        }
        @Override
        public boolean isTracingEnabled() {
            return false;
        }
    };
    @Override
    public String getDescription() {
        return "Test the cassandra store implementation";
    }
    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }
    /**
     * Declares the four required numeric options: job count, tasks per job,
     * write concurrency, and number of load iterations.
     */
    @Override
    public Options getOptions() {
        Options options = new Options();
        options.addOption(Option.builder("j")
                .longOpt("jobs")
                .desc("The number of the jobs to create")
                .hasArg()
                .required()
                .build());
        options.addOption(Option.builder("ta")
                .longOpt("tasks")
                .desc("The number of the tasks to create per job")
                .hasArg()
                .required()
                .build());
        options.addOption(Option.builder("c")
                .longOpt("concurrency")
                .desc("The number of observables to run in parallel")
                .hasArg()
                .required()
                .build());
        options.addOption(Option.builder("i")
                .longOpt("iterations")
                .desc("The number of load iterations to run")
                .hasArg()
                .required()
                .build());
        return options;
    }
    /**
     * Runs the load test: validates the keyspace, writes jobs/tasks, then times
     * {@code iterations} full read-back passes and logs the average.
     */
    @Override
    public void execute(CommandContext commandContext) {
        CommandLine commandLine = commandContext.getCommandLine();
        String keyspace = commandContext.getTargetKeySpace();
        Integer jobs = Integer.valueOf(commandLine.getOptionValue("jobs"));
        Integer tasks = Integer.valueOf(commandLine.getOptionValue("tasks"));
        Integer concurrency = Integer.valueOf(commandLine.getOptionValue("concurrency"));
        Integer iterations = Integer.valueOf(commandLine.getOptionValue("iterations"));
        Session session = commandContext.getTargetSession();
        // Fail fast if the target keyspace is missing; this command does not create it.
        boolean keyspaceExists = session.getCluster().getMetadata().getKeyspace(keyspace) != null;
        if (!keyspaceExists) {
            throw new IllegalStateException("Keyspace: " + keyspace + " does not exist. You must create it first.");
        }
        session.execute("USE " + keyspace);
        JobStore titusStore = new CassandraJobStore(CONFIGURATION, session, TitusRuntimes.internal());
        // Phase 1: create all jobs and their tasks, at most `concurrency` jobs in flight.
        long jobStartTime = System.currentTimeMillis();
        List<Observable<Void>> createJobAndTasksObservables = new ArrayList<>();
        for (int i = 0; i < jobs; i++) {
            createJobAndTasksObservables.add(createJobAndTasksObservable(tasks, titusStore));
        }
        // Blocking subscribe: errors are logged but do not abort the command.
        Observable.merge(createJobAndTasksObservables, concurrency).toBlocking().subscribe(
                none -> {
                },
                e -> logger.error("Error creating jobs: ", e),
                () -> {
                    logger.info("Created {} jobs with {} tasks in {}[ms]", jobs, tasks, System.currentTimeMillis() - jobStartTime);
                }
        );
        // Phase 2: time `iterations` passes of loading every job plus its tasks.
        long loadTotalTime = 0L;
        for (int i = 0; i < iterations; i++) {
            long loadStartTime = System.currentTimeMillis();
            // Collected via side effect from the blocking pipeline below.
            List<Pair<Job, List<Task>>> pairs = new ArrayList<>();
            titusStore.init().andThen(titusStore.retrieveJobs().flatMap(retrievedJobsAndErrors -> {
                List<Job<?>> retrievedJobs = retrievedJobsAndErrors.getLeft();
                // Fan out one task-retrieval observable per job, capped at
                // MAX_RETRIEVE_TASK_CONCURRENCY concurrent queries.
                List<Observable<Pair<Job, List<Task>>>> retrieveTasksObservables = new ArrayList<>();
                for (Job job : retrievedJobs) {
                    Observable<Pair<Job, List<Task>>> retrieveTasksObservable = titusStore.retrieveTasksForJob(job.getId())
                            .map(taskList -> new Pair<>(job, taskList.getLeft()));
                    retrieveTasksObservables.add(retrieveTasksObservable);
                }
                return Observable.merge(retrieveTasksObservables, MAX_RETRIEVE_TASK_CONCURRENCY);
            })).map(p -> {
                pairs.add(p);
                return null;
            }).toBlocking().subscribe(
                    none -> {
                    },
                    e -> logger.error("Failed to load jobs from cassandra with error: ", e),
                    () -> {
                    }
            );
            long loadTime = System.currentTimeMillis() - loadStartTime;
            logger.info("Loaded {} jobs from cassandra in {}[ms]", pairs.size(), loadTime);
            loadTotalTime += loadTime;
        }
        logger.info("Average load time: {}[ms]", loadTotalTime / iterations);
    }
    // Builds an observable that stores one job and then stores its `tasks` tasks in parallel.
    private Observable<Void> createJobAndTasksObservable(int tasks, JobStore store) {
        Job<BatchJobExt> job = createJobObject();
        List<Task> taskList = new ArrayList<>();
        for (int i = 0; i < tasks; i++) {
            taskList.add(createTaskObject(job));
        }
        return store.storeJob(job).andThen(Observable.fromCallable(() -> {
            List<Observable<Void>> observables = new ArrayList<>();
            for (Task task : taskList) {
                observables.add(store.storeTask(task).toObservable());
            }
            return observables;
        })).flatMap(Observable::merge);
    }
    // Creates a minimal accepted batch job with a random id and placeholder status.
    private Job<BatchJobExt> createJobObject() {
        String jobId = UUID.randomUUID().toString();
        JobDescriptor<BatchJobExt> jobDescriptor = JobDescriptor.<BatchJobExt>newBuilder()
                .withExtensions(new BatchJobExt(1, 1, null, false))
                .build();
        JobStatus status = new JobStatus(Accepted, "code", "message", System.currentTimeMillis());
        return new Job<>(jobId, jobDescriptor, status, new ArrayList<>(), Version.undefined());
    }
    // Creates a minimal batch task with a random id, linked to the given job.
    private Task createTaskObject(Job<BatchJobExt> job) {
        String taskId = UUID.randomUUID().toString();
        return BatchJobTask.newBuilder()
                .withId(taskId)
                .withJobId(job.getId())
                .build();
    }
}
| 1,396 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/JobCopyCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.titus.ext.cassandra.tool.CassandraSchemas;
import com.netflix.titus.ext.cassandra.tool.CassandraUtils;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import com.netflix.titus.ext.cassandra.tool.StorePreconditions;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
/**
 * Command that copies the job tables from the source keyspace to the target
 * keyspace. The target must be a development or backup keyspace. With
 * {@code -a/--archive} the archive tables are copied as well; with
 * {@code -d/--clean} the target tables are truncated before copying.
 */
public class JobCopyCommand implements Command {
    @Override
    public String getDescription() {
        return "Copy job key space from source to target";
    }
    @Override
    public CommandType getCommandType() {
        return CommandType.SourceTargetKeySpaces;
    }
    @Override
    public Options getOptions() {
        Options options = new Options();
        options.addOption(Option.builder("a").longOpt("archive").desc("Copy archive tables").build());
        options.addOption(Option.builder("d").longOpt("clean").desc("Remove the original table content before copy").build());
        return options;
    }
    @Override
    public void execute(CommandContext context) {
        String targetKeySpace = context.getTargetSession().getLoggedKeyspace();
        // Guard: never overwrite a production keyspace.
        Preconditions.checkArgument(
                StorePreconditions.isDevOrBackupStack(targetKeySpace),
                "Can copy to development/backup keyspaces only, not " + targetKeySpace
        );
        boolean includeArchive = context.getCommandLine().hasOption('a');
        // Bug fix: the "clean" option is registered under short name 'd' (see
        // getOptions), but this previously checked hasOption('c'), so the
        // --clean flag silently never took effect.
        boolean clean = context.getCommandLine().hasOption('d');
        List<String> titusTables = includeArchive ? CassandraSchemas.JOB_TABLES : CassandraSchemas.JOB_ACTIVE_TABLES;
        if (clean) {
            titusTables.forEach(table -> CassandraUtils.truncateTable(context, table));
        }
        titusTables.forEach(table -> CassandraUtils.copyTable(context, table));
    }
}
| 1,397 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/tool/command/JobSnapshotUploadCommand.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.tool.command;
import java.io.File;
import com.netflix.titus.ext.cassandra.tool.Command;
import com.netflix.titus.ext.cassandra.tool.CommandContext;
import com.netflix.titus.ext.cassandra.tool.snapshot.JobSnapshotLoader;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
/**
 * Command that uploads a previously captured job snapshot from a local folder
 * into the target Cassandra keyspace.
 */
public class JobSnapshotUploadCommand implements Command {
    @Override
    public String getDescription() {
        return "Loads job snapshot into Cassandra";
    }
    @Override
    public CommandType getCommandType() {
        return CommandType.TargetKeySpace;
    }
    @Override
    public Options getOptions() {
        // Single required option: the folder holding the snapshot files to upload.
        Option inputFolderOption = Option.builder("i")
                .longOpt("input folder")
                .required()
                .hasArg()
                .desc("Input folder containing snapshot files")
                .build();
        Options options = new Options();
        options.addOption(inputFolderOption);
        return options;
    }
    @Override
    public void execute(CommandContext context) {
        File inputFolder = new File(context.getCommandLine().getOptionValue('i'));
        new JobSnapshotLoader(context.getTargetSession(), inputFolder).load();
    }
}
| 1,398 |
0 | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra | Create_ds/titus-control-plane/titus-ext/cassandra/src/main/java/com/netflix/titus/ext/cassandra/store/StoreTransactionLoggers.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.cassandra.store;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.ServiceJobTask;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.ext.cassandra.util.StoreTransactionLogger;
/**
 * Provides the shared {@link StoreTransactionLogger} instance used by the
 * Cassandra store, pre-configured with id-based key selectors and state-only
 * summary formatters for job and task entities.
 */
public class StoreTransactionLoggers {
    private static final StoreTransactionLogger INSTANCE = createLogger();
    /** Returns the shared, pre-configured transaction logger. */
    public static StoreTransactionLogger transactionLogger() {
        return INSTANCE;
    }
    // Builds the logger: keys are entity ids, summaries render only the entity state.
    private static StoreTransactionLogger createLogger() {
        return StoreTransactionLogger.newBuilder()
                // Job entities
                .withEntityKeySelectors(Job.class, Job::getId)
                .withEntityFormatter(Job.class, StoreTransactionLoggers::toSummary)
                // Task entities (batch and service variants)
                .withEntityKeySelectors(BatchJobTask.class, BatchJobTask::getId)
                .withEntityKeySelectors(ServiceJobTask.class, ServiceJobTask::getId)
                .withEntityFormatter(BatchJobTask.class, StoreTransactionLoggers::toSummary)
                .withEntityFormatter(ServiceJobTask.class, StoreTransactionLoggers::toSummary)
                .build();
    }
    // Renders e.g. "{state=Accepted}", or "<null>" for a missing entity.
    private static String toSummary(Job job) {
        return job == null ? "<null>" : "{state=" + job.getStatus().getState() + '}';
    }
    private static String toSummary(Task task) {
        return task == null ? "<null>" : "{state=" + task.getStatus().getState() + '}';
    }
}
| 1,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.