index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ResourceClusterScaleSpec.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Builder;
import lombok.Value;
@Value
@Builder
public class ResourceClusterScaleSpec {
ClusterID clusterId;
ContainerSkuID skuId;
int minIdleToKeep;
int minSize;
int maxIdleToKeep;
int maxSize;
long coolDownSecs;
@JsonCreator
public ResourceClusterScaleSpec(
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("skuId") final ContainerSkuID skuId,
@JsonProperty("minIdleToKeep") final int minIdleToKeep,
@JsonProperty("minSize") final int minSize,
@JsonProperty("maxIdleToKeep") final int maxIdleToKeep,
@JsonProperty("maxSize") final int maxSize,
@JsonProperty("coolDownSecs") final long coolDownSecs) {
this.clusterId = clusterId;
this.skuId = skuId;
this.minIdleToKeep = minIdleToKeep;
this.minSize = minSize;
this.maxIdleToKeep = maxIdleToKeep;
this.maxSize = maxSize;
this.coolDownSecs = coolDownSecs;
}
}
| 8,100 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/GetClusterUsageResponse.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.List;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
@Value
@Builder
public class GetClusterUsageResponse {
ClusterID clusterID;
@Singular
List<UsageByGroupKey> usages;
@Value
@Builder
public static class UsageByGroupKey {
String usageGroupKey;
int idleCount;
int totalCount;
}
}
| 8,101 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/GetTaskExecutorsRequest.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import java.util.Map;
import lombok.Value;
@Value
public class GetTaskExecutorsRequest {
Map<String, String> attributes;
}
| 8,102 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/UpgradeClusterContainersRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import lombok.Builder;
import lombok.Value;
@Value
@Builder(toBuilder = true)
public class UpgradeClusterContainersRequest {
ClusterID clusterId;
String region;
String optionalImageId;
String optionalSkuId;
MantisResourceClusterEnvType optionalEnvType;
int optionalBatchMaxSize;
boolean forceUpgradeOnSameImage;
boolean enableSkuSpecUpgrade;
}
| 8,103 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/MantisResourceClusterEnvType.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
public enum MantisResourceClusterEnvType {
Default,
Dev,
Test,
Prod
}
| 8,104 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/MantisResourceClusterSpec.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import java.util.Set;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Singular;
import lombok.Value;
/**
* Contract class to define a Mantis resource cluster. This contract provides the abstraction to provide a generic
* definition from Mantis control perspective, and it's up to the implementations of each
* {@link ResourceClusterProvider} to translate this spec
* to corresponding framework's cluster/node(s) definition.
*/
@Value
@Builder
public class MantisResourceClusterSpec {
String name;
/**
* ID fields maps to cluster name or spinnaker app name.
*/
ClusterID id;
String ownerName;
String ownerEmail;
MantisResourceClusterEnvType envType;
@Singular
Set<SkuTypeSpec> skuSpecs;
@Singular
Map<String, String> clusterMetadataFields;
/** [Note] The @JsonCreator + @JasonProperty is needed when using this class with mixed shaded/non-shaded Jackson.
* The new @Jacksonized annotation is currently not usable with shaded Jackson here.
*/
@JsonCreator
public MantisResourceClusterSpec(
@JsonProperty("name") final String name,
@JsonProperty("id") final ClusterID id,
@JsonProperty("ownerName") final String ownerName,
@JsonProperty("ownerEmail") final String ownerEmail,
@JsonProperty("envType") final MantisResourceClusterEnvType envType,
@JsonProperty("skuSpecs") final Set<SkuTypeSpec> skuSpecs,
@JsonProperty("clusterMetadataFields") final Map<String, String> clusterMetadataFields) {
this.name = name;
this.id = id;
this.ownerName = ownerName;
this.ownerEmail = ownerEmail;
this.envType = envType;
this.skuSpecs = skuSpecs;
this.clusterMetadataFields = clusterMetadataFields;
}
@Builder
@Value
@EqualsAndHashCode(onlyExplicitlyIncluded = true)
public static class SkuTypeSpec {
@EqualsAndHashCode.Include
ContainerSkuID skuId;
SkuCapacity capacity;
String imageId;
int cpuCoreCount;
int memorySizeInMB;
int networkMbps;
int diskSizeInMB;
@Singular
Map<String, String> skuMetadataFields;
@JsonCreator
public SkuTypeSpec(
@JsonProperty("skuId") final ContainerSkuID skuId,
@JsonProperty("capacity") final SkuCapacity capacity,
@JsonProperty("imageId") final String imageId,
@JsonProperty("cpuCoreCount") final int cpuCoreCount,
@JsonProperty("memorySizeInBytes") final int memorySizeInMB,
@JsonProperty("networkMbps") final int networkMbps,
@JsonProperty("diskSizeInBytes") final int diskSizeInMB,
@JsonProperty("skuMetadataFields") final Map<String, String> skuMetadataFields) {
this.skuId = skuId;
this.capacity = capacity;
this.imageId = imageId;
this.cpuCoreCount = cpuCoreCount;
this.memorySizeInMB = memorySizeInMB;
this.networkMbps = networkMbps;
this.diskSizeInMB = diskSizeInMB;
this.skuMetadataFields = skuMetadataFields;
}
}
/**
* This class defined the capacity required for the given skuId mapping to hosting framework nodes
* e.g. containers/virtual machines.
*/
@Builder
@Value
public static class SkuCapacity {
ContainerSkuID skuId;
int minSize;
int maxSize;
int desireSize;
@JsonCreator
public SkuCapacity(
@JsonProperty("skuId") final ContainerSkuID skuId,
@JsonProperty("minSize") final int minSize,
@JsonProperty("maxSize") final int maxSize,
@JsonProperty("desireSize") final int desireSize
) {
this.skuId = skuId;
this.minSize = minSize;
this.maxSize = maxSize;
this.desireSize = desireSize;
}
}
}
| 8,105 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ResourceClusterScaleRuleProto.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.jobcluster.proto.BaseRequest;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.NonNull;
import lombok.Singular;
import lombok.Value;
public class ResourceClusterScaleRuleProto {
@Builder
@Value
public static class GetResourceClusterScaleRulesRequest {
ClusterID clusterId;
}
@Value
public static class GetResourceClusterScaleRulesResponse extends BaseResponse {
ClusterID clusterId;
@Singular
List<ResourceClusterScaleRule> rules;
@Builder
@JsonCreator
public GetResourceClusterScaleRulesResponse(
@JsonProperty("requestId") final long requestId,
@JsonProperty("responseCode") final ResponseCode responseCode,
@JsonProperty("message") final String message,
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("rules") final List<ResourceClusterScaleRule> rules) {
super(requestId, responseCode, message);
this.rules = rules;
this.clusterId = clusterId;
}
}
@Builder
@Value
public static class CreateResourceClusterScaleRuleRequest {
ClusterID clusterId;
ResourceClusterScaleRule rule;
}
/**
* Create all scale rules to the given cluster id. This shall override/delete any existing rules.
*/
@Builder
@Value
public static class CreateAllResourceClusterScaleRulesRequest {
ClusterID clusterId;
@Singular
@NonNull
List<ResourceClusterScaleRule> rules;
}
@Value
@Builder
public static class ResourceClusterScaleRule {
ClusterID clusterId;
ContainerSkuID skuId;
int minIdleToKeep;
int minSize;
int maxIdleToKeep;
int maxSize;
long coolDownSecs;
}
@EqualsAndHashCode(callSuper = true)
@Value
public static class JobArtifactsToCacheRequest extends BaseRequest {
ClusterID clusterID;
@Singular
@NonNull
List<ArtifactID> artifacts;
public JobArtifactsToCacheRequest(@JsonProperty("clusterID") ClusterID clusterID, @JsonProperty("artifacts") List<ArtifactID> artifacts) {
super();
Preconditions.checkNotNull(clusterID, "clusterID cannot be null");
Preconditions.checkNotNull(artifacts, "artifacts cannot be null");
this.clusterID = clusterID;
this.artifacts = artifacts;
}
}
}
| 8,106 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/GetClusterIdleInstancesResponse.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import java.util.List;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
@Value
@Builder
public class GetClusterIdleInstancesResponse {
ClusterID clusterId;
ContainerSkuID skuId;
@Singular
List<TaskExecutorID> instanceIds;
int desireSize;
}
| 8,107 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/UpgradeClusterContainersResponse.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Builder;
import lombok.Value;
@Value
public class UpgradeClusterContainersResponse extends BaseResponse {
ClusterID clusterId;
String region;
String optionalSkuId;
MantisResourceClusterEnvType optionalEnvType;
@Builder
@JsonCreator
public UpgradeClusterContainersResponse(
@JsonProperty("requestId") final long requestId,
@JsonProperty("responseCode") final ResponseCode responseCode,
@JsonProperty("message") final String message,
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("region") final String region,
@JsonProperty("optionalSkuId") String optionalSkuId,
@JsonProperty("optionalEnvType") MantisResourceClusterEnvType optionalEnvType) {
super(requestId, responseCode, message);
this.clusterId = clusterId;
this.optionalSkuId = optionalSkuId;
this.region = region;
this.optionalEnvType = optionalEnvType;
}
}
| 8,108 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/GetClusterIdleInstancesRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import lombok.Builder;
import lombok.Value;
@Value
@Builder
public class GetClusterIdleInstancesRequest {
ClusterID clusterID;
ContainerSkuID skuId;
int maxInstanceCount;
int desireSize;
}
| 8,109 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/GetResourceClusterSpecRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import lombok.Builder;
import lombok.Value;
@Builder
@Value
public class GetResourceClusterSpecRequest {
ClusterID id;
}
| 8,110 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ScaleResourceResponse.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Builder;
import lombok.Value;
@Value
public class ScaleResourceResponse extends BaseResponse {
ClusterID clusterId;
ContainerSkuID skuId;
String region;
MantisResourceClusterEnvType envType;
int desireSize;
@Builder
@JsonCreator
public ScaleResourceResponse(
@JsonProperty("requestId") final long requestId,
@JsonProperty("responseCode") final ResponseCode responseCode,
@JsonProperty("message") final String message,
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("skuId") ContainerSkuID skuId,
@JsonProperty("region") String region,
@JsonProperty("envType") MantisResourceClusterEnvType envType,
@JsonProperty("desireSize") int desireSize) {
super(requestId, responseCode, message);
this.clusterId = clusterId;
this.skuId = skuId;
this.region = region;
this.envType = envType;
this.desireSize = desireSize;
}
}
| 8,111 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/writable/ResourceClusterSpecWritable.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.writable;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterSpec;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Builder;
import lombok.Value;
/**
* Persistence contract of resource cluster spec.
*/
@Value
@Builder
public class ResourceClusterSpecWritable {
String version;
ClusterID id;
MantisResourceClusterSpec clusterSpec;
/** [Note] The @JsonCreator + @JasonProperty is needed when using this class with mixed shaded/non-shaded Jackson.
* The new @Jacksonized annotation is currently not usable with shaded Jackson here.
*/
@JsonCreator
public ResourceClusterSpecWritable(
@JsonProperty("version") final String version,
@JsonProperty("id") final ClusterID id,
@JsonProperty("clusterSpec") final MantisResourceClusterSpec clusterSpec) {
this.version = version;
this.id = id;
this.clusterSpec = clusterSpec;
}
}
| 8,112 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/writable/ResourceClusterScaleRulesWritable.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.writable;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
@Value
@Builder(toBuilder = true)
public class ResourceClusterScaleRulesWritable {
ClusterID clusterId;
String version;
/**
* [Note] Using composite type as key will cause a bug during ser/deser where key object's toString is invoked
* instead of using the ser result and this will cause unexpected behavior (key=(object string from toString())
* during deser. Thus using plain string (e.g. resourceID) instead.
*/
@Singular
Map<String, ResourceClusterScaleSpec> scaleRules;
@JsonCreator
public ResourceClusterScaleRulesWritable(
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("version") final String version,
@JsonProperty("rules") final Map<String, ResourceClusterScaleSpec> scaleRules) {
this.clusterId = clusterId;
this.version = version;
this.scaleRules = scaleRules;
}
}
| 8,113 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/writable/RegisteredResourceClustersWritable.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.writable;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
/**
* Persistency contract of registered resource clusters.
*/
@Value
@Builder(toBuilder = true)
public class RegisteredResourceClustersWritable {
@Singular
Map<String, ClusterRegistration> clusters;
@JsonCreator
public RegisteredResourceClustersWritable(
@JsonProperty("clusters") final Map<String, ClusterRegistration> clusters) {
this.clusters = clusters;
}
@Value
@Builder
public static class ClusterRegistration {
ClusterID clusterId;
String version;
/** [Note] The @JsonCreator + @JasonProperty is needed when using this class with mixed shaded/non-shaded Jackson.
* The new @Jacksonized annotation is currently not usable with shaded Jackson here.
*/
@JsonCreator
public ClusterRegistration(
@JsonProperty("clusterId") final ClusterID clusterId,
@JsonProperty("version") final String version) {
this.clusterId = clusterId;
this.version = version;
}
}
}
| 8,114 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/NoopResourceClusterProvider.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
/**
* Default Resource cluster provider implementation. This needs to be replaced by OSS implementation provider e.g. k8s.
*/
public class NoopResourceClusterProvider implements ResourceClusterProvider {
@Override
public CompletionStage<ResourceClusterProvisionSubmissionResponse> provisionClusterIfNotPresent(ProvisionResourceClusterRequest clusterSpec) {
return CompletableFuture.completedFuture(null);
}
@Override
public CompletionStage<ScaleResourceResponse> scaleResource(ScaleResourceRequest scaleRequest) {
return CompletableFuture.completedFuture(null);
}
@Override
public CompletionStage<UpgradeClusterContainersResponse> upgradeContainerResource(
ResourceClusterProviderUpgradeRequest request) {
return CompletableFuture.completedFuture(null);
}
@Override
public ResourceClusterResponseHandler getResponseHandler() {
return new NoopResourceClusterResponseHandler();
}
}
| 8,115 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/ResourceClusterProviderAdapter.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import akka.actor.ActorSystem;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import java.util.concurrent.CompletionStage;
import lombok.extern.slf4j.Slf4j;
/**
* Adapter to bind the implementation of {@link ResourceClusterProvider} using class name specified in
* {@link io.mantisrx.server.master.config.MasterConfiguration}.
* <p>
* This adapter requires the implementation of {@link ResourceClusterProvider} to have a ctor with
* {@link akka.actor.ActorSystem} param.
* </p>
*/
@Slf4j
public class ResourceClusterProviderAdapter implements ResourceClusterProvider {
    private final ResourceClusterProvider providerImpl;

    /**
     * Reflectively instantiates the configured {@link ResourceClusterProvider} implementation.
     * A constructor taking an {@link ActorSystem} is preferred; when the implementation does
     * not declare one, the no-arg constructor is used as a fallback.
     *
     * @param providerClassStr fully qualified class name of the provider implementation
     * @param system           actor system handed to the provider ctor when such a ctor exists
     * @throws RuntimeException if the class cannot be loaded or instantiated either way
     */
    public ResourceClusterProviderAdapter(String providerClassStr, ActorSystem system) {
        try {
            // Load the class exactly once and reuse it for both construction attempts
            // (the original resolved it twice, once per constructor shape).
            Class<?> providerClass = Class.forName(providerClassStr);
            ResourceClusterProvider provider;
            try {
                provider = (ResourceClusterProvider) providerClass
                    .getConstructor(ActorSystem.class).newInstance(system);
            } catch (NoSuchMethodException ex) {
                log.warn("Could not find ctor with actorSystem param: {}", providerClassStr);
                log.info("Building ResourceClusterProvider with empty ctor: {}", providerClassStr);
                provider = (ResourceClusterProvider) providerClass.getConstructor().newInstance();
            }
            this.providerImpl = provider;
        } catch (Exception e) {
            throw new RuntimeException("Failed to create ResourceClusterProvider from " + providerClassStr, e);
        }
    }

    /** Delegates to the underlying provider implementation. */
    @Override
    public CompletionStage<ResourceClusterProvisionSubmissionResponse> provisionClusterIfNotPresent(ProvisionResourceClusterRequest clusterSpec) {
        return providerImpl.provisionClusterIfNotPresent(clusterSpec);
    }

    /** Delegates to the underlying provider implementation. */
    @Override
    public CompletionStage<ScaleResourceResponse> scaleResource(ScaleResourceRequest scaleRequest) {
        return providerImpl.scaleResource(scaleRequest);
    }

    /** Delegates to the underlying provider implementation. */
    @Override
    public CompletionStage<UpgradeClusterContainersResponse> upgradeContainerResource(
            ResourceClusterProviderUpgradeRequest request) {
        return providerImpl.upgradeContainerResource(request);
    }

    /** Delegates to the underlying provider implementation. */
    @Override
    public ResourceClusterResponseHandler getResponseHandler() {
        return providerImpl.getResponseHandler();
    }
}
| 8,116 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/ResourceClusterResponseHandler.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
/**
* Callback handler for {@link ResourceClusterProvider} responses.
*/
public interface ResourceClusterResponseHandler {
    /**
     * Invoked with the result of an asynchronous cluster provision submission.
     *
     * @param resp the provider's provision submission response
     */
    void handleProvisionResponse(ResourceClusterProvisionSubmissionResponse resp);
}
| 8,117 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/NoopResourceClusterResponseHandler.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class NoopResourceClusterResponseHandler implements ResourceClusterResponseHandler {

    /**
     * Logs the provision response and takes no further action.
     * Uses parameterized logging instead of {@code resp.toString()} so a {@code null}
     * response does not throw an NPE and formatting is deferred until needed.
     */
    @Override
    public void handleProvisionResponse(ResourceClusterProvisionSubmissionResponse resp) {
        log.info("{}", resp);
    }
}
| 8,118 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/ResourceClusterProviderUpgradeRequest.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterEnvType;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterSpec;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import javax.annotation.Nullable;
import lombok.Builder;
import lombok.Value;
@Value
@Builder(toBuilder = true)
public class ResourceClusterProviderUpgradeRequest {
    // Target cluster to upgrade.
    ClusterID clusterId;
    String region;
    // Fields prefixed "optional" may be absent on the incoming API request.
    @Nullable
    String optionalImageId;
    @Nullable
    String optionalSkuId;
    MantisResourceClusterEnvType optionalEnvType;
    int optionalBatchMaxSize;
    boolean forceUpgradeOnSameImage;
    boolean enableSkuSpecUpgrade;
    @Nullable
    MantisResourceClusterSpec resourceClusterSpec;

    /**
     * Builds a provider upgrade request from an API upgrade request, without a cluster spec.
     */
    public static ResourceClusterProviderUpgradeRequest from(
        UpgradeClusterContainersRequest request) {
        return from(request, null);
    }

    /**
     * Builds a provider upgrade request from an API upgrade request plus an optional cluster spec.
     */
    public static ResourceClusterProviderUpgradeRequest from(
        UpgradeClusterContainersRequest request,
        MantisResourceClusterSpec clusterSpec) {
        return ResourceClusterProviderUpgradeRequest.builder()
            .resourceClusterSpec(clusterSpec)
            .clusterId(request.getClusterId())
            .region(request.getRegion())
            .optionalImageId(request.getOptionalImageId())
            .optionalSkuId(request.getOptionalSkuId())
            .optionalEnvType(request.getOptionalEnvType())
            .optionalBatchMaxSize(request.getOptionalBatchMaxSize())
            .forceUpgradeOnSameImage(request.isForceUpgradeOnSameImage())
            .enableSkuSpecUpgrade(request.isEnableSkuSpecUpgrade())
            .build();
    }
}
| 8,119 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/resourceprovider/ResourceClusterProvider.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.resourceprovider;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import java.util.concurrent.CompletionStage;
/**
* This interface provides the API to connect resource cluster management actor to actual
* implementations of different resource cluster clients e.g. k8s.
*
* <p>
* To implement and integrate this interface, the {@link ResourceClusterProviderAdapter} is used to wire actual
* implementation to the main entrypoint and currently this adapter requires an {@link akka.actor.ActorSystem} to be
* passed into the constructor.
* </p>
*/
public interface ResourceClusterProvider {
    /**
     * Provision a new resource cluster using the given spec. This operation should be idempotent.
     * The returned CompletionStage instance is to indicate whether the provision has been
     * accepted/started and doesn't need to represent the whole provisioning completion(s) of
     * every nodes in the cluster.
     *
     * @param clusterSpec spec describing the cluster to provision
     * @return stage completing once the provision request has been accepted/submitted
     */
    CompletionStage<ResourceClusterProvisionSubmissionResponse> provisionClusterIfNotPresent(
        ProvisionResourceClusterRequest clusterSpec);
    /**
     * Request to scale an existing resource cluster using the given spec. This operation should be idempotent.
     * The returned CompletionStage instance is to indicate whether the operation has been
     * accepted/started and doesn't need to represent the full completion(s) of
     * every nodes.
     *
     * @param scaleRequest spec describing the sku and target size to scale to
     * @return stage completing once the scale request has been accepted/submitted
     */
    CompletionStage<ScaleResourceResponse> scaleResource(ScaleResourceRequest scaleRequest);
    /**
     * To upgrade cluster containers: each container running task executor is using docker image tag based image version.
     * In regular case the upgrade is to refresh the container to re-deploy with latest digest associated with the image
     * tag (e.g. latest).
     * If multiple image digest versions need to be ran/hosted at the same time, it is recommended to create a separate
     * sku id in addition to the existing sku(s).
     *
     * @param request upgrade request (target cluster, optional image/sku/env filters)
     * @return stage completing once the upgrade request has been accepted/submitted
     */
    CompletionStage<UpgradeClusterContainersResponse> upgradeContainerResource(ResourceClusterProviderUpgradeRequest request);

    /**
     * Returns the callback handler to be invoked with the outcome of asynchronous
     * provision submissions (see {@link ResourceClusterResponseHandler}).
     */
    ResourceClusterResponseHandler getResponseHandler();
}
| 8,120 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/akka/ActorSystemMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.akka;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
/**
* A holder class for metrics associated with an Actor.
*/
public final class ActorSystemMetrics {

    private static final ActorSystemMetrics INSTANCE = new ActorSystemMetrics();

    private final Counter actorKilledCount;
    private final Counter actorInitExceptionCount;
    private final Counter actorDeathPactExcCount;
    private final Counter actorResumeCount;

    private ActorSystemMetrics() {
        // Register the counter group once with the global registry and keep the
        // registered counter handles for the increment methods below.
        final Metrics registered = MetricsRegistry.getInstance().registerAndGet(
            new Metrics.Builder()
                .id("ActorSystemMetrics")
                .addCounter("actorKilledCount")
                .addCounter("actorInitExceptionCount")
                .addCounter("actorDeathPactExcCount")
                .addCounter("actorResumeCount")
                .build());
        this.actorKilledCount = registered.getCounter("actorKilledCount");
        this.actorInitExceptionCount = registered.getCounter("actorInitExceptionCount");
        this.actorDeathPactExcCount = registered.getCounter("actorDeathPactExcCount");
        this.actorResumeCount = registered.getCounter("actorResumeCount");
    }

    /** @return the process-wide singleton instance */
    public static ActorSystemMetrics getInstance() {
        return INSTANCE;
    }

    /** Increments the count of actors stopped due to an {@code ActorKilledException}. */
    public void incrementActorKilledCount() {
        actorKilledCount.increment();
    }

    /** Increments the count of actors that failed to initialize. */
    public void incrementActorInitExceptionCount() {
        actorInitExceptionCount.increment();
    }

    /** Increments the count of actors stopped due to a {@code DeathPactException}. */
    public void incrementActorDeathPactExcCount() {
        actorDeathPactExcCount.increment();
    }

    /** Increments the count of actors resumed after a non-fatal exception. */
    public void incrementActorResumeCount() {
        actorResumeCount.increment();
    }
}
| 8,121 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/akka/MantisActorSupervisorStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.akka;
import akka.actor.ActorInitializationException;
import akka.actor.ActorKilledException;
import akka.actor.DeathPactException;
import akka.actor.OneForOneStrategy;
import akka.actor.SupervisorStrategy;
import akka.actor.SupervisorStrategyConfigurator;
import akka.japi.pf.DeciderBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The standard Mantis Actor supervisor strategy.
*/
public class MantisActorSupervisorStrategy implements SupervisorStrategyConfigurator {
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisActorSupervisorStrategy.class);
    private static final MantisActorSupervisorStrategy INSTANCE = new MantisActorSupervisorStrategy();

    /** @return the process-wide singleton instance */
    public static MantisActorSupervisorStrategy getInstance() {
        return INSTANCE;
    }

    /**
     * Builds a one-for-one strategy that stops the failing child for fatal lifecycle
     * exceptions (init failure, kill, death pact) and resumes it — instead of Akka's
     * default restart — for every other exception. Match order matters: the generic
     * {@code Exception} case must remain last.
     */
    @Override
    public SupervisorStrategy create() {
        // Singleton lookup hoisted out of the lambdas; each branch also bumps its metric.
        final ActorSystemMetrics metrics = ActorSystemMetrics.getInstance();
        return new OneForOneStrategy(DeciderBuilder
            .match(ActorInitializationException.class, e -> {
                metrics.incrementActorInitExceptionCount();
                LOGGER.error("Stopping the actor because of exception", e);
                return SupervisorStrategy.stop();
            })
            .match(ActorKilledException.class, e -> {
                metrics.incrementActorKilledCount();
                LOGGER.error("Stopping the actor because of exception", e);
                return SupervisorStrategy.stop();
            })
            .match(DeathPactException.class, e -> {
                metrics.incrementActorDeathPactExcCount();
                LOGGER.error("Stopping the actor because of exception", e);
                return SupervisorStrategy.stop();
            })
            .match(Exception.class, e -> {
                LOGGER.error("resuming actor on exception {}", e.getMessage(), e);
                metrics.incrementActorResumeCount();
                return SupervisorStrategy.resume();
            })
            .build());
    }
}
| 8,122 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/akka/MeteredMessageQueue.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.akka;
import akka.actor.ActorRef;
import akka.dispatch.Envelope;
import akka.dispatch.MessageQueue;
import akka.dispatch.UnboundedMessageQueueSemantics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.patterns.PolledMeter;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
/**
* A custom implementation of a message queue used by a few key Actors. This implementation
* keeps track of enqueue and wait rates to the Actor queue.
*/
public class MeteredMessageQueue implements MessageQueue, UnboundedMessageQueueSemantics {

    // Actor path this queue belongs to; used as a metric tag.
    private final String path;
    // Incremented for every message enqueued.
    private final Counter insertCounter;
    // Records how long each message sat in the queue before being dequeued.
    private final Timer waitTimer;
    private final ConcurrentLinkedQueue<Entry> queue = new ConcurrentLinkedQueue<>();

    /**
     * Creates a metered queue for the given actor path and registers the insert
     * counter, wait timer, and a polled queue-size gauge with the Spectator registry.
     *
     * @param path the actor path, used to tag all emitted metrics
     */
    public MeteredMessageQueue(final String path) {
        final Registry registry = SpectatorRegistryFactory.getRegistry();
        this.path = path;
        this.insertCounter = registry.counter("akka.queue.insert", "path", path);
        this.waitTimer = registry.timer("akka.queue.wait", "path", path);
        PolledMeter
            .using(registry)
            .withName("akka.queue.size")
            .withTag("path", path)
            .monitorSize(queue);
    }

    /** Pairs an {@link Envelope} with the nano-timestamp of when it was enqueued. */
    static final class Entry {
        private final Envelope envelope;
        private final long enqueuedAtNanos;

        Entry(final Envelope envelope) {
            this.envelope = envelope;
            this.enqueuedAtNanos = System.nanoTime();
        }
    }

    /**
     * Counts the insert and appends the message to the tail of the queue.
     *
     * @param receiver the owning actor (unused here)
     * @param handle   the message envelope to queue
     */
    public void enqueue(ActorRef receiver, Envelope handle) {
        insertCounter.increment();
        queue.offer(new Entry(handle));
    }

    /**
     * Removes the head of the queue, recording the time it spent waiting.
     *
     * @return the next envelope, or {@code null} when the queue is empty
     */
    public Envelope dequeue() {
        final Entry head = queue.poll();
        if (head == null) {
            return null;
        }
        waitTimer.record(System.nanoTime() - head.enqueuedAtNanos, TimeUnit.NANOSECONDS);
        return head.envelope;
    }

    /** @return the number of messages currently queued */
    public int numberOfMessages() {
        return queue.size();
    }

    /** @return {@code true} when at least one message is queued */
    public boolean hasMessages() {
        return !queue.isEmpty();
    }

    /**
     * Discards all pending messages.
     *
     * @param owner       the owning actor (unused here)
     * @param deadLetters the dead-letter queue (unused here; messages are dropped)
     */
    public void cleanUp(ActorRef owner, MessageQueue deadLetters) {
        queue.clear();
    }
}
| 8,123 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/akka/UnboundedMeteredMailbox.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.akka;
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.dispatch.MailboxType;
import akka.dispatch.MessageQueue;
import akka.dispatch.ProducesMessageQueue;
import com.typesafe.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Option;
/**
* A simple unbounded metered mail box.
*/
public class UnboundedMeteredMailbox implements MailboxType, ProducesMessageQueue<MeteredMessageQueue> {
    private static final Logger LOGGER = LoggerFactory.getLogger(UnboundedMeteredMailbox.class);

    private final ActorSystem.Settings settings;
    private final Config config;

    /**
     * Creates the mailbox type. This (settings, config) constructor shape is the one
     * Akka's mailbox machinery invokes reflectively.
     *
     * @param settings actor system settings
     * @param config   mailbox configuration
     */
    public UnboundedMeteredMailbox(final ActorSystem.Settings settings, final Config config) {
        this.settings = settings;
        this.config = config;
    }

    /**
     * Creates a {@link MeteredMessageQueue} tagged with the owning actor's name,
     * or {@code "unknown"} when no owner is supplied.
     *
     * @param owner  the actor owning the new queue, if any
     * @param system the actor system, if any (unused here)
     * @return a new metered message queue
     */
    public MessageQueue create(final Option<ActorRef> owner, final Option<ActorSystem> system) {
        final String path = owner.isEmpty() ? "unknown" : tagValue(owner.get().path());
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("created message queue for {}", path);
        }
        return new MeteredMessageQueue(path);
    }

    /** Summarizes the actor path (its final segment) for use as a metric tag. */
    private String tagValue(ActorPath path) {
        return path.name();
    }
}
| 8,124 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/MasterApiAkkaService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.settings.ServerSettings;
import akka.http.javadsl.settings.WebSocketSettings;
import akka.stream.Materializer;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.Sink;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.MasterApiMetrics;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandlerImpl;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobArtifactsRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
public class MasterApiAkkaService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(MasterApiAkkaService.class);
private final MasterMonitor masterMonitor;
private final MasterDescription masterDescription;
private final ActorRef jobClustersManagerActor;
private final ActorRef resourceClustersHostManagerActor;
private final ResourceClusters resourceClusters;
private final ActorRef statusEventBrokerActor;
private final int port;
private final IMantisPersistenceProvider storageProvider;
private final LifecycleEventPublisher lifecycleEventPublisher;
private final MantisMasterRoute mantisMasterRoute;
private final ILeadershipManager leadershipManager;
private final ActorSystem system;
private final Materializer materializer;
private final ExecutorService executorService;
private final CountDownLatch serviceLatch = new CountDownLatch(1);
    /**
     * Creates the Master API service: validates required collaborators, stores them,
     * creates a dedicated actor system and materializer for the HTTP layer, wires all
     * v0/v1 API routes, and launches the API server on a single background daemon thread.
     *
     * NOTE(review): the server thread is started from within this constructor, so a
     * partially-constructed {@code this} is visible to that thread — presumably safe
     * because all fields are assigned before {@code execute} is called, but confirm.
     *
     * @param masterMonitor monitors the current master; required
     * @param masterDescription description of this master instance; required
     * @param jobClustersManagerActor actor managing job clusters; required
     * @param statusEventBrokerActor actor brokering job status events; required
     * @param resourceClusters resource clusters gateway (may be passed through to routes)
     * @param resourceClustersHostManagerActor actor managing resource cluster hosts
     * @param serverPort port the API server binds to
     * @param mantisStorageProvider persistence provider; required
     * @param lifecycleEventPublisher lifecycle event publisher; required
     * @param leadershipManager leadership manager; required
     * @param agentClusterOperations agent cluster operations used by agent routes
     */
    public MasterApiAkkaService(final MasterMonitor masterMonitor,
                                final MasterDescription masterDescription,
                                final ActorRef jobClustersManagerActor,
                                final ActorRef statusEventBrokerActor,
                                final ResourceClusters resourceClusters,
                                final ActorRef resourceClustersHostManagerActor,
                                final int serverPort,
                                final IMantisPersistenceProvider mantisStorageProvider,
                                final LifecycleEventPublisher lifecycleEventPublisher,
                                final ILeadershipManager leadershipManager,
                                final AgentClusterOperations agentClusterOperations) {
        super(true);
        // Fail fast on missing required collaborators.
        Preconditions.checkNotNull(masterMonitor, "MasterMonitor");
        Preconditions.checkNotNull(masterDescription, "masterDescription");
        Preconditions.checkNotNull(jobClustersManagerActor, "jobClustersManagerActor");
        Preconditions.checkNotNull(statusEventBrokerActor, "statusEventBrokerActor");
        Preconditions.checkNotNull(mantisStorageProvider, "mantisStorageProvider");
        Preconditions.checkNotNull(lifecycleEventPublisher, "lifecycleEventPublisher");
        Preconditions.checkNotNull(leadershipManager, "leadershipManager");
        this.masterMonitor = masterMonitor;
        this.masterDescription = masterDescription;
        this.jobClustersManagerActor = jobClustersManagerActor;
        this.resourceClustersHostManagerActor = resourceClustersHostManagerActor;
        this.statusEventBrokerActor = statusEventBrokerActor;
        this.resourceClusters = resourceClusters;
        this.port = serverPort;
        this.storageProvider = mantisStorageProvider;
        this.lifecycleEventPublisher = lifecycleEventPublisher;
        this.leadershipManager = leadershipManager;
        // Dedicated actor system + materializer for the HTTP server.
        this.system = ActorSystem.create("MasterApiActorSystem");
        this.materializer = Materializer.createMaterializer(system);
        this.mantisMasterRoute = configureApiRoutes(this.system, agentClusterOperations);
        // Single daemon thread that runs (and blocks inside) startAPIServer.
        this.executorService = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "MasterApiAkkaServiceThread");
            t.setDaemon(true);
            return t;
        });
        executorService.execute(() -> {
            try {
                startAPIServer();
            } catch (Exception e) {
                logger.warn("caught exception starting API server", e);
            }
        });
    }
private MantisMasterRoute configureApiRoutes(final ActorSystem actorSystem, final AgentClusterOperations agentClusterOperations) {
// Setup API routes
final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(jobClustersManagerActor);
final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(jobClustersManagerActor);
final MasterDescriptionRoute masterDescriptionRoute = new MasterDescriptionRoute(masterDescription);
final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, actorSystem);
java.time.Duration idleTimeout = actorSystem.settings().config().getDuration("akka.http.server.idle-timeout");
logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
final JobStatusRouteHandler jobStatusRouteHandler = new JobStatusRouteHandlerAkkaImpl(actorSystem, statusEventBrokerActor);
final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout);
final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler);
final JobClusterRoute v0JobClusterRoute = new JobClusterRoute(jobClusterRouteHandler, jobRouteHandler, actorSystem);
final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(agentClusterOperations, actorSystem);
final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
final JobClustersRoute v1JobClusterRoute = new JobClustersRoute(jobClusterRouteHandler, actorSystem);
final JobsRoute v1JobsRoute = new JobsRoute(jobClusterRouteHandler, jobRouteHandler, actorSystem);
final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
final AgentClustersRoute v1AgentClustersRoute = new AgentClustersRoute(agentClusterOperations);
final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute = new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);
final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(jobStatusRouteHandler);
final JobArtifactRouteHandler jobArtifactRouteHandler = new JobArtifactRouteHandlerImpl(storageProvider);
final JobArtifactsRoute v1JobArtifactsRoute = new JobArtifactsRoute(jobArtifactRouteHandler);
final LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(masterMonitor, leadershipManager);
final ResourceClusterRouteHandler resourceClusterRouteHandler = new ResourceClusterRouteHandlerAkkaImpl(
resourceClustersHostManagerActor);
return new MantisMasterRoute(
actorSystem,
leaderRedirectionFilter,
masterDescriptionRoute,
v0JobClusterRoute,
v0JobRoute,
v0JobDiscoveryRoute,
v0JobStatusRoute,
v0AgentClusterRoute,
v1JobClusterRoute,
v1JobsRoute,
v1JobArtifactsRoute,
v1AdminMasterRoute,
v1AgentClustersRoute,
v1JobDiscoveryStreamRoute,
v1LastSubmittedJobIdStreamRoute,
v1JobStatusStreamRoute,
resourceClusters,
resourceClusterRouteHandler);
}
/**
 * Binds the Mantis Master API HTTP server on 0.0.0.0:{port} and blocks the
 * calling thread on {@code serviceLatch} until {@code shutdown()} releases it;
 * the server is then unbound and the process exits. Bind/serve failures are
 * treated as fatal: the actor system is terminated and the JVM exits with
 * code 2 so an external supervisor can restart the master.
 */
private void startAPIServer() {
    // Materialize the full route tree into a reusable request->response flow.
    final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
        this.mantisMasterRoute.createRoute().flow(system, materializer);
    ServerSettings defaultSettings = ServerSettings.create(system);
    java.time.Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout");
    logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
    // Send websocket keep-alive "pong" frames just under the idle timeout so
    // long-lived streaming connections are not closed as idle by the server.
    WebSocketSettings customWebsocketSettings = defaultSettings.getWebsocketSettings()
        .withPeriodicKeepAliveMaxIdle(Duration.create(idleTimeout.getSeconds() - 1, TimeUnit.SECONDS))
        .withPeriodicKeepAliveMode("pong");
    ServerSettings customServerSettings = defaultSettings.withWebsocketSettings(customWebsocketSettings);
    final CompletionStage<ServerBinding> binding = Http.get(system)
        .newServerAt("0.0.0.0", port)
        .withSettings(customServerSettings)
        .connectionSource()
        .to(Sink.foreach(connection -> {
            // Count every incoming connection before handing it to the routes.
            MasterApiMetrics.getInstance().incrementIncomingRequestCount();
            connection.handleWith(routeFlow, materializer);
        }))
        .run(materializer)
        .exceptionally(failure -> {
            // A failed binding is unrecoverable for the master API; exit loudly.
            System.err.println("API service exited, committing suicide !" + failure.getMessage());
            logger.info("Master API service exited in error, committing suicide !");
            system.terminate();
            System.exit(2);
            return null;
        });
    logger.info("Starting Mantis Master API on port {}", port);
    try {
        // Park this thread until shutdown() counts the latch down.
        serviceLatch.await();
    } catch (InterruptedException e) {
        logger.error("Master API thread interrupted, committing suicide", e);
        System.exit(2);
    }
    binding
        .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
        .thenAccept(unbound -> {
            logger.error("Master API service unbind, committing suicide");
            system.terminate();
            System.exit(2);
        }); // and shutdown when done
}
@Override
public void start() {
    // Once this node wins leadership and becomes active, mark the leader as
    // ready to serve traffic.
    final Runnable onActive = () -> {
        logger.info("marking leader READY");
        leadershipManager.setLeaderReady();
    };
    super.awaitActiveModeAndStart(onActive);
}
@Override
public void shutdown() {
    // Teardown order matters: stop the base service first, then release the
    // latch startAPIServer() is blocked on (which triggers the HTTP unbind),
    // then stop the executor and finally the actor system.
    super.shutdown();
    logger.info("Shutting down Mantis Master API");
    serviceLatch.countDown();
    executorService.shutdownNow();
    system.terminate();
}
}
| 8,125 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/MantisMasterRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import akka.actor.ActorSystem;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobArtifactsRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.master.api.akka.route.v1.ResourceClustersLeaderExclusiveRoute;
import io.mantisrx.master.api.akka.route.v1.ResourceClustersNonLeaderRedirectRoute;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
/**
 * Top-level Akka HTTP route for the Mantis master API. Aggregates the legacy
 * v0 routes, the v1 routes and the resource-cluster routes into one route
 * tree. Every sub-route is wrapped in the leader filter: most routes redirect
 * to the current leader when this node is not leading, while the
 * leader-exclusive resource-cluster route rejects such requests outright.
 */
public class MantisMasterRoute extends AllDirectives {
    // Redirects (or rejects) requests arriving at a non-leader node.
    private final LeaderRedirectionFilter leaderRedirectionFilter;
    // Legacy v0 API routes.
    private final JobClusterRoute v0JobClusterRoute;
    private final JobRoute v0JobRoute;
    private final JobDiscoveryRoute v0JobDiscoveryRoute;
    private final JobStatusRoute v0JobStatusRoute;
    private final AgentClusterRoute v0AgentClusterRoute;
    private final MasterDescriptionRoute v0MasterDescriptionRoute;
    // v1 API routes.
    private final JobClustersRoute v1JobClusterRoute;
    private final JobsRoute v1JobsRoute;
    private final JobArtifactsRoute v1JobArtifactsRoute;
    private final AdminMasterRoute v1MasterRoute;
    private final AgentClustersRoute v1AgentClustersRoute;
    private final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute;
    private final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute;
    private final JobStatusStreamRoute v1JobStatusStreamRoute;
    // Resource-cluster routes, constructed here rather than injected.
    private final ResourceClustersNonLeaderRedirectRoute resourceClustersNonLeaderRedirectRoute;
    private final ResourceClustersLeaderExclusiveRoute resourceClustersLeaderExclusiveRoute;
    /**
     * Wires all sub-routes together. Pure assignment apart from the two
     * resource-cluster routes, which are instantiated from the supplied
     * {@code resourceClusters} and {@code resourceClusterRouteHandler}.
     */
    public MantisMasterRoute(
        final ActorSystem actorSystem,
        final LeaderRedirectionFilter leaderRedirectionFilter,
        final MasterDescriptionRoute v0MasterDescriptionRoute,
        final JobClusterRoute v0JobClusterRoute,
        final JobRoute v0JobRoute,
        final JobDiscoveryRoute v0JobDiscoveryRoute,
        final JobStatusRoute v0JobStatusRoute,
        final AgentClusterRoute v0AgentClusterRoute,
        final JobClustersRoute v1JobClusterRoute,
        final JobsRoute v1JobsRoute,
        final JobArtifactsRoute v1JobArtifactsRoute,
        final AdminMasterRoute v1MasterRoute,
        final AgentClustersRoute v1AgentClustersRoute,
        final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute,
        final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute,
        final JobStatusStreamRoute v1JobStatusStreamRoute,
        final ResourceClusters resourceClusters,
        final ResourceClusterRouteHandler resourceClusterRouteHandler) {
        this.leaderRedirectionFilter = leaderRedirectionFilter;
        this.v0MasterDescriptionRoute = v0MasterDescriptionRoute;
        this.v0JobClusterRoute = v0JobClusterRoute;
        this.v0JobRoute = v0JobRoute;
        this.v0JobDiscoveryRoute = v0JobDiscoveryRoute;
        this.v0JobStatusRoute = v0JobStatusRoute;
        this.v0AgentClusterRoute = v0AgentClusterRoute;
        this.v1JobClusterRoute = v1JobClusterRoute;
        this.v1JobsRoute = v1JobsRoute;
        this.v1JobArtifactsRoute = v1JobArtifactsRoute;
        this.v1MasterRoute = v1MasterRoute;
        this.v1AgentClustersRoute = v1AgentClustersRoute;
        this.v1JobDiscoveryStreamRoute = v1JobDiscoveryStreamRoute;
        this.v1LastSubmittedJobIdStreamRoute = v1LastSubmittedJobIdStreamRoute;
        this.v1JobStatusStreamRoute = v1JobStatusStreamRoute;
        this.resourceClustersNonLeaderRedirectRoute = new ResourceClustersNonLeaderRedirectRoute(
            resourceClusters, resourceClusterRouteHandler, actorSystem);
        this.resourceClustersLeaderExclusiveRoute = new ResourceClustersLeaderExclusiveRoute(resourceClusters);
    }
    /**
     * Builds the combined route. Note: akka-http {@code concat} tries routes in
     * declaration order (first match wins), so the ordering below is
     * significant — do not reorder casually.
     */
    public Route createRoute() {
        return concat(
            v0MasterDescriptionRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobStatusRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobDiscoveryRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0AgentClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobsRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobArtifactsRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1MasterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1AgentClustersRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobDiscoveryStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1LastSubmittedJobIdStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobStatusStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            resourceClustersNonLeaderRedirectRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            // Leader-exclusive: rejects (does not redirect) on non-leader nodes.
            resourceClustersLeaderExclusiveRoute.createRoute(leaderRedirectionFilter::rejectIfNotLeader)
        );
    }
}
| 8,126 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/Jackson.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import akka.http.javadsl.marshalling.Marshaller;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.MediaTypes;
import akka.http.javadsl.model.RequestEntity;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.SerializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.FilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * JSON (un)marshalling helpers for the Akka HTTP routes, built around a
 * shared, leniently-configured Jackson {@link ObjectMapper}.
 */
public class Jackson {
    private static final Logger logger = LoggerFactory.getLogger(Jackson.class);

    // Shared mapper: tolerates unknown properties on input, does not fail on
    // bean-less objects on output, and understands JDK8 Optional types.
    private static final ObjectMapper defaultObjectMapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
        .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
        .registerModule(new Jdk8Module());

    // Fallback filter provider used when the caller supplies none; unknown
    // filter ids are ignored so @JsonFilter-annotated types still serialize.
    public static final SimpleFilterProvider DEFAULT_FILTER_PROVIDER;

    static {
        DEFAULT_FILTER_PROVIDER = new SimpleFilterProvider();
        DEFAULT_FILTER_PROVIDER.setFailOnUnknownId(false);
    }

    /** Marshaller using the default mapper and the default filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller() {
        return marshaller(defaultObjectMapper, null);
    }

    /** Marshaller using the default mapper and the given filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller(FilterProvider filterProvider) {
        return marshaller(defaultObjectMapper, filterProvider);
    }

    /** Marshaller using the given mapper and the default filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller(ObjectMapper mapper) {
        // Delegate instead of duplicating the wrapEntity lambda; toJSON treats a
        // null FilterProvider as DEFAULT_FILTER_PROVIDER, so behavior is identical.
        return marshaller(mapper, null);
    }

    /**
     * Marshaller that serializes entities to application/json using the given
     * mapper and filter provider (null means {@link #DEFAULT_FILTER_PROVIDER}).
     * Serialization failures are logged and surfaced as
     * {@link IllegalArgumentException}.
     */
    public static <T> Marshaller<T, RequestEntity> marshaller(
        ObjectMapper mapper,
        FilterProvider filterProvider) {
        return Marshaller.wrapEntity(
            u -> {
                try {
                    return toJSON(mapper, filterProvider, u);
                } catch (JsonProcessingException e) {
                    // Truncate the payload representation so log lines stay bounded.
                    String objStr = u.toString();
                    String errMsg = "cannot marshal to Json " + objStr.substring(0, Math.min(objStr.length(), 100));
                    logger.warn(errMsg, e);
                    throw new IllegalArgumentException(errMsg);
                }
            },
            Marshaller.stringToEntity(),
            MediaTypes.APPLICATION_JSON
        );
    }

    /** Unmarshaller for the given class using the default mapper. */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(Class<T> expectedType) {
        return unmarshaller(defaultObjectMapper, expectedType);
    }

    /** Unmarshaller for the given generic type using the default mapper. */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(TypeReference<T> expectedType) {
        return unmarshaller(defaultObjectMapper, expectedType);
    }

    /**
     * Unmarshaller that maps an empty request entity to {@code null} instead of
     * failing, for endpoints whose JSON body is optional.
     */
    public static <T> Unmarshaller<HttpEntity, T> optionalEntityUnmarshaller(Class<T> expectedType) {
        return Unmarshaller.forMediaType(MediaTypes.APPLICATION_JSON, Unmarshaller.entityToString())
            .thenApply(s -> {
                if (s.isEmpty()) {
                    return null;
                } else {
                    try {
                        return fromJSON(defaultObjectMapper, s, expectedType);
                    } catch (IOException e) {
                        logger.warn("cannot unmarshal json", e);
                        throw new IllegalArgumentException("cannot unmarshall Json as " +
                            expectedType.getSimpleName());
                    }
                }
            });
    }

    /**
     * Unmarshaller for the given class using the given mapper. Parse failures
     * are logged and surfaced as {@link IllegalArgumentException}.
     */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(
        ObjectMapper mapper,
        Class<T> expectedType) {
        return Unmarshaller.forMediaType(MediaTypes.APPLICATION_JSON, Unmarshaller.entityToString())
            .thenApply(s -> {
                try {
                    return fromJSON(mapper, s, expectedType);
                } catch (IOException e) {
                    logger.warn("cannot unmarshal json", e);
                    throw new IllegalArgumentException("cannot unmarshall Json as " +
                        expectedType.getSimpleName());
                }
            });
    }

    /**
     * Unmarshaller for the given generic type using the given mapper. Parse
     * failures are logged and surfaced as {@link IllegalArgumentException}.
     */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(
        ObjectMapper mapper,
        TypeReference<T> expectedType) {
        return Unmarshaller.forMediaType(MediaTypes.APPLICATION_JSON, Unmarshaller.entityToString())
            .thenApply(s -> {
                try {
                    return fromJSON(mapper, s, expectedType);
                } catch (IOException e) {
                    logger.warn("cannot unmarshal json", e);
                    throw new IllegalArgumentException("cannot unmarshall Json as " +
                        expectedType.getType()
                            .getTypeName());
                }
            });
    }

    /**
     * Serializes {@code object} with the given mapper; a null {@code filters}
     * falls back to {@link #DEFAULT_FILTER_PROVIDER}.
     */
    public static String toJSON(
        ObjectMapper mapper,
        FilterProvider filters,
        Object object) throws JsonProcessingException {
        if (filters == null) {
            filters = DEFAULT_FILTER_PROVIDER;
        }
        return mapper.writer(filters).writeValueAsString(object);
    }

    /** Deserializes {@code json} as the given generic type with the given mapper. */
    public static <T> T fromJSON(
        ObjectMapper mapper,
        String json,
        TypeReference<T> expectedType) throws IOException {
        return mapper.readerFor(expectedType).readValue(json);
    }

    /** Deserializes {@code json} as the given class with the given mapper. */
    public static <T> T fromJSON(
        ObjectMapper mapper,
        String json,
        Class<T> expectedType) throws IOException {
        return mapper.readerFor(expectedType).readValue(json);
    }

    /** Deserializes {@code json} as the given class with the default mapper. */
    public static <T> T fromJSON(String json, Class<T> expectedType) throws IOException {
        return defaultObjectMapper.readerFor(expectedType).readValue(json);
    }

    /** Deserializes {@code json} as the given generic type with the default mapper. */
    public static <T> T fromJSON(String json, TypeReference<T> expectedType) throws IOException {
        return defaultObjectMapper.readerFor(expectedType).readValue(json);
    }

    /** Serializes {@code object} with the default mapper and no filters. */
    public static String toJson(Object object) throws IOException {
        return defaultObjectMapper.writeValueAsString(object);
    }
}
| 8,127 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/MasterApiMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
/**
 * Eagerly-initialized singleton holding the master API's request/response
 * counters (incoming/throttled requests, 2xx/4xx/5xx responses, ask timeouts).
 */
public class MasterApiMetrics {
    private final Counter resp2xx;
    private final Counter resp4xx;
    private final Counter resp5xx;
    private final Counter incomingRequestCount;
    private final Counter throttledRequestCount;
    private final Counter askTimeOutCount;
    private static final MasterApiMetrics INSTANCE = new MasterApiMetrics();

    private MasterApiMetrics() {
        // Register all counters under a single metric group and keep handles to
        // the registered instances (registerAndGet may return existing ones).
        Metrics m = new Metrics.Builder()
            .id("MasterApiMetrics")
            .addCounter("incomingRequestCount")
            .addCounter("throttledRequestCount")
            .addCounter("resp2xx")
            .addCounter("resp4xx")
            .addCounter("resp5xx")
            .addCounter("askTimeOutCount")
            .build();
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.askTimeOutCount = metrics.getCounter("askTimeOutCount");
        this.resp2xx = metrics.getCounter("resp2xx");
        this.resp4xx = metrics.getCounter("resp4xx");
        this.resp5xx = metrics.getCounter("resp5xx");
        this.incomingRequestCount = metrics.getCounter("incomingRequestCount");
        this.throttledRequestCount = metrics.getCounter("throttledRequestCount");
    }

    /** Returns the shared instance. (Dropped redundant 'final': static methods cannot be overridden.) */
    public static MasterApiMetrics getInstance() {
        return INSTANCE;
    }

    public void incrementResp2xx() {
        resp2xx.increment();
    }

    public void incrementResp4xx() {
        resp4xx.increment();
    }

    public void incrementResp5xx() {
        resp5xx.increment();
    }

    public void incrementAskTimeOutCount() {
        askTimeOutCount.increment();
    }

    public void incrementIncomingRequestCount() {
        incomingRequestCount.increment();
    }

    public void incrementThrottledRequestCount() {
        throttledRequestCount.increment();
    }
}
| 8,128 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/HttpRequestMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Sets;
import java.util.Set;
/**
 * Per-endpoint request counters for the v1 API, keyed by a fixed whitelist of
 * endpoint names. Counters are published to the shared Spectator registry.
 */
public class HttpRequestMetrics {
    public enum HttpVerb {
        GET,
        POST,
        PUT,
        DELETE
    }

    /** Whitelist of endpoint names accepted by {@link #incrementEndpointMetrics}. */
    public static class Endpoints {
        public static final String JOB_ARTIFACTS = "api.v1.jobArtifacts";
        public static final String JOB_ARTIFACTS_NAMES = "api.v1.jobArtifacts.names";
        public static final String JOB_CLUSTERS = "api.v1.jobClusters";
        public static final String JOB_CLUSTER_INSTANCE = "api.v1.jobClusters.instance";
        public static final String JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO = "api.v1.jobClusters.instance.latestJobDiscoveryInfo";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT = "api.v1.jobClusters.instance.actions.updateArtifact";
        public static final String JOB_CLUSTER_INSTANCE_SCHEDULING_INFO_UPDATE = "api.v1.jobClusters.instance.actions.updateSchedulingInfo";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA = "api.v1.jobClusters.instance.actions.updateSla";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY = "api.v1.jobClusters.instance.actions.updateMigrationStrategy";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL = "api.v1.jobClusters.instance.actions.updateLabel";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER = "api.v1.jobClusters.instance.actions.enableCluster";
        public static final String JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER = "api.v1.jobClusters.instance.actions.disableCluster";
        public static final String JOBS = "api.v1.jobs";
        public static final String JOB_CLUSTER_INSTANCE_JOBS = "api.v1.jobClusters.instance.jobs";
        public static final String JOB_INSTANCE = "api.v1.jobs.instance";
        public static final String JOB_INSTANCE_ARCHIVED_WORKERS = "api.v1.jobs.instance.archivedWorkers";
        public static final String JOB_CLUSTER_INSTANCE_JOB_INSTANCE = "api.v1.jobClusters.instance.jobs.instance";
        public static final String JOB_CLUSTER_INSTANCE_JOB_INSTANCE_ARCHIVED = "api.v1.jobClusters.instance.jobs.instance.archived";
        public static final String JOBS_ACTION_QUICKSUBMIT = "api.v1.jobs.actions.quickSubmit";
        public static final String JOBS_ACTION_POST_JOB_STATUS = "api.v1.jobs.actions.postJobStatus";
        public static final String JOB_INSTANCE_ACTION_SCALE_STAGE = "api.v1.jobs.instance.actions.scaleStage";
        public static final String JOB_INSTANCE_ACTION_RESUBMIT_WORKER = "api.v1.jobs.instance.actions.resubmitWorker";
        public static final String MASTER_INFO = "api.v1.masterInfo";
        public static final String MASTER_CONFIGS = "api.v1.masterConfigs";
        public static final String AGENT_CLUSTERS = "api.v1.agentClusters";
        public static final String AGENT_CLUSTERS_JOBS = "api.v1.agentClusters.jobs";
        public static final String AGENT_CLUSTERS_AUTO_SCALE_POLICY = "api.v1.agentClusters.autoScalePolicy";
        public static final String JOB_STATUS_STREAM = "api.v1.jobStatusStream.instance";
        public static final String JOB_DISCOVERY_STREAM = "api.v1.jobDiscoveryStream.instance";
        public static final String LAST_SUBMITTED_JOB_ID_STREAM = "api.v1.lastSubmittedJobIdStream.instance";
        public static final String RESOURCE_CLUSTERS = "api.v1.resourceClusters";

        private static final String[] endpoints = new String[]{
            JOB_ARTIFACTS,
            JOB_ARTIFACTS_NAMES,
            JOB_CLUSTERS,
            JOB_CLUSTER_INSTANCE,
            JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO,
            JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT,
            // Was missing from this whitelist even though the constant is declared
            // above, which would make incrementEndpointMetrics reject it.
            JOB_CLUSTER_INSTANCE_SCHEDULING_INFO_UPDATE,
            JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA,
            JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY,
            JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL,
            JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER,
            JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER,
            JOBS,
            JOB_CLUSTER_INSTANCE_JOBS,
            JOB_INSTANCE,
            JOB_INSTANCE_ARCHIVED_WORKERS,
            JOB_CLUSTER_INSTANCE_JOB_INSTANCE,
            JOB_CLUSTER_INSTANCE_JOB_INSTANCE_ARCHIVED,
            JOBS_ACTION_QUICKSUBMIT,
            JOBS_ACTION_POST_JOB_STATUS,
            JOB_INSTANCE_ACTION_SCALE_STAGE,
            JOB_INSTANCE_ACTION_RESUBMIT_WORKER,
            MASTER_INFO,
            MASTER_CONFIGS,
            AGENT_CLUSTERS,
            AGENT_CLUSTERS_JOBS,
            AGENT_CLUSTERS_AUTO_SCALE_POLICY,
            JOB_STATUS_STREAM,
            JOB_DISCOVERY_STREAM,
            LAST_SUBMITTED_JOB_ID_STREAM,
            RESOURCE_CLUSTERS
        };

        private static final Set<String> endpointSet = Sets.newHashSet(endpoints);
    }

    private final Registry registry;
    private static final String METRIC_GROUP_ID = "apiv1";
    // Eagerly initialized: the previous lazy, unsynchronized getInstance() could
    // construct multiple instances under concurrent first access.
    private static final HttpRequestMetrics INSTANCE = new HttpRequestMetrics();

    private HttpRequestMetrics() {
        this.registry = SpectatorRegistryFactory.getRegistry();
    }

    /** Returns the shared, thread-safe instance. */
    public static HttpRequestMetrics getInstance() {
        return INSTANCE;
    }

    /**
     * Increments the counter for the given whitelisted endpoint, tagged with
     * the supplied tags.
     *
     * @throws IllegalArgumentException if {@code endpoint} is not in
     *         {@link Endpoints}' whitelist
     */
    public void incrementEndpointMetrics(
        String endpoint,
        final Tag... tags) {
        Preconditions.checkArgument(
            Endpoints.endpointSet.contains(endpoint),
            String.format("endpoint %s is not valid", endpoint));
        MetricId id = new MetricId(METRIC_GROUP_ID, endpoint, tags);
        registry.counter(id.getSpectatorId(registry)).increment();
    }
}
| 8,129 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobArtifactsRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.Route;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandler;
import io.mantisrx.master.jobcluster.proto.JobArtifactProto;
import io.mantisrx.master.jobcluster.proto.JobArtifactProto.SearchJobArtifactsRequest;
import io.mantisrx.master.jobcluster.proto.JobArtifactProto.UpsertJobArtifactResponse;
import io.mantisrx.server.core.domain.JobArtifact;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.SerializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* JobArtifactsRoute endpoints:
* - /api/v1/jobArtifacts (GET, POST)
* - /api/v1/jobArtifacts/names (GET)
*/
public class JobArtifactsRoute extends BaseRoute {
private static final Logger logger = LoggerFactory.getLogger(JobArtifactsRoute.class);
private static final PathMatcher0 JOB_ARTIFACTS_API_PREFIX = segment("api").slash("v1").slash("jobArtifacts");
private final JobArtifactRouteHandler jobArtifactRouteHandler;
// TODO(fdichiara): consolidate object mappers. This is needed because
// Instant cannot be serialized by the existing ObjectMappers
private static final ObjectMapper JAVA_TIME_COMPATIBLE_MAPPER = new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
.registerModule(new Jdk8Module())
.registerModule(new JavaTimeModule());
public static final SimpleFilterProvider DEFAULT_FILTER_PROVIDER;
static {
DEFAULT_FILTER_PROVIDER = new SimpleFilterProvider();
DEFAULT_FILTER_PROVIDER.setFailOnUnknownId(false);
}
public JobArtifactsRoute(final JobArtifactRouteHandler jobArtifactRouteHandler) {
this.jobArtifactRouteHandler = jobArtifactRouteHandler;
}
public Route constructRoutes() {
return concat(
pathPrefix(JOB_ARTIFACTS_API_PREFIX, () -> concat(
// /api/v1/jobArtifacts
pathEndOrSingleSlash(() -> concat(
// GET - search job artifacts by name and version (optional)
get(this::getJobArtifactsRoute),
// POST - register new job artifact
post(this::postJobArtifactRoute)
)),
// /api/v1/jobArtifacts/names
path(
"names",
() -> pathEndOrSingleSlash(
// GET - search job artifacts names by prefix
() -> get(this::listJobArtifactsByNameRoute)
)
)
)
)
);
}
@Override
public Route createRoute(Function<Route, Route> routeFilter) {
logger.info("creating /api/v1/jobArtifacts routes");
return super.createRoute(routeFilter);
}
private Route getJobArtifactsRoute() {
logger.trace("GET /api/v1/jobArtifacts called");
return parameterMap(param -> completeAsync(
jobArtifactRouteHandler.search(new SearchJobArtifactsRequest(param.get("name"), param.get("version"))),
resp -> completeOK(
resp.getJobArtifacts(),
Jackson.marshaller(JAVA_TIME_COMPATIBLE_MAPPER)),
HttpRequestMetrics.Endpoints.JOB_ARTIFACTS,
HttpRequestMetrics.HttpVerb.GET));
}
private Route postJobArtifactRoute() {
return entity(
Jackson.unmarshaller(JAVA_TIME_COMPATIBLE_MAPPER, JobArtifact.class),
jobArtifact -> {
logger.trace("POST /api/v1/jobArtifacts called with payload: {}", jobArtifact);
try {
final CompletionStage<UpsertJobArtifactResponse> response = jobArtifactRouteHandler.upsert(new JobArtifactProto.UpsertJobArtifactRequest(jobArtifact));
return completeAsync(
response,
resp -> complete(
StatusCodes.CREATED,
resp.getArtifactID(),
Jackson.marshaller(JAVA_TIME_COMPATIBLE_MAPPER)),
HttpRequestMetrics.Endpoints.JOB_ARTIFACTS,
HttpRequestMetrics.HttpVerb.POST);
} catch (Exception e) {
return complete(StatusCodes.INTERNAL_SERVER_ERROR, "Failed to store job artifact");
}
}
);
}
private Route listJobArtifactsByNameRoute() {
logger.trace("GET /api/v1/jobArtifacts/names called");
return parameterMap(param -> completeAsync(
jobArtifactRouteHandler.listArtifactsByName(new JobArtifactProto.ListJobArtifactsByNameRequest(param.getOrDefault("prefix", ""), param.getOrDefault("contains", ""))),
resp -> completeOK(
resp.getNames(),
Jackson.marshaller(JAVA_TIME_COMPATIBLE_MAPPER)),
HttpRequestMetrics.Endpoints.JOB_ARTIFACTS_NAMES,
HttpRequestMetrics.HttpVerb.GET));
}
}
| 8,130 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/AdminMasterRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.Route;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* Master description route
* Defines the following end points:
* /api/v1/masterInfo (GET)
* /api/v1/masterConfigs (GET)
*/
public class AdminMasterRoute extends BaseRoute {
private static final Logger logger = LoggerFactory.getLogger(AdminMasterRoute.class);
private static final PathMatcher0 MASTER_API_PREFIX = segment("api").slash("v1");
private static final ObjectMapper mapper = new ObjectMapper().configure(
DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES,
false);
private final MasterDescription masterDesc;
private final List<Configlet> configs = new ArrayList<>();
/** Immutable named configuration value exposed by the v1 masterConfigs endpoint. */
public static class Configlet {
    private final String name;
    private final String value;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public Configlet(@JsonProperty("name") String name, @JsonProperty("value") String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final Configlet other = (Configlet) o;
        return Objects.equals(name, other.name)
            && Objects.equals(value, other.value);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, value);
    }

    @Override
    public String toString() {
        // Same rendering as the original hand-built concatenation.
        return String.format("Configlet{name='%s', value='%s'}", name, value);
    }
}
static class WorkerResourceLimits {
private final int maxCpuCores;
private final int maxMemoryMB;
private final int maxNetworkMbps;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public WorkerResourceLimits(
@JsonProperty("maxCpuCores") final int maxCpuCores,
@JsonProperty("maxMemoryMB") final int maxMemoryMB,
@JsonProperty("maxNetworkMbps") final int maxNetworkMbps) {
this.maxCpuCores = maxCpuCores;
this.maxMemoryMB = maxMemoryMB;
this.maxNetworkMbps = maxNetworkMbps;
}
public int getMaxCpuCores() {
return maxCpuCores;
}
public int getMaxMemoryMB() {
return maxMemoryMB;
}
public int getMaxNetworkMbps() {
return maxNetworkMbps;
}
}
public AdminMasterRoute(final MasterDescription masterDescription) {
//TODO: hardcode some V1 admin master info, this should be cleaned up once v0 apis
// are deprecated
this.masterDesc = new MasterDescription(masterDescription.getHostname(),
masterDescription.getHostIP(),
masterDescription.getApiPort(),
masterDescription.getSchedInfoPort(),
-1,
"api/v1/jobs/actions/postJobStatus",
-1,
masterDescription.getCreateTime());
try {
configs.add(new Configlet(
JobConstraints.class.getSimpleName(),
mapper.writeValueAsString(JobConstraints.values())));
configs.add(new Configlet(
StageScalingPolicy.ScalingReason.class.getSimpleName(),
mapper.writeValueAsString(StageScalingPolicy.ScalingReason.values())));
configs.add(new Configlet(
WorkerMigrationConfig.MigrationStrategyEnum.class.getSimpleName(),
mapper.writeValueAsString(WorkerMigrationConfig.MigrationStrategyEnum.values())));
MasterConfiguration config = ConfigurationProvider.getConfig();
int maxCpuCores = config.getWorkerMachineDefinitionMaxCpuCores();
int maxMemoryMB = config.getWorkerMachineDefinitionMaxMemoryMB();
int maxNetworkMbps = config.getWorkerMachineDefinitionMaxNetworkMbps();
configs.add(new Configlet(
WorkerResourceLimits.class.getSimpleName(),
mapper.writeValueAsString(new WorkerResourceLimits(
maxCpuCores,
maxMemoryMB,
maxNetworkMbps))));
} catch (JsonProcessingException e) {
logger.error(e.getMessage(), e);
}
}
public List<Configlet> getConfigs() {
return configs;
}
@Override
protected Route constructRoutes() {
return pathPrefix(
MASTER_API_PREFIX,
() -> concat(
// GET api/v1/masterInfo
path(segment("masterInfo"), () -> pathEndOrSingleSlash(() -> concat(
get(this::getMasterInfo)))),
// GET api/v1/masterConfigs
path(segment("masterConfigs"), () -> pathEndOrSingleSlash(() -> concat(
get(this::getMasterConfigs))))
));
}
@Override
public Route createRoute(Function<Route, Route> routeFilter) {
logger.info("creating /api/v1/masterInfo routes");
logger.info("creating /api/v1/masterConfigs routes");
return super.createRoute(routeFilter);
}
private Route getMasterInfo() {
logger.info("GET /api/v1/masterInfo called");
HttpRequestMetrics.getInstance().incrementEndpointMetrics(
HttpRequestMetrics.Endpoints.MASTER_INFO,
new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
new BasicTag("responseCode", "200"));
return completeOK(masterDesc, Jackson.marshaller());
}
private Route getMasterConfigs() {
logger.info("GET /api/v1/masterConfigs called");
HttpRequestMetrics.getInstance().incrementEndpointMetrics(
HttpRequestMetrics.Endpoints.MASTER_CONFIGS,
new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
new BasicTag("responseCode", "200"));
return completeOK(configs, Jackson.marshaller());
}
} | 8,131 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.NotUsed;
import akka.http.javadsl.model.ws.Message;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.stream.javadsl.Flow;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* JobStatusStreamRoute
* Defines the following end points:
* /api/v1/jobStatusStream/{jobId} (websocket)
*/
public class JobStatusStreamRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobStatusStreamRoute.class);
    private static final PathMatcher0 API_V1_PREFIX = segment("api").slash("v1");

    // Delegate producing the per-job websocket message flow.
    private final JobStatusRouteHandler jobStatusRouteHandler;

    public JobStatusStreamRoute(final JobStatusRouteHandler jobStatusRouteHandler) {
        this.jobStatusRouteHandler = jobStatusRouteHandler;
    }

    @Override
    protected Route constructRoutes() {
        // websocket endpoint: /api/v1/jobStatusStream/{jobId}
        return pathPrefix(
            API_V1_PREFIX,
            () -> concat(
                path(
                    segment("jobStatusStream").slash(PathMatchers.segment()),
                    (jobId) -> get(() -> getJobStatusStreamRoute(jobId)))));
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobStatusStream routes");
        return super.createRoute(routeFilter);
    }

    // Upgrades the request to a websocket carrying the status stream for the given job.
    private Route getJobStatusStreamRoute(String jobId) {
        logger.info("/api/v1/jobStatusStream/{} called", jobId);
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
            HttpRequestMetrics.Endpoints.JOB_STATUS_STREAM);
        return handleWebSocketMessages(jobStatusRouteHandler.jobStatus(jobId));
    }
}
| 8,132 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobsRequest;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createWorkerStatusRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest.DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT;
import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.*;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.japi.Pair;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.PostJobStatusRequest;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.http.api.CompactJobInfo;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.shaded.com.google.common.base.Strings;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* JobsRoute
* Defines the following end points:
* api/v1/jobs (GET, POST)
* api/v1/jobClusters/{}/jobs (GET, POST)
* api/v1/jobs/{} (GET, DELETE)
* api/v1/jobClusters/{}/jobs/{} (GET)
* api/v1/jobs/{}/archivedWorkers (GET)
* api/v1/jobs/actions/quickSubmit (POST)
* api/v1/jobs/actions/postJobStatus (POST)
* api/v1/jobs/{}/actions/scaleStage (POST)
* api/v1/jobs/{}/actions/resubmitWorker (POST)
*/
public class JobsRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobsRoute.class);
    private static final PathMatcher0 JOBS_API_PREFIX = segment("api").slash("v1").slash("jobs");
    private static final PathMatcher1<String> CLUSTER_JOBS_API_PREFIX =
        segment("api").slash("v1")
            .slash("jobClusters")
            .slash(PathMatchers.segment())
            .slash("jobs");

    private final JobRouteHandler jobRouteHandler;
    private final JobClusterRouteHandler clusterRouteHandler;
    private final MasterConfiguration config;
    // Caches GET list responses keyed by request URI to shield handlers from hot read paths.
    private final Cache<Uri, RouteResult> routeResultCache;

    public JobsRoute(
        final JobClusterRouteHandler clusterRouteHandler,
        final JobRouteHandler jobRouteHandler,
        final ActorSystem actorSystem) {
        this.jobRouteHandler = jobRouteHandler;
        this.clusterRouteHandler = clusterRouteHandler;
        this.config = ConfigurationProvider.getConfig();
        this.routeResultCache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(), config.getApiCacheTtlMilliseconds());
    }

    @Override
    public Route constructRoutes() {
        return concat(
            pathPrefix(JOBS_API_PREFIX, () -> concat(
                // api/v1/jobs
                pathEndOrSingleSlash(() -> concat(
                    // GET - list jobs
                    get(this::getJobsRoute),
                    // POST - submit a job
                    post(this::postJobsRoute)
                )),
                // api/v1/jobs/{jobId}
                path(
                    PathMatchers.segment(),
                    (jobId) -> pathEndOrSingleSlash(() -> concat(
                        // GET - retrieve job detail by job ID
                        get(() -> getJobInstanceRoute(jobId)),
                        // DELETE - permanently kill a job.
                        delete(() -> deleteJobInstanceRoute(jobId)),
                        // reject post
                        post(() -> complete(StatusCodes.METHOD_NOT_ALLOWED))
                    ))
                ),
                // api/v1/jobs/{jobId}/archivedWorkers
                path(PathMatchers.segment().slash("archivedWorkers"),
                    (jobId) -> pathEndOrSingleSlash(() -> concat(
                        get(() -> getArchivedWorkers(jobId))
                    ))
                ),
                // api/v1/jobs/actions/quickSubmit
                path(
                    PathMatchers.segment("actions").slash("quickSubmit"),
                    () -> pathEndOrSingleSlash(
                        () ->
                            // POST - quick submit a job
                            post(this::postJobInstanceQuickSubmitRoute)
                    )
                ),
                // api/v1/jobs/actions/postJobStatus
                path(
                    PathMatchers.segment("actions").slash("postJobStatus"),
                    () -> pathEndOrSingleSlash(
                        () ->
                            // POST Job Status
                            post(this::postJobStatusRoute)
                    )
                ),
                // api/v1/jobs/{jobId}/actions/scaleStage
                path(
                    PathMatchers.segment().slash("actions").slash("scaleStage"),
                    (jobId) -> pathEndOrSingleSlash(
                        // POST - scale stage
                        () -> post(() -> postJobInstanceScaleStageRoute(jobId))
                    )
                ),
                // api/v1/jobs/{jobId}/actions/resubmitWorker
                path(
                    PathMatchers.segment().slash("actions").slash("resubmitWorker"),
                    (jobId) -> pathEndOrSingleSlash(
                        () ->
                            // POST - resubmit worker
                            post(() -> postJobInstanceResubmitWorkerRoute(jobId))
                    )
                ))
            ),
            pathPrefix(CLUSTER_JOBS_API_PREFIX, (cluster) -> concat(
                // api/v1/jobClusters/{clusterName}/jobs
                pathEndOrSingleSlash(() -> concat(
                    // GET - list jobs
                    get(() -> getJobsRoute(Optional.of(cluster))),
                    // POST - submit a job
                    post(() -> postJobsRoute(Optional.of(cluster))))
                ),
                // api/v1/jobClusters/{clusterName}/jobs/{jobId}
                path(
                    PathMatchers.segment(),
                    (jobId) -> pathEndOrSingleSlash(() -> concat(
                        // GET - retrieve job detail by cluster & job ID
                        get(() -> getJobInstanceRoute(Optional.of(cluster), jobId)),
                        // reject post
                        post(() -> complete(StatusCodes.METHOD_NOT_ALLOWED))
                    )))
                )
            )
        );
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobs routes");
        return super.createRoute(routeFilter);
    }

    /** Handles GET /api/v1/jobs (no cluster filter). */
    private Route getJobsRoute() {
        return getJobsRoute(Optional.empty());
    }

    /**
     * Handles GET /api/v1/jobs and GET /api/v1/jobClusters/{cluster}/jobs with
     * pagination, sorting, projection and compact-view query parameters.
     * Responses are served through {@code routeResultCache} keyed by request URI.
     */
    private Route getJobsRoute(Optional<String> clusterName) {
        return parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_LIMIT, (pageSize) ->
            parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_OFFSET, (offset) ->
                parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SORT_ASCENDING, (ascending) ->
                    parameterOptional(StringUnmarshallers.STRING, ParamName.SORT_BY, (sortField) ->
                        parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) ->
                            parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) ->
                                parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.JOB_COMPACT, (isCompact) ->
                                    parameterOptional(StringUnmarshallers.STRING, ParamName.JOB_FILTER_MATCH, (matching) ->
                                        parameterMultiMap(params ->
                                            alwaysCache(routeResultCache, getRequestUriKeyer, () -> extractUri(uri -> {
                                                String endpoint;
                                                if (clusterName.isPresent()) {
                                                    // fix: log the cluster name, not the Optional wrapper
                                                    logger.debug("GET /api/v1/jobClusters/{}/jobs called", clusterName.get());
                                                    endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS;
                                                } else {
                                                    logger.debug("GET /api/v1/jobs called");
                                                    endpoint = HttpRequestMetrics.Endpoints.JOBS;
                                                }
                                                // When a cluster is in the path it takes precedence over the
                                                // 'matching' query parameter, as an exact (anchored) match.
                                                JobClusterManagerProto.ListJobsRequest listJobsRequest = createListJobsRequest(
                                                    params,
                                                    clusterName.map(s -> Optional.of("^" + s + "$")).orElse(matching),
                                                    true);
                                                return completeAsync(
                                                    jobRouteHandler.listJobs(listJobsRequest),
                                                    resp -> completeOK(
                                                        (isCompact.isPresent() && isCompact.get()) ?
                                                            resp.getJobList(
                                                                JobClusterProtoAdapter::toCompactJobInfo,
                                                                CompactJobInfo.class,
                                                                pageSize.orElse(null),
                                                                offset.orElse(null),
                                                                sortField.orElse(null),
                                                                ascending.orElse(null),
                                                                uri)
                                                            : resp.getJobList(pageSize.orElse(null),
                                                                offset.orElse(null),
                                                                sortField.orElse(null),
                                                                ascending.orElse(null),
                                                                uri),
                                                        Jackson.marshaller(
                                                            super.parseFilter(fields.orElse(null),
                                                                target.orElse(null)))
                                                    ),
                                                    endpoint,
                                                    HttpRequestMetrics.HttpVerb.GET
                                                );
                                            })))))))))));
    }

    /** Handles POST /api/v1/jobs (no cluster in the path). */
    private Route postJobsRoute() {
        return postJobsRoute(Optional.empty());
    }

    /**
     * Handles POST /api/v1/jobs and POST /api/v1/jobClusters/{cluster}/jobs.
     * Validates the submitted job definition, submits it, then fetches and
     * returns the created job's details with a 201 status.
     */
    private Route postJobsRoute(Optional<String> clusterName) {
        return decodeRequest(() -> entity(
            Jackson.unmarshaller(MantisJobDefinition.class),
            submitJobRequest -> {
                String endpoint;
                if (clusterName.isPresent()) {
                    // fix: original call had two '{}' placeholders but passed a single
                    // argument (the Optional itself); supply both values explicitly.
                    logger.info(
                        "POST /api/v1/jobClusters/{}/jobs called {}",
                        clusterName.get(),
                        submitJobRequest);
                    endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS;
                } else {
                    logger.info(
                        "POST /api/v1/jobs called {}",
                        submitJobRequest);
                    endpoint = HttpRequestMetrics.Endpoints.JOBS;
                }
                CompletionStage<JobClusterManagerProto.SubmitJobResponse> response = null;
                try {
                    // validate request
                    submitJobRequest.validate(true);
                    Pair<Boolean, String> validationResult = validateSubmitJobRequest(
                        submitJobRequest,
                        clusterName);
                    if (!validationResult.first()) {
                        CompletableFuture<JobClusterManagerProto.SubmitJobResponse> resp = new CompletableFuture<>();
                        resp.complete(
                            new JobClusterManagerProto.SubmitJobResponse(
                                -1,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                validationResult.second(),
                                Optional.empty()));
                        response = resp;
                    } else {
                        response = clusterRouteHandler.submit(
                            JobClusterProtoAdapter.toSubmitJobClusterRequest(
                                submitJobRequest));
                    }
                } catch (Exception e) {
                    logger.warn("exception in submit job request {}", submitJobRequest, e);
                    CompletableFuture<JobClusterManagerProto.SubmitJobResponse> resp = new CompletableFuture<>();
                    resp.complete(
                        new JobClusterManagerProto.SubmitJobResponse(
                            -1,
                            BaseResponse.ResponseCode.SERVER_ERROR,
                            e.getMessage(),
                            Optional.empty()));
                    response = resp;
                }
                // On successful submission, look up the new job's details so the
                // response body contains full metadata rather than just an id.
                CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> r = response.thenCompose(
                    t -> {
                        if (t.responseCode.getValue() >= 200 &&
                            t.responseCode.getValue() < 300) {
                            final JobClusterManagerProto.GetJobDetailsRequest request =
                                new JobClusterManagerProto.GetJobDetailsRequest(
                                    submitJobRequest.getUser(),
                                    t.getJobId().get());
                            return jobRouteHandler.getJobDetails(request);
                        } else {
                            CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> responseCompletableFuture =
                                new CompletableFuture<>();
                            responseCompletableFuture.complete(
                                new JobClusterManagerProto.GetJobDetailsResponse(
                                    t.requestId,
                                    t.responseCode,
                                    t.message,
                                    Optional.empty()));
                            return responseCompletableFuture;
                        }
                    });
                return completeAsync(
                    r,
                    resp -> complete(
                        StatusCodes.CREATED,
                        resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)),
                        Jackson.marshaller()),
                    endpoint,
                    HttpRequestMetrics.HttpVerb.POST);
            })
        );
    }

    /** Handles GET /api/v1/jobs/{jobId}. */
    private Route getJobInstanceRoute(String jobId) {
        return getJobInstanceRoute(Optional.empty(), jobId);
    }

    /**
     * Handles GET /api/v1/jobs/{jobId} and GET /api/v1/jobClusters/{cluster}/jobs/{jobId}.
     * When a cluster is present, verifies the job actually belongs to that cluster
     * and returns 404 otherwise.
     */
    private Route getJobInstanceRoute(Optional<String> clusterName, String jobId) {
        String endpoint;
        if (clusterName.isPresent()) {
            logger.info("GET /api/v1/jobClusters/{}/jobs/{} called", clusterName.get(), jobId);
            endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS;
        } else {
            logger.info("GET /api/v1/jobs/{} called", jobId);
            endpoint = HttpRequestMetrics.Endpoints.JOBS;
        }
        return parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) ->
            parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) ->
                completeAsync(
                    jobRouteHandler.getJobDetails(
                        new JobClusterManagerProto.GetJobDetailsRequest("masterAPI", jobId))
                        .thenCompose(r -> {
                            CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> resp =
                                new CompletableFuture<>();
                            if (r.responseCode.getValue() >= 200 &&
                                r.responseCode.getValue() < 300 &&
                                clusterName.isPresent() &&
                                r.getJobMetadata().isPresent()) {
                                if (!clusterName.get().equals(
                                    r.getJobMetadata().get().getClusterName())) {
                                    String msg = String.format(
                                        "JobId [%s] exists but does not belong to specified cluster [%s]",
                                        jobId,
                                        clusterName.get());
                                    resp.complete(
                                        new JobClusterManagerProto.GetJobDetailsResponse(
                                            r.requestId,
                                            BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND,
                                            msg,
                                            Optional.empty()));
                                } else {
                                    resp.complete(r);
                                }
                            } else {
                                resp.complete(r);
                            }
                            return resp;
                        }),
                    resp -> complete(
                        StatusCodes.OK,
                        resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)),
                        Jackson.marshaller(super.parseFilter(fields.orElse(null), target.orElse(null)))),
                    endpoint,
                    HttpRequestMetrics.HttpVerb.GET)
            ));
    }

    /**
     * Handles GET /api/v1/jobs/{jobId}/archivedWorkers with pagination, sorting
     * and projection query parameters; 400 when the job id is malformed.
     */
    private Route getArchivedWorkers(String jobId) {
        logger.info("GET /api/v1/jobs/{}/archivedWorkers called", jobId);
        Optional<JobId> parsedJobId = JobId.fromId(jobId);
        if (!parsedJobId.isPresent()) {
            return complete(StatusCodes.BAD_REQUEST, super.generateFailureResponsePayload("Invalid jobId in URI", -1));
        } else {
            return parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_LIMIT, (pageSize) ->
                parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_OFFSET, (offset) ->
                    parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SORT_ASCENDING, (ascending) ->
                        parameterOptional(StringUnmarshallers.STRING, ParamName.SORT_BY, (sortField) ->
                            parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) ->
                                parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) ->
                                    parameterOptional(StringUnmarshallers.INTEGER, ParamName.SERVER_FILTER_LIMIT, (limit) ->
                                        parameterMultiMap(params -> extractUri(uri -> {
                                            JobClusterManagerProto.ListArchivedWorkersRequest req =
                                                new JobClusterManagerProto.ListArchivedWorkersRequest(
                                                    parsedJobId.get(),
                                                    limit.orElse(DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT));
                                            return completeAsync(
                                                jobRouteHandler.listArchivedWorkers(req),
                                                resp -> completeOK(
                                                    resp.getWorkerMetadata(DataFormatAdapter::convertMantisWorkerMetadataToMantisWorkerMetadataWritable,
                                                        MantisWorkerMetadataWritable.class,
                                                        pageSize.orElse(null),
                                                        offset.orElse(null),
                                                        sortField.orElse(null),
                                                        ascending.orElse(null),
                                                        uri),
                                                    Jackson.marshaller(super.parseFilter(fields.orElse(null), target.orElse(null)))),
                                                HttpRequestMetrics.Endpoints.JOB_INSTANCE_ARCHIVED_WORKERS,
                                                HttpRequestMetrics.HttpVerb.GET);
                                        })))))))));
        }
    }

    /**
     * Handles DELETE /api/v1/jobs/{jobId}. Requires 'user' and 'reason' query
     * parameters; responds 202 once the kill request is accepted.
     */
    private Route deleteJobInstanceRoute(String jobId) {
        logger.info("DELETE /api/v1/jobs/{} called", jobId);
        return parameterOptional(StringUnmarshallers.STRING, ParamName.USER, (user) ->
            parameterOptional(StringUnmarshallers.STRING, ParamName.REASON, (reason) -> {
                String userStr = user.orElse(null);
                String reasonStr = reason.orElse(null);
                if (Strings.isNullOrEmpty(userStr)) {
                    return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'user'");
                } else if (Strings.isNullOrEmpty(reasonStr)) {
                    return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'reason'");
                } else {
                    return completeAsync(
                        jobRouteHandler.kill(new JobClusterManagerProto.KillJobRequest(
                            jobId,
                            reasonStr,
                            userStr)),
                        resp -> complete(
                            StatusCodes.ACCEPTED,
                            ""),
                        HttpRequestMetrics.Endpoints.JOB_INSTANCE,
                        HttpRequestMetrics.HttpVerb.DELETE);
                }
            }
            )
        );
    }

    /**
     * Handles POST /api/v1/jobs/actions/quickSubmit: submits a job against an
     * existing cluster, then returns the created job's details with 201.
     */
    private Route postJobInstanceQuickSubmitRoute() {
        return entity(
            Jackson.unmarshaller(JobClusterManagerProto.SubmitJobRequest.class),
            request -> {
                logger.info("POST /api/v1/jobs/actions/quickSubmit called");
                final CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> response =
                    clusterRouteHandler.submit(request)
                        .thenCompose(t -> {
                            if (t.responseCode.getValue() >= 200 &&
                                t.responseCode.getValue() < 300) {
                                return jobRouteHandler.getJobDetails(new JobClusterManagerProto.GetJobDetailsRequest(
                                    request.getSubmitter(),
                                    t.getJobId().get()));
                            } else {
                                CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> responseCompletableFuture = new CompletableFuture<>();
                                responseCompletableFuture.complete(
                                    new JobClusterManagerProto.GetJobDetailsResponse(
                                        t.requestId,
                                        t.responseCode,
                                        t.message,
                                        Optional.empty()));
                                return responseCompletableFuture;
                            }
                        });
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.CREATED,
                        resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)),
                        Jackson.marshaller()
                    ),
                    HttpRequestMetrics.Endpoints.JOBS_ACTION_QUICKSUBMIT,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /** Handles POST /api/v1/jobs/actions/postJobStatus: records a worker status update (204). */
    private Route postJobStatusRoute() {
        return entity(
            Jackson.unmarshaller(PostJobStatusRequest.class), request -> {
                logger.info("POST /api/v1/jobs/actions/postJobStatus called");
                return completeAsync(
                    jobRouteHandler.workerStatus(createWorkerStatusRequest(request)),
                    resp -> complete(
                        StatusCodes.NO_CONTENT,
                        ""),
                    HttpRequestMetrics.Endpoints.JOBS_ACTION_POST_JOB_STATUS,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /**
     * Handles POST /api/v1/jobs/{jobId}/actions/scaleStage. Rejects requests
     * exceeding the configured max workers per stage or whose payload job id
     * does not match the URI.
     */
    private Route postJobInstanceScaleStageRoute(String jobId) {
        return entity(
            Jackson.unmarshaller(JobClusterManagerProto.ScaleStageRequest.class),
            request -> {
                logger.info("POST /api/v1/jobs/{}/actions/scaleStage called", jobId);
                CompletionStage<JobClusterManagerProto.ScaleStageResponse> response = null;
                int numWorkers = request.getNumWorkers();
                int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage();
                if (numWorkers > maxWorkersPerStage) {
                    CompletableFuture<JobClusterManagerProto.ScaleStageResponse> responseCompletableFuture = new CompletableFuture<>();
                    responseCompletableFuture.complete(
                        new JobClusterManagerProto.ScaleStageResponse(
                            request.requestId,
                            BaseResponse.ResponseCode.CLIENT_ERROR,
                            "num workers must be less than " + maxWorkersPerStage,
                            -1));
                    response = responseCompletableFuture;
                } else if (jobId.equals(request.getJobId().getId())) {
                    response = jobRouteHandler.scaleStage(request);
                } else {
                    CompletableFuture<JobClusterManagerProto.ScaleStageResponse> responseCompletableFuture = new CompletableFuture<>();
                    responseCompletableFuture.complete(
                        new JobClusterManagerProto.ScaleStageResponse(
                            request.requestId,
                            BaseResponse.ResponseCode.CLIENT_ERROR,
                            String.format("JobId specified in request payload [%s] does not match with resource uri [%s]",
                                request.getJobId().getId(),
                                jobId),
                            -1));
                    response = responseCompletableFuture;
                }
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.NO_CONTENT,
                        ""),
                    HttpRequestMetrics.Endpoints.JOB_INSTANCE_ACTION_SCALE_STAGE,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /** Handles POST /api/v1/jobs/{jobId}/actions/resubmitWorker (204 on acceptance). */
    private Route postJobInstanceResubmitWorkerRoute(String jobId) {
        return entity(
            Jackson.unmarshaller(JobClusterManagerProto.V1ResubmitWorkerRequest.class),
            request -> {
                logger.info("POST /api/v1/jobs/{}/actions/resubmitWorker called", jobId);
                CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> response;
                response = jobRouteHandler.resubmitWorker(
                    new JobClusterManagerProto.ResubmitWorkerRequest(jobId,
                        request.getWorkerNum(),
                        request.getUser(),
                        request.getReason()));
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.NO_CONTENT,
                        ""),
                    HttpRequestMetrics.Endpoints.JOB_INSTANCE_ACTION_RESUBMIT_WORKER,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /**
     * Validates a job submission: non-null definition, non-empty name, name
     * consistent with the REST resource path, and per-stage resource limits.
     *
     * @return true to indicate valid, false otherwise. The String holds the error message when the request is invalid
     */
    private Pair<Boolean, String> validateSubmitJobRequest(
        MantisJobDefinition mjd,
        Optional<String> clusterNameInResource) {
        if (null == mjd) {
            logger.error("rejecting job submit request, job definition is malformed {}", mjd);
            return Pair.apply(false, "Malformed job definition.");
        }
        // must include job cluster name
        if (mjd.getName() == null || mjd.getName().length() == 0) {
            logger.info("rejecting job submit request, must include name {}", mjd);
            return Pair.apply(false, "Job definition must include name");
        }
        // validate specified job cluster name matches with what specified in REST resource endpoint
        if (clusterNameInResource.isPresent()) {
            if (!clusterNameInResource.get().equals(mjd.getName())) {
                String msg = String.format("Cluster name specified in request payload [%s] " +
                        "does not match with what specified in resource endpoint [%s]",
                    mjd.getName(), clusterNameInResource.get());
                logger.info("rejecting job submit request, {} {}", msg, mjd);
                return Pair.apply(false, msg);
            }
        }
        // validate scheduling info
        SchedulingInfo schedulingInfo = mjd.getSchedulingInfo();
        if (schedulingInfo != null) {
            Map<Integer, StageSchedulingInfo> stages = schedulingInfo.getStages();
            if (stages != null) {
                for (StageSchedulingInfo stageSchedInfo : stages.values()) {
                    Pair<Boolean, String> stageResult = validateStage(mjd, stageSchedInfo, stages);
                    if (!stageResult.first()) {
                        return stageResult;
                    }
                }
            }
        }
        return Pair.apply(true, "");
    }

    /** Validates a single stage's machine definition and scaling policy against configured worker limits. */
    private Pair<Boolean, String> validateStage(
        MantisJobDefinition mjd,
        StageSchedulingInfo stageSchedInfo,
        Map<Integer, StageSchedulingInfo> stages) {
        // hoisted: resolve the configuration once per stage instead of once per check
        final MasterConfiguration masterConfig = ConfigurationProvider.getConfig();
        double cpuCores = stageSchedInfo.getMachineDefinition().getCpuCores();
        int maxCpuCores = masterConfig.getWorkerMachineDefinitionMaxCpuCores();
        if (cpuCores > maxCpuCores) {
            logger.info(
                "rejecting job submit request, requested CPU {} > max for {} (user: {}) (stage: {})",
                cpuCores,
                mjd.getName(),
                mjd.getUser(),
                stages);
            return Pair.apply(
                false,
                "requested CPU cannot be more than max CPU per worker " +
                    maxCpuCores);
        }
        double memoryMB = stageSchedInfo.getMachineDefinition().getMemoryMB();
        int maxMemoryMB = masterConfig.getWorkerMachineDefinitionMaxMemoryMB();
        if (memoryMB > maxMemoryMB) {
            logger.info(
                "rejecting job submit request, requested memory {} > max for {} (user: {}) (stage: {})",
                memoryMB,
                mjd.getName(),
                mjd.getUser(),
                stages);
            return Pair.apply(
                false,
                "requested memory cannot be more than max memoryMB per worker " +
                    maxMemoryMB);
        }
        double networkMbps = stageSchedInfo.getMachineDefinition().getNetworkMbps();
        int maxNetworkMbps = masterConfig.getWorkerMachineDefinitionMaxNetworkMbps();
        if (networkMbps > maxNetworkMbps) {
            logger.info(
                "rejecting job submit request, requested network {} > max for {} (user: {}) (stage: {})",
                networkMbps,
                mjd.getName(),
                mjd.getUser(),
                stages);
            return Pair.apply(
                false,
                "requested network cannot be more than max networkMbps per worker " +
                    maxNetworkMbps);
        }
        int numberOfInstances = stageSchedInfo.getNumberOfInstances();
        int maxWorkersPerStage = masterConfig.getMaxWorkersPerStage();
        if (numberOfInstances > maxWorkersPerStage) {
            logger.info(
                "rejecting job submit request, requested num instances {} > max for {} (user: {}) (stage: {})",
                numberOfInstances,
                mjd.getName(),
                mjd.getUser(),
                stages);
            return Pair.apply(
                false,
                "requested number of instances per stage cannot be more than " +
                    maxWorkersPerStage);
        }
        StageScalingPolicy scalingPolicy = stageSchedInfo.getScalingPolicy();
        if (scalingPolicy != null) {
            if (scalingPolicy.getMax() > maxWorkersPerStage) {
                // fix: log the offending scaling-policy max, not numberOfInstances
                logger.info(
                    "rejecting job submit request, requested num instances in scaling policy {} > max for {} (user: {}) (stage: {})",
                    scalingPolicy.getMax(),
                    mjd.getName(),
                    mjd.getUser(),
                    stages);
                return Pair.apply(
                    false,
                    "requested number of instances per stage in scaling policy cannot be more than " +
                        maxWorkersPerStage);
            }
        }
        return Pair.apply(true, "");
    }
}
| 8,133 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import io.mantisrx.common.Ack;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.v1.HttpRequestMetrics.Endpoints;
import io.mantisrx.master.api.akka.route.v1.HttpRequestMetrics.HttpVerb;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.resourcecluster.proto.DisableTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.proto.GetResourceClusterSpecRequest;
import io.mantisrx.master.resourcecluster.proto.GetTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.proto.ListResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateAllResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateResourceClusterScaleRuleRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.JobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.SetResourceClusterScalerStatusRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.PagedActiveJobOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiFunction;
import lombok.extern.slf4j.Slf4j;
/**
* Resource Cluster Route
* Defines the following end points:
* /api/v1/resourceClusters (GET, POST)
* /api/v1/resourceClusters/list (GET)
* <p>
* /api/v1/resourceClusters/{} (GET, DELETE)
* <p>
* <p>
* /api/v1/resourceClusters/{}/getResourceOverview (GET)
* /api/v1/resourceClusters/{}/getRegisteredTaskExecutors (GET)
* /api/v1/resourceClusters/{}/getBusyTaskExecutors (GET)
* /api/v1/resourceClusters/{}/getAvailableTaskExecutors (GET)
* /api/v1/resourceClusters/{}/getUnregisteredTaskExecutors (GET)
* /api/v1/resourceClusters/{}/scaleSku (POST)
* /api/v1/resourceClusters/{}/upgrade (POST)
* /api/v1/resourceClusters/{}/disableTaskExecutors (POST)
* /api/v1/resourceClusters/{}/setScalerStatus (POST)
* <p>
* <p>
* /api/v1/resourceClusters/{}/scaleRule (POST)
* /api/v1/resourceClusters/{}/scaleRules (GET, POST)
* <p>
* /api/v1/resourceClusters/{}/taskExecutors/{}/getTaskExecutorState (GET)
* <p>
* /api/v1/resourceClusters/{}/cacheJobArtifacts (GET)
* /api/v1/resourceClusters/{}/cacheJobArtifacts (POST)
* /api/v1/resourceClusters/{}/cacheJobArtifacts (DELETE)
*
* [Notes]
* To upgrade cluster containers: each container running task executor is using docker image tag based image version.
* In regular case the upgrade is to refresh the container to re-deploy with latest digest associated with the image
* tag (e.g. latest).
* If multiple image digest versions need to be ran/hosted at the same time, it is recommended to create a separate
* sku id in addition to the existing sku(s).
*/
@Slf4j
public class ResourceClustersNonLeaderRedirectRoute extends BaseRoute {

    /** Shared URI prefix for every endpoint in this route: /api/v1/resourceClusters. */
    private static final PathMatcher0 RESOURCECLUSTERS_API_PREFIX =
        segment("api").slash("v1").slash("resourceClusters");

    // Entry point to per-cluster runtime state (task executors, job overviews, scaler, artifact cache).
    private final ResourceClusters gateway;
    // Handles cluster lifecycle requests: provision/get/delete, sku scaling, upgrades, scale rules.
    private final ResourceClusterRouteHandler resourceClusterRouteHandler;
    // URI-keyed LFU response cache; applied only to idempotent GET routes via alwaysCache.
    private final Cache<Uri, RouteResult> routeResultCache;

    /**
     * Builds the route.
     *
     * @param gateway access to resource-cluster runtime state
     * @param resourceClusterRouteHandler handler for cluster CRUD / scaling / upgrade requests
     * @param actorSystem actor system used to create the route-result cache; cache sizing and
     *                    TTL come from the master configuration
     */
    public ResourceClustersNonLeaderRedirectRoute(
        final ResourceClusters gateway,
        final ResourceClusterRouteHandler resourceClusterRouteHandler,
        final ActorSystem actorSystem) {
        this.gateway = gateway;
        this.resourceClusterRouteHandler = resourceClusterRouteHandler;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        this.routeResultCache = createCache(actorSystem, config.getApiCacheMinSize(),
            config.getApiCacheMaxSize(),
            config.getApiCacheTtlMilliseconds());
    }

    /**
     * Assembles the full /api/v1/resourceClusters route tree (see class Javadoc for the
     * endpoint list). NOTE(review): the generic /{} matcher relies on path() requiring a
     * complete match, so it does not shadow the longer /{}/... routes below it — confirm
     * against Akka HTTP path() semantics if reordering.
     */
    @Override
    protected Route constructRoutes() {
        Route result = pathPrefix(
            RESOURCECLUSTERS_API_PREFIX,
            () -> concat(
                // /
                pathEndOrSingleSlash(() -> concat(
                    // GET
                    get(this::getRegisteredResourceClustersRoute),
                    // POST
                    post(this::provisionResourceClustersRoute)
                    )
                ),
                // /list
                pathPrefix(
                    "list",
                    () -> concat(
                        // GET
                        get(this::listClusters))
                ),
                // /{}
                path(
                    PathMatchers.segment(),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // GET
                        get(() -> getResourceClusterInstanceRoute(clusterName)),
                        // Delete
                        delete(() -> deleteResourceClusterInstanceRoute(clusterName))
                    ))
                ),
                // /{}/scaleSku
                path(
                    PathMatchers.segment().slash("scaleSku"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> scaleClusterSku(clusterName))
                    ))
                ),
                // /{}/disableTaskExecutors
                path(
                    PathMatchers.segment().slash("disableTaskExecutors"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        post(() -> disableTaskExecutors(getClusterID(clusterName)))))
                ),
                // /{}/setScalerStatus
                path(
                    PathMatchers.segment().slash("setScalerStatus"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        post(() -> setScalerStatus(clusterName))))
                ),
                // /{}/upgrade
                path(
                    PathMatchers.segment().slash("upgrade"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> upgradeCluster(clusterName))
                    ))
                ),
                // /{}/getResourceOverview
                path(
                    PathMatchers.segment().slash("getResourceOverview"),
                    (clusterName) -> pathEndOrSingleSlash(
                        () -> concat(get(() -> getResourceOverview(getClusterID(clusterName)))))
                ),
                // /{}/activeJobOverview?pageSize={}&startingIndex={}
                path(
                    PathMatchers.segment().slash("activeJobOverview"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(get(() ->
                        parameterOptional("startingIndex", startingIndex ->
                            parameterOptional("pageSize", pageSize ->
                                getActiveJobOverview(getClusterID(clusterName), startingIndex,
                                    pageSize))))))
                ),
                // /{}/getRegisteredTaskExecutors
                path(
                    PathMatchers.segment().slash("getRegisteredTaskExecutors"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        get(() -> mkTaskExecutorsRoute(getClusterID(clusterName), (rc, req) -> rc.getRegisteredTaskExecutors(req.getAttributes())))))
                ),
                // /{}/getBusyTaskExecutors
                path(
                    PathMatchers.segment().slash("getBusyTaskExecutors"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        get(() -> mkTaskExecutorsRoute(getClusterID(clusterName), (rc, req) -> rc.getBusyTaskExecutors(req.getAttributes())))))
                ),
                // /{}/getAvailableTaskExecutors
                path(
                    PathMatchers.segment().slash("getAvailableTaskExecutors"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        get(() -> mkTaskExecutorsRoute(getClusterID(clusterName), (rc, req) -> rc.getAvailableTaskExecutors(req.getAttributes())))))
                ),
                // /{}/getUnregisteredTaskExecutors
                path(
                    PathMatchers.segment().slash("getUnregisteredTaskExecutors"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        get(() -> mkTaskExecutorsRoute(getClusterID(clusterName), (rc, req) -> rc.getUnregisteredTaskExecutors(req.getAttributes())))))
                ),
                // /{}/scaleRule
                path(
                    PathMatchers.segment().slash("scaleRule"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> createSingleScaleRule(clusterName))
                    ))
                ),
                // /{}/scaleRules
                path(
                    PathMatchers.segment().slash("scaleRules"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // GET
                        get(() -> getScaleRules(clusterName)),
                        // POST
                        post(() -> createAllScaleRules(clusterName))
                    ))
                ),
                // /{}/cacheJobArtifacts
                path(
                    PathMatchers.segment().slash("cacheJobArtifacts"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // GET
                        get(() -> withFuture(gateway.getClusterFor(getClusterID(clusterName))
                            .getJobArtifactsToCache())),
                        // POST
                        post(() -> cacheJobArtifacts(clusterName)),
                        // DELETE
                        delete(() -> removeJobArtifactsToCache(clusterName))
                    ))
                ),
                // /api/v1/resourceClusters/{}/taskExecutors/{}/getTaskExecutorState
                pathPrefix(
                    PathMatchers.segment().slash("taskExecutors"),
                    (clusterName) -> concat(
                        path(
                            PathMatchers.segment().slash("getTaskExecutorState"),
                            (taskExecutorId) ->
                                pathEndOrSingleSlash(() -> concat(
                                    get(() -> getTaskExecutorState(getClusterID(clusterName),
                                        getTaskExecutorID(taskExecutorId))))))
                    )
                )
            ));
        return result;
    }

    /** GET /api/v1/resourceClusters/list — lists all active clusters. */
    private Route listClusters() {
        return withFuture(gateway.listActiveClusters());
    }

    /**
     * GET /api/v1/resourceClusters/{}/activeJobOverview — paged overview of active jobs.
     * NOTE(review): Integer.parseInt on the optional query params throws
     * NumberFormatException for non-numeric input, which surfaces as a 500 via the
     * generic exception handler rather than a 400 — confirm whether that is intended.
     */
    private Route getActiveJobOverview(ClusterID clusterID, Optional<String> startingIndex,
        Optional<String> pageSize) {
        CompletableFuture<PagedActiveJobOverview> jobsOverview =
            gateway.getClusterFor(clusterID).getActiveJobOverview(
                startingIndex.map(Integer::parseInt),
                pageSize.map(Integer::parseInt));
        return withFuture(jobsOverview);
    }

    /** GET /api/v1/resourceClusters/{}/getResourceOverview — cluster resource summary. */
    private Route getResourceOverview(ClusterID clusterID) {
        CompletableFuture<ResourceCluster.ResourceOverview> resourceOverview =
            gateway.getClusterFor(clusterID).resourceOverview();
        return withFuture(resourceOverview);
    }

    /**
     * Builds a GET route for one of the task-executor listing endpoints.
     * The request body is optional; a missing body is treated as an empty attribute
     * filter (i.e. list everything).
     *
     * @param clusterId     cluster whose executors are listed
     * @param taskExecutors resolves the executor list from the cluster and the filter request
     */
    private Route mkTaskExecutorsRoute(
        ClusterID clusterId,
        BiFunction<ResourceCluster, GetTaskExecutorsRequest, CompletableFuture<List<TaskExecutorID>>> taskExecutors) {
        final GetTaskExecutorsRequest empty = new GetTaskExecutorsRequest(ImmutableMap.of());
        return entity(
            Jackson.optionalEntityUnmarshaller(GetTaskExecutorsRequest.class),
            request -> {
                if (request == null) {
                    request = empty;
                }
                return withFuture(taskExecutors.apply(gateway.getClusterFor(clusterId), request));
            });
    }

    /** GET .../taskExecutors/{}/getTaskExecutorState — state of a single task executor. */
    private Route getTaskExecutorState(ClusterID clusterID, TaskExecutorID taskExecutorID) {
        CompletableFuture<ResourceCluster.TaskExecutorStatus> statusOverview =
            gateway.getClusterFor(clusterID).getTaskExecutorState(taskExecutorID);
        return withFuture(statusOverview);
    }

    /**
     * POST /api/v1/resourceClusters/{}/disableTaskExecutors — disables executors matching
     * the request's attributes (or a specific executor id) until the expiry computed from
     * the request's duration-in-hours.
     */
    private Route disableTaskExecutors(ClusterID clusterID) {
        return entity(Jackson.unmarshaller(DisableTaskExecutorsRequest.class), request -> {
            log.info("POST /api/v1/resourceClusters/{}/disableTaskExecutors called with body {}",
                clusterID, request);
            return withFuture(gateway.getClusterFor(clusterID).disableTaskExecutorsFor(
                request.getAttributes(),
                Instant.now().plus(Duration.ofHours(request.getExpirationDurationInHours())),
                request.getTaskExecutorID()));
        });
    }

    /**
     * POST /api/v1/resourceClusters/{}/setScalerStatus — enables/disables the scaler for a sku.
     * NOTE(review): the path segment {@code clusterID} is only used for logging; the target
     * cluster is taken from the request body's clusterID — confirm the two are expected to match.
     */
    private Route setScalerStatus(String clusterID) {
        return entity(Jackson.unmarshaller(SetResourceClusterScalerStatusRequest.class),
            request -> {
                log.info("POST /api/v1/resourceClusters/{}/setScalerStatus called with body {}",
                    clusterID, request);
                return withFuture(gateway.getClusterFor(request.getClusterID())
                    .setScalerStatus(request.getClusterID(), request.getSkuId(),
                        request.getEnabled(), request.getExpirationDurationInSeconds()));
            });
    }

    /** Wraps a raw cluster name into a typed {@link ClusterID}. */
    private ClusterID getClusterID(String clusterName) {
        return ClusterID.of(clusterName);
    }

    /** Wraps a raw resource name into a typed {@link TaskExecutorID}. */
    private TaskExecutorID getTaskExecutorID(String resourceName) {
        return TaskExecutorID.of(resourceName);
    }

    /*
    Host route section.
     */

    /** GET /api/v1/resourceClusters/{} — cluster spec; result is cached by request URI. */
    private Route getResourceClusterInstanceRoute(String clusterId) {
        log.info("GET /api/v1/resourceClusters/{} called", clusterId);
        return parameterMap(param ->
            alwaysCache(routeResultCache, getRequestUriKeyer, () -> extractUri(
                uri -> completeAsync(
                    this.resourceClusterRouteHandler.get(
                        GetResourceClusterSpecRequest.builder().id(ClusterID.of(clusterId))
                            .build()),
                    resp -> completeOK(
                        resp,
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.GET))));
    }

    /** POST /api/v1/resourceClusters — provisions a new cluster; replies 202 with the spec. */
    private Route provisionResourceClustersRoute() {
        return entity(Jackson.unmarshaller(ProvisionResourceClusterRequest.class),
            resClusterSpec -> {
                log.info("POST /api/v1/resourceClusters called: {}", resClusterSpec);
                final CompletionStage<GetResourceClusterResponse> response =
                    this.resourceClusterRouteHandler.create(resClusterSpec);
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.ACCEPTED,
                        resp.getClusterSpec(),
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /** GET /api/v1/resourceClusters — lists registered clusters; cached by request URI. */
    private Route getRegisteredResourceClustersRoute() {
        log.info("GET /api/v1/resourceClusters called");
        return parameterMap(param ->
            alwaysCache(routeResultCache, getRequestUriKeyer, () -> extractUri(
                uri -> {
                    return completeAsync(
                        this.resourceClusterRouteHandler.get(
                            ListResourceClusterRequest.builder().build()),
                        resp -> completeOK(
                            resp,
                            Jackson.marshaller()),
                        Endpoints.RESOURCE_CLUSTERS,
                        HttpRequestMetrics.HttpVerb.GET);
                })));
    }

    /** DELETE /api/v1/resourceClusters/{} — deletes the cluster registration. */
    private Route deleteResourceClusterInstanceRoute(String clusterId) {
        log.info("DELETE api/v1/resourceClusters/{}", clusterId);
        return completeAsync(
            this.resourceClusterRouteHandler.delete(ClusterID.of(clusterId)),
            resp -> completeOK(
                resp,
                Jackson.marshaller()),
            Endpoints.RESOURCE_CLUSTERS,
            HttpVerb.DELETE);
    }

    /** POST /api/v1/resourceClusters/{}/scaleSku — scales a sku; replies 202. */
    private Route scaleClusterSku(String clusterId) {
        return entity(Jackson.unmarshaller(ScaleResourceRequest.class), skuScaleRequest -> {
            log.info("POST api/v1/resourceClusters/{}/scaleSku {}", clusterId, skuScaleRequest);
            final CompletionStage<ScaleResourceResponse> response =
                this.resourceClusterRouteHandler.scale(skuScaleRequest);
            return completeAsync(
                response,
                resp -> complete(
                    StatusCodes.ACCEPTED,
                    resp,
                    Jackson.marshaller()),
                Endpoints.RESOURCE_CLUSTERS,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /** POST /api/v1/resourceClusters/{}/upgrade — triggers container upgrade; replies 202. */
    private Route upgradeCluster(String clusterId) {
        return entity(Jackson.unmarshaller(UpgradeClusterContainersRequest.class),
            upgradeRequest -> {
                log.info("POST api/v1/resourceClusters/{}/upgrade {}", clusterId, upgradeRequest);
                final CompletionStage<UpgradeClusterContainersResponse> response =
                    this.resourceClusterRouteHandler.upgrade(upgradeRequest);
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.ACCEPTED,
                        resp,
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /** POST /api/v1/resourceClusters/{}/scaleRule — creates/replaces one scale rule; replies 202. */
    private Route createSingleScaleRule(String clusterId) {
        return entity(Jackson.unmarshaller(CreateResourceClusterScaleRuleRequest.class),
            scaleRuleReq -> {
                log.info("POST api/v1/resourceClusters/{}/scaleRule {}", clusterId, scaleRuleReq);
                final CompletionStage<GetResourceClusterScaleRulesResponse> response =
                    this.resourceClusterRouteHandler.createSingleScaleRule(scaleRuleReq);
                return completeAsync(
                    response,
                    resp -> complete(
                        StatusCodes.ACCEPTED,
                        resp,
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /**
     * POST /api/v1/resourceClusters/{}/scaleRules — replaces the full rule set, then asks the
     * cluster to refresh its scaler rule set before completing; the refresh result is discarded
     * and the create response is returned with 202.
     */
    private Route createAllScaleRules(String clusterId) {
        return entity(Jackson.unmarshaller(CreateAllResourceClusterScaleRulesRequest.class),
            scaleRuleReq -> {
                log.info("POST api/v1/resourceClusters/{}/scaleRules {}", clusterId, scaleRuleReq);
                final CompletionStage<GetResourceClusterScaleRulesResponse> response =
                    this.resourceClusterRouteHandler.createAllScaleRule(scaleRuleReq);
                return completeAsync(
                    response.thenCombineAsync(
                        this.gateway.getClusterFor(getClusterID(clusterId))
                            .refreshClusterScalerRuleSet(),
                        (createResp, dontCare) -> createResp),
                    resp -> complete(
                        StatusCodes.ACCEPTED,
                        resp,
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /** GET /api/v1/resourceClusters/{}/scaleRules — lists scale rules; cached by request URI. */
    private Route getScaleRules(String clusterId) {
        log.info("GET /api/v1/resourceClusters/{}/scaleRules called", clusterId);
        return parameterMap(param ->
            alwaysCache(routeResultCache, getRequestUriKeyer, () -> extractUri(
                uri -> completeAsync(
                    this.resourceClusterRouteHandler.getClusterScaleRules(
                        GetResourceClusterScaleRulesRequest.builder()
                            .clusterId(getClusterID(clusterId)).build()),
                    resp -> completeOK(
                        resp,
                        Jackson.marshaller()),
                    Endpoints.RESOURCE_CLUSTERS,
                    HttpVerb.GET))));
    }

    /**
     * POST /api/v1/resourceClusters/{}/cacheJobArtifacts — registers job artifacts to cache.
     * NOTE(review): the BaseResponse built in thenApply is discarded; the 201 reply body is
     * the request's artifact list — confirm that is the intended payload.
     */
    private Route cacheJobArtifacts(String clusterId) {
        return entity(Jackson.unmarshaller(JobArtifactsToCacheRequest.class), request -> {
            log.info("POST /api/v1/resourceClusters/{}/cacheJobArtifacts {}", clusterId, request);
            final CompletionStage<Ack> response =
                gateway.getClusterFor(getClusterID(clusterId))
                    .addNewJobArtifactsToCache(request.getClusterID(), request.getArtifacts());
            return completeAsync(
                response.thenApply(dontCare -> new BaseResponse(request.requestId,
                    BaseResponse.ResponseCode.SUCCESS, "job artifacts stored successfully")),
                resp -> complete(
                    StatusCodes.CREATED,
                    request.getArtifacts(),
                    Jackson.marshaller()),
                Endpoints.RESOURCE_CLUSTERS,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * DELETE /api/v1/resourceClusters/{}/cacheJobArtifacts — removes artifacts from the cache
     * list; replies 200 with the removed artifact list (the BaseResponse is discarded, mirroring
     * cacheJobArtifacts above).
     */
    private Route removeJobArtifactsToCache(String clusterId) {
        return entity(Jackson.unmarshaller(JobArtifactsToCacheRequest.class), request -> {
            log.info("DELETE /api/v1/resourceClusters/{}/cacheJobArtifacts {}", clusterId, request);
            final CompletionStage<Ack> response =
                gateway.getClusterFor(getClusterID(clusterId))
                    .removeJobArtifactsToCache(request.getArtifacts());
            return completeAsync(
                response.thenApply(dontCare -> new BaseResponse(request.requestId,
                    BaseResponse.ResponseCode.SUCCESS, "job artifacts removed successfully")),
                resp -> complete(
                    StatusCodes.OK,
                    request.getArtifacts(),
                    Jackson.marshaller()),
                Endpoints.RESOURCE_CLUSTERS,
                HttpRequestMetrics.HttpVerb.DELETE
            );
        });
    }
}
| 8,134 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobDiscoveryStreamRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.NotUsed;
import akka.http.javadsl.marshalling.sse.EventStreamMarshalling;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.sse.ServerSentEvent;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.stream.javadsl.Source;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.api.akka.route.utils.StreamingUtils;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.domain.JobId;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.RxReactiveStreams;
/***
 * JobDiscoveryStreamRoute - returns scheduling info stream for a given job.
 * Defines the following end points:
 *    /api/v1/jobDiscoveryStream/{jobId} (GET)
 */
public class JobDiscoveryStreamRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryStreamRoute.class);

    // Resolves a job's scheduling-info observable from the job cluster manager.
    private final JobDiscoveryRouteHandler jobDiscoveryRouteHandler;

    /** URI prefix for this route: /api/v1. */
    private static final PathMatcher0 JOBDISCOVERY_API_PREFIX = segment("api").slash("v1");

    public JobDiscoveryStreamRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) {
        this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler;
    }

    @Override
    protected Route constructRoutes() {
        return pathPrefix(
            JOBDISCOVERY_API_PREFIX,
            () -> concat(
                path(
                    segment("jobDiscoveryStream").slash(PathMatchers.segment()),
                    (jobId) -> pathEndOrSingleSlash(
                        () -> get(() -> getJobDiscoveryStreamRoute(
                            jobId)))
                )
            )
        );
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobDiscoveryStream routes");
        return super.createRoute(routeFilter);
    }

    /**
     * GET /api/v1/jobDiscoveryStream/{jobId}.
     * Streams {@link JobSchedulingInfo} updates for the job as server-sent events; when the
     * heartbeat query parameter is true, heartbeat events are interleaved by the handler.
     * Replies 400 for a malformed job id and 500 when no scheduling-info stream is available.
     */
    private Route getJobDiscoveryStreamRoute(String jobId) {
        return parameterOptional(
            StringUnmarshallers.BOOLEAN, ParamName.SEND_HEARTBEAT,
            (sendHeartbeats) -> {
                // Fixed: previously logged the wrong endpoint name (jobStatusStream).
                logger.info("GET /api/v1/jobDiscoveryStream/{} called", jobId);
                // Validate up front: JobId.fromId(...).get() on a malformed id would throw
                // NoSuchElementException and surface as a 500 instead of a 400.
                Optional<JobId> parsedJobId = JobId.fromId(jobId);
                if (!parsedJobId.isPresent()) {
                    return complete(StatusCodes.BAD_REQUEST, "Invalid job id: " + jobId);
                }
                CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoRespCS =
                    jobDiscoveryRouteHandler.schedulingInfoStream(
                        new JobClusterManagerProto
                            .GetJobSchedInfoRequest(parsedJobId.get()),
                        sendHeartbeats.orElse(false));
                return completeAsync(
                    schedulingInfoRespCS,
                    resp -> {
                        Optional<Observable<JobSchedulingInfo>> siStream = resp.getSchedInfoStream();
                        if (siStream.isPresent()) {
                            Observable<JobSchedulingInfo> schedulingInfoObs = siStream.get();
                            // Bridge the Rx observable into an Akka SSE source, dropping
                            // events that cannot be serialized (StreamingUtils returns empty).
                            Source<ServerSentEvent, NotUsed> schedInfoSource =
                                Source.fromPublisher(RxReactiveStreams.toPublisher(
                                        schedulingInfoObs))
                                    .map(j -> StreamingUtils.from(j).orElse(null))
                                    .filter(Objects::nonNull);
                            return completeOK(
                                schedInfoSource,
                                EventStreamMarshalling.toEventStream());
                        } else {
                            logger.warn(
                                "Failed to get sched info stream for job {}",
                                jobId);
                            return complete(
                                StatusCodes.INTERNAL_SERVER_ERROR,
                                "Failed to get sched info stream for job " +
                                    jobId);
                        }
                    },
                    HttpRequestMetrics.Endpoints.JOB_STATUS_STREAM,
                    HttpRequestMetrics.HttpVerb.GET
                );
            });
    }
}
| 8,135 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/ResourceClustersLeaderExclusiveRoute.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Rejection;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.directives.LogEntry;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.resourcecluster.TaskExecutorDisconnection;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import java.util.List;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* Resource Cluster Route
* Defines the following end points:
* /api/v1/resourceClusters/{}/actions/registerTaskExecutor (POST)
* /api/v1/resourceClusters/{}/actions/heartBeatFromTaskExecutor (POST)
* /api/v1/resourceClusters/{}/actions/notifyTaskExecutorStatusChange (POST)
* /api/v1/resourceClusters/{}/actions/disconnectTaskExecutor (POST)
*/
@Slf4j
@RequiredArgsConstructor
public class ResourceClustersLeaderExclusiveRoute extends BaseRoute {

    /** Shared URI prefix: /api/v1/resourceClusters. */
    private static final PathMatcher0 RESOURCECLUSTERS_API_PREFIX =
        segment("api").slash("v1").slash("resourceClusters");

    // Entry point to the per-cluster ResourceCluster instances.
    private final ResourceClusters gateway;

    /** Debug-logs every completed request/response pair; emits no extra access-log entry. */
    private Optional<LogEntry> onRequestCompletion(HttpRequest request, HttpResponse response) {
        log.debug("ResourceClustersLeaderExclusiveRoute: {} {}", request, response);
        return Optional.empty();
    }

    /** Rejections are not logged separately. */
    private Optional<LogEntry> onRequestRejection(HttpRequest request, List<Rejection> rejections) {
        return Optional.empty();
    }

    @Override
    protected Route constructRoutes() {
        return pathPrefix(
            RESOURCECLUSTERS_API_PREFIX,
            () -> logRequestResultOptional(this::onRequestCompletion, this::onRequestRejection, () -> concat(
                // /{}/actions/registerTaskExecutor
                path(
                    PathMatchers.segment().slash("actions").slash("registerTaskExecutor"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> registerTaskExecutor(getClusterID(clusterName)))
                    ))
                ),
                // /{}/actions/heartBeatFromTaskExecutor
                path(
                    PathMatchers.segment().slash("actions").slash("heartBeatFromTaskExecutor"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> heartbeatFromTaskExecutor(getClusterID(clusterName)))
                    ))
                ),
                // /{}/actions/notifyTaskExecutorStatusChange
                path(
                    PathMatchers.segment().slash("actions").slash("notifyTaskExecutorStatusChange"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> notifyTaskExecutorStatusChange(getClusterID(clusterName)))
                    ))
                ),
                // /{}/actions/disconnectTaskExecutor
                path(
                    PathMatchers.segment().slash("actions").slash("disconnectTaskExecutor"),
                    (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> disconnectTaskExecutor(getClusterID(clusterName)))
                    ))
                )
            )));
    }

    /** POST .../actions/registerTaskExecutor — registers a task executor with the cluster. */
    private Route registerTaskExecutor(ClusterID clusterID) {
        return entity(Jackson.unmarshaller(TaskExecutorRegistration.class), request -> {
            // Log the resource id, matching the sibling handlers below.
            log.info(
                "POST /api/v1/resourceClusters/{}/actions/registerTaskExecutor called {}",
                clusterID.getResourceID(),
                request);
            return withFuture(gateway.getClusterFor(clusterID).registerTaskExecutor(request));
        });
    }

    /**
     * POST .../actions/heartBeatFromTaskExecutor — records a heartbeat.
     * Logged at debug because heartbeats are high-frequency.
     */
    private Route heartbeatFromTaskExecutor(ClusterID clusterID) {
        return entity(Jackson.unmarshaller(TaskExecutorHeartbeat.class), request -> {
            // Fixed: message previously spelled the path "heartbeatFromTaskExecutor",
            // but the actual route segment is "heartBeatFromTaskExecutor".
            log.debug(
                "POST /api/v1/resourceClusters/{}/actions/heartBeatFromTaskExecutor called {}",
                clusterID.getResourceID(),
                request);
            return withFuture(gateway.getClusterFor(clusterID).heartBeatFromTaskExecutor(request));
        });
    }

    /** POST .../actions/disconnectTaskExecutor — disconnects a task executor. */
    private Route disconnectTaskExecutor(ClusterID clusterID) {
        return entity(Jackson.unmarshaller(TaskExecutorDisconnection.class), request -> {
            log.info(
                "POST /api/v1/resourceClusters/{}/actions/disconnectTaskExecutor called {}",
                clusterID.getResourceID(),
                request);
            return withFuture(gateway.getClusterFor(clusterID).disconnectTaskExecutor(request));
        });
    }

    /** POST .../actions/notifyTaskExecutorStatusChange — forwards an executor status change. */
    private Route notifyTaskExecutorStatusChange(ClusterID clusterID) {
        return entity(Jackson.unmarshaller(TaskExecutorStatusChange.class), request -> {
            log.info(
                "POST /api/v1/resourceClusters/{}/actions/notifyTaskExecutorStatusChange called {}",
                clusterID.getResourceID(),
                request);
            return withFuture(
                gateway.getClusterFor(clusterID).notifyTaskExecutorStatusChange(request));
        });
    }

    /** Wraps a raw cluster name into a typed {@link ClusterID}. */
    private ClusterID getClusterID(String clusterName) {
        return ClusterID.of(clusterName);
    }
}
| 8,136 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/BaseRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import akka.actor.ActorSystem;
import akka.http.caching.LfuCache;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.caching.javadsl.LfuCacheSettings;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.server.directives.RouteAdapter;
import akka.japi.JavaPartialFunction;
import akka.japi.pf.PFBuilder;
import akka.pattern.AskTimeoutException;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.MasterApiMetrics;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.master.resourcecluster.RequestThrottledException;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorNotFoundException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.node.JsonNodeFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.node.ObjectNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.FilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.Sets;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
abstract class BaseRoute extends AllDirectives {
private static final Logger logger = LoggerFactory.getLogger(BaseRoute.class);
public static final String TOPLEVEL_FILTER = "topLevelFilter";
public static final String JOBMETADATA_FILTER = "jobMetadata";
public static final String STAGEMETADATA_FILTER = "stageMetadataList";
public static final String WORKERMETADATA_FILTER = "workerMetadataList";
private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
HttpHeader.parse("Access-Control-Allow-Origin", "*");
private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS =
Arrays.asList(ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    // Cache keyer for alwaysCache(...): a partial function that is defined only for GET
    // requests (keyed by full request URI) and deliberately throws noMatch() for every
    // other verb, so non-GET requests bypass the route-result cache entirely.
    protected final JavaPartialFunction<RequestContext, Uri> getRequestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() {
        public Uri apply(RequestContext in, boolean isCheck) {
            final HttpRequest request = in.getRequest();
            final boolean isGet = request.method() == HttpMethods.GET;
            if (isGet) {
                // Key the cache entry by the complete URI, including query parameters.
                return request.getUri();
            } else {
                // Signals "not defined here" to the partial function machinery.
                throw noMatch();
            }
        }
    };
private String hostName;
BaseRoute() {
try {
this.hostName = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException ex) {
this.hostName = "unknown";
}
}
protected Cache<Uri, RouteResult> createCache(ActorSystem actorSystem, int initialCapacity, int maxCapacity, int ttlMillis) {
final CachingSettings defaultCachingSettings = CachingSettings.create(actorSystem);
final LfuCacheSettings lfuCacheSettings = defaultCachingSettings.lfuCacheSettings()
.withInitialCapacity(initialCapacity)
.withMaxCapacity(maxCapacity)
.withTimeToLive(Duration.create(ttlMillis, TimeUnit.MILLISECONDS));
final CachingSettings cachingSettings = defaultCachingSettings.withLfuCacheSettings(lfuCacheSettings);
return LfuCache.create(cachingSettings);
}
    /** Subclasses build their endpoint tree here; wrapped by {@link #createRoute(Function)}. */
    protected abstract Route constructRoutes();

    /**
     * Wraps the subclass's routes with the common response headers (CORS allow-all) and a
     * catch-all exception handler that converts any uncaught exception into a 500 with the
     * standard JSON failure payload (requestId -1 since no request id is available here).
     *
     * @param routeFilter caller-supplied decorator applied to the constructed routes
     *                    (e.g. leader-redirect filtering)
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler
            .newBuilder()
            .match(
                Exception.class,
                x -> {
                    logger.error("got exception", x);
                    return complete(
                        StatusCodes.INTERNAL_SERVER_ERROR,
                        generateFailureResponsePayload(
                            "caught exception: " + x.toString(),
                            -1)
                    );
                })
            .build();
        return respondWithHeaders(
            DEFAULT_RESPONSE_HEADERS,
            () -> handleExceptions(
                jsonExceptionHandler,
                () -> routeFilter.apply(this.constructRoutes())));
    }
HttpResponse toDefaultHttpResponse(final BaseResponse r) {
switch (r.responseCode) {
case SUCCESS:
return HttpResponse.create()
.withEntity(ContentTypes.APPLICATION_JSON, r.message)
.withStatus(StatusCodes.OK);
case SUCCESS_CREATED:
return HttpResponse.create()
.withEntity(ContentTypes.APPLICATION_JSON, r.message)
.withStatus(StatusCodes.CREATED);
case CLIENT_ERROR:
return HttpResponse.create()
.withEntity(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(r.message, r.requestId))
.withStatus(StatusCodes.BAD_REQUEST);
case CLIENT_ERROR_NOT_FOUND:
return HttpResponse.create()
.withEntity(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(r.message, r.requestId))
.withStatus(StatusCodes.NOT_FOUND);
case CLIENT_ERROR_CONFLICT:
return HttpResponse.create()
.withEntity(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(r.message, r.requestId))
.withStatus(StatusCodes.CONFLICT);
case OPERATION_NOT_ALLOWED:
return HttpResponse.create()
.withEntity(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(r.message, r.requestId))
.withStatus(StatusCodes.METHOD_NOT_ALLOWED);
case SERVER_ERROR:
default:
return HttpResponse.create()
.withEntity(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(r.message, r.requestId))
.withStatus(StatusCodes.INTERNAL_SERVER_ERROR);
}
}
<T extends BaseResponse> RouteAdapter completeAsync(
final CompletionStage<T> stage,
final Function<T, RouteAdapter> successTransform,
String endpointName,
HttpRequestMetrics.HttpVerb verb) {
return completeAsync(
stage,
successTransform,
r -> {
HttpResponse response = toDefaultHttpResponse(r);
return complete(
response.status(),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
generateFailureResponsePayload(
r.message,
r.requestId))
);
},
endpointName,
verb);
}
<T extends BaseResponse> RouteAdapter completeAsync(
final CompletionStage<T> stage,
final Function<T, RouteAdapter> successTransform,
final Function<T, RouteAdapter> clientFailureTransform,
String endpointName,
HttpRequestMetrics.HttpVerb verb) {
return onComplete(
stage,
resp -> resp
.map(r -> {
HttpRequestMetrics.getInstance()
.incrementEndpointMetrics(
endpointName,
new BasicTag("verb", verb.toString()),
new BasicTag(
"responseCode",
String.valueOf(r.responseCode.getValue())));
switch (r.responseCode) {
case SUCCESS:
case SUCCESS_CREATED:
MasterApiMetrics.getInstance().incrementResp2xx();
return successTransform.apply(r);
case CLIENT_ERROR:
case CLIENT_ERROR_CONFLICT:
case CLIENT_ERROR_NOT_FOUND:
case OPERATION_NOT_ALLOWED:
MasterApiMetrics.getInstance().incrementResp4xx();
return clientFailureTransform.apply(r);
case SERVER_ERROR:
default:
MasterApiMetrics.getInstance().incrementResp5xx();
logger.error("completeAsync default response code error: {}", r.message);
return complete(StatusCodes.INTERNAL_SERVER_ERROR, r.message);
}
})
.recover(
new PFBuilder<Throwable, Route>()
.match(AskTimeoutException.class, te -> {
MasterApiMetrics.getInstance()
.incrementAskTimeOutCount();
MasterApiMetrics.getInstance().incrementResp5xx();
return complete(
StatusCodes.INTERNAL_SERVER_ERROR,
generateFailureResponsePayload(
te.toString(),
-1));
})
.matchAny(ex -> {
MasterApiMetrics.getInstance().incrementResp5xx();
logger.error("completeAsync matchAny ex: ", ex);
return complete(
StatusCodes.INTERNAL_SERVER_ERROR,
generateFailureResponsePayload(
ex.toString(),
-1));
})
.build()).get());
}
protected String generateFailureResponsePayload(String errorMsg, long requestId) {
ObjectNode node = JsonNodeFactory.instance.objectNode();
node.put("time", System.currentTimeMillis());
node.put("host", this.hostName);
node.put("error", errorMsg);
node.put("requestId", requestId);
return node.toString();
}
FilterProvider parseFilter(String fields, String target) {
if (Strings.isNullOrEmpty(fields)) {
return null;
}
if (Strings.isNullOrEmpty(target)) {
target = TOPLEVEL_FILTER;
}
Set<String> filtersSet = Sets.newHashSet();
StringTokenizer st = new StringTokenizer(fields, ",");
while (st.hasMoreTokens()) {
filtersSet.add(st.nextToken().trim());
}
return new SimpleFilterProvider()
.addFilter(TOPLEVEL_FILTER, TOPLEVEL_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet)
: SimpleBeanPropertyFilter.filterOutAllExcept(target))
.addFilter(JOBMETADATA_FILTER, JOBMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet)
: SimpleBeanPropertyFilter.serializeAll())
.addFilter(STAGEMETADATA_FILTER, STAGEMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet)
: SimpleBeanPropertyFilter.serializeAll())
.addFilter(WORKERMETADATA_FILTER, WORKERMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet)
: SimpleBeanPropertyFilter.serializeAll());
}
Integer parseInteger(String val) {
if (Strings.isNullOrEmpty(val)) {
return null;
} else {
return Integer.valueOf(val);
}
}
Boolean parseBoolean(String val) {
if (Strings.isNullOrEmpty(val)) {
return null;
} else {
return Boolean.valueOf(val);
}
}
protected <T> Route withFuture(CompletableFuture<T> tFuture) {
return onComplete(tFuture,
t -> t.fold(
throwable -> {
if (throwable instanceof TaskExecutorNotFoundException) {
MasterApiMetrics.getInstance().incrementResp4xx();
return complete(StatusCodes.NOT_FOUND);
}
if (throwable instanceof RequestThrottledException) {
MasterApiMetrics.getInstance().incrementResp4xx();
MasterApiMetrics.getInstance().incrementThrottledRequestCount();
return complete(StatusCodes.TOO_MANY_REQUESTS);
}
if (throwable instanceof AskTimeoutException) {
MasterApiMetrics.getInstance().incrementAskTimeOutCount();
}
MasterApiMetrics.getInstance().incrementResp5xx();
logger.error("withFuture error: ", throwable);
return complete(StatusCodes.INTERNAL_SERVER_ERROR, throwable, Jackson.marshaller());
},
r -> complete(StatusCodes.OK, r, Jackson.marshaller())));
}
}
| 8,137 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/LastSubmittedJobIdStreamRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.NotUsed;
import akka.http.javadsl.marshalling.sse.EventStreamMarshalling;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.sse.ServerSentEvent;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.stream.javadsl.Source;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.api.akka.route.utils.StreamingUtils;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.RxReactiveStreams;
/***
* LastSubmittedJobIdStreamRoute
* Defines the following end points:
* /api/v1/lastSubmittedJobIdStream/{clusterName} (GET)
*/
/**
 * Route serving GET /api/v1/lastSubmittedJobIdStream/{clusterName}: a server-sent-event
 * stream of the last submitted jobId for the given job cluster, with optional heartbeat
 * events controlled by the {@code sendHB} query parameter.
 */
public class LastSubmittedJobIdStreamRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(LastSubmittedJobIdStreamRoute.class);
    private static final PathMatcher0 JOBDISCOVERY_API_PREFIX = segment("api").slash("v1");
    private final JobDiscoveryRouteHandler jobDiscoveryRouteHandler;

    public LastSubmittedJobIdStreamRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) {
        this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler;
    }

    @Override
    protected Route constructRoutes() {
        // api/v1/lastSubmittedJobIdStream/{clusterName} (GET)
        return pathPrefix(
            JOBDISCOVERY_API_PREFIX,
            () -> concat(
                path(
                    segment("lastSubmittedJobIdStream").slash(PathMatchers.segment()),
                    (cluster) -> pathEndOrSingleSlash(
                        () -> get(() -> getLastSubmittedJobIdStreamRoute(cluster))))));
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobDiscoveryStream routes");
        return super.createRoute(routeFilter);
    }

    /**
     * Handles a single GET for the given cluster: asks the discovery handler for the
     * last-submitted-jobId observable and, when present, completes with an SSE stream;
     * otherwise completes with a 500.
     */
    private Route getLastSubmittedJobIdStreamRoute(String clusterName) {
        return parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SEND_HEARTBEAT,
            (heartbeatParam) -> {
                logger.info("GET /api/v1/lastSubmittedJobIdStream/{} called", clusterName);
                final boolean sendHeartbeats = heartbeatParam.orElse(false);
                final CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> responseCS =
                    jobDiscoveryRouteHandler.lastSubmittedJobIdStream(
                        new JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest(clusterName),
                        sendHeartbeats);
                return completeAsync(
                    responseCS,
                    resp -> {
                        final Optional<Observable<JobClusterInfo>> infoStream = resp.getJobClusterInfoObs();
                        if (!infoStream.isPresent()) {
                            // No observable available for this cluster: surface as a server error.
                            logger.warn(
                                "Failed to get last submitted jobId stream for {}",
                                clusterName);
                            return complete(
                                StatusCodes.INTERNAL_SERVER_ERROR,
                                "Failed to get last submitted jobId stream for " +
                                clusterName);
                        }
                        // Bridge the Rx observable into an Akka SSE source, dropping
                        // events that have no SSE representation.
                        final Source<ServerSentEvent, NotUsed> sseSource = Source
                            .fromPublisher(RxReactiveStreams.toPublisher(infoStream.get()))
                            .map(info -> StreamingUtils.from(info).orElse(null))
                            .filter(Objects::nonNull);
                        return completeOK(sseSource, EventStreamMarshalling.toEventStream());
                    },
                    HttpRequestMetrics.Endpoints.LAST_SUBMITTED_JOB_ID_STREAM,
                    HttpRequestMetrics.HttpVerb.GET);
            });
    }
}
| 8,138 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobClustersRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static akka.http.javadsl.server.directives.CachingDirectives.cache;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.*;
import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.shaded.com.google.common.base.Strings;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* JobClustersRoute
* Defines the following end points:
* api/v1/jobsClusters (GET, POST)
* api/v1/jobClusters/{}/latestJobDiscoveryInfo (GET)
* api/v1/jobClusters/{} (GET, POST, PUT, DELETE)
* api/v1/jobClusters/{}/actions/updateArtifact (POST)
* api/v1/jobClusters/{}/actions/updateSla (POST)
* api/v1/jobClusters/{}/actions/updateMigrationStrategy (POST)
* api/v1/jobClusters/{}/actions/updateLabel (POST)
* api/v1/jobClusters/{}/actions/enableCluster (POST)
* api/v1/jobClusters/{}/actions/disableCluster (POST)
* api/v1/jobClusters/{}/actions/updateSchedulingInfo (POST)
*/
/**
 * v1 REST routes for job-cluster CRUD and the cluster action endpoints listed in the
 * file-level comment. GET listing/discovery responses are served through a URI-keyed
 * LFU cache sized from the master configuration; all mutating action endpoints follow
 * the same pattern: reject with CLIENT_ERROR when the cluster name in the payload does
 * not match the path segment, otherwise delegate to the JobClusterRouteHandler.
 */
public class JobClustersRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobClustersRoute.class);
    private static final PathMatcher0 JOBCLUSTERS_API_PREFIX =
            segment("api").slash("v1").slash("jobClusters");
    private final JobClusterRouteHandler jobClusterRouteHandler;
    // URI-keyed cache for GET responses; configured via the api-cache master settings.
    private final Cache<Uri, RouteResult> routeResultCache;
    public JobClustersRoute(final JobClusterRouteHandler jobClusterRouteHandler,
                            final ActorSystem actorSystem) {
        this.jobClusterRouteHandler = jobClusterRouteHandler;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        this.routeResultCache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(),
                config.getApiCacheTtlMilliseconds());
    }
    /** Builds the endpoint tree for api/v1/jobClusters (see the file-level comment). */
    public Route constructRoutes() {
        return pathPrefix(
                JOBCLUSTERS_API_PREFIX,
                () -> concat(
                        // api/v1/jobClusters
                        pathEndOrSingleSlash(() -> concat(
                                // GET
                                get(this::getJobClustersRoute),
                                // POST
                                post(this::postJobClustersRoute))
                        ),
                        // api/v1/jobClusters/{}
                        path(
                                PathMatchers.segment(),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // GET
                                        get(() -> getJobClusterInstanceRoute(clusterName)),
                                        // PUT
                                        put(() -> putJobClusterInstanceRoute(clusterName)),
                                        // DELETE
                                        delete(() -> deleteJobClusterInstanceRoute(clusterName)))
                                )
                        ),
                        // api/v1/jobClusters/{}/latestJobDiscoveryInfo
                        path(
                                PathMatchers.segment().slash("latestJobDiscoveryInfo"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // GET
                                        get(() -> getLatestJobDiscoveryInfo(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/updateArtifact
                        path(
                                PathMatchers.segment().slash("actions").slash("updateArtifact"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateClusterArtifactRoute(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/updateSchedulingInfo
                        path(
                                PathMatchers.segment().slash("actions").slash("updateSchedulingInfo"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateClusterSchedulingInfo(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/updateSla
                        pathPrefix(
                                PathMatchers.segment().slash("actions").slash("updateSla"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateClusterSlaRoute(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/updateMigrationStrategy
                        pathPrefix(
                                PathMatchers.segment()
                                            .slash("actions")
                                            .slash("updateMigrationStrategy"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateMigrationStrategyRoute(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/updateLabel
                        pathPrefix(
                                PathMatchers.segment().slash("actions").slash("updateLabel"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateJobClusterLabelRoute(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/enableCluster
                        pathPrefix(
                                PathMatchers.segment().slash("actions").slash("enableCluster"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateJobClusterStateEnableRoute(clusterName))
                                ))
                        ),
                        // api/v1/jobClusters/{}/actions/disableCluster
                        pathPrefix(
                                PathMatchers.segment().slash("actions").slash("disableCluster"),
                                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                                        // POST
                                        post(() -> updateJobClusterStateDisableRoute(clusterName))
                                ))
                        )
                )
        );
    }
    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobClusters routes");
        return super.createRoute(routeFilter);
    }
    /**
     * GET api/v1/jobClusters: lists all clusters with optional matching filter,
     * pagination, sorting, and field projection. Responses are cached by request URI
     * (alwaysCache, so all GETs of this listing are cacheable).
     */
    private Route getJobClustersRoute() {
        logger.trace("GET /api/v1/jobClusters called");
        return parameterMap(param ->
                alwaysCache(routeResultCache, getRequestUriKeyer, () -> extractUri(
                        uri -> {
                            logger.debug("GET all job clusters");
                            return completeAsync(
                                    jobClusterRouteHandler.getAllJobClusters(
                                            new ListJobClustersRequest()),
                                    resp -> completeOK(
                                            resp.getJobClusters(
                                                    param.getOrDefault(
                                                            ParamName.JOBCLUSTER_FILTER_MATCH,
                                                            null),
                                                    this.parseInteger(param.getOrDefault(
                                                            ParamName.PAGINATION_LIMIT,
                                                            null)),
                                                    this.parseInteger(param.getOrDefault(
                                                            ParamName.PAGINATION_OFFSET,
                                                            null)),
                                                    param.getOrDefault(ParamName.SORT_BY, null),
                                                    this.parseBoolean(param.getOrDefault(
                                                            ParamName.SORT_ASCENDING,
                                                            null)),
                                                    uri),
                                            Jackson.marshaller(super.parseFilter(
                                                    param.getOrDefault(ParamName.PROJECTION_FIELDS, null),
                                                    null))),
                                    HttpRequestMetrics.Endpoints.JOB_CLUSTERS,
                                    HttpRequestMetrics.HttpVerb.GET);
                        })));
    }
    /**
     * POST api/v1/jobClusters: creates a cluster, then (on 2xx) fetches and returns its
     * details with 201 CREATED; on failure the create response code/message is echoed
     * back through the standard failure mapping.
     */
    private Route postJobClustersRoute() {
        return entity(Jackson.unmarshaller(NamedJobDefinition.class), jobClusterDefn -> {
            logger.info("POST /api/v1/jobClusters called {}", jobClusterDefn);
            final CreateJobClusterRequest createJobClusterRequest =
                    JobClusterProtoAdapter.toCreateJobClusterRequest(jobClusterDefn);
            // sequentially chaining the createJobClusterRequest and getJobClusterRequest
            // when previous is successful
            final CompletionStage<GetJobClusterResponse> response =
                    jobClusterRouteHandler
                            .create(createJobClusterRequest)
                            .thenCompose(t -> {
                                if (t.responseCode.getValue() >= 200 &&
                                    t.responseCode.getValue() < 300) {
                                    final GetJobClusterRequest request = new GetJobClusterRequest(
                                            t.getJobClusterName());
                                    return jobClusterRouteHandler.getJobClusterDetails(request);
                                } else {
                                    // Propagate the create failure as an already-completed
                                    // GetJobClusterResponse with an empty cluster.
                                    CompletableFuture<GetJobClusterResponse> responseCompletableFuture = new CompletableFuture<>();
                                    responseCompletableFuture.complete(
                                            new JobClusterManagerProto.GetJobClusterResponse(
                                                    t.requestId,
                                                    t.responseCode,
                                                    t.message,
                                                    Optional.empty()));
                                    return responseCompletableFuture;
                                }
                            });
            return completeAsync(
                    response,
                    resp -> complete(
                            StatusCodes.CREATED,
                            resp.getJobCluster(),
                            Jackson.marshaller()),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * GET api/v1/jobClusters/{}/latestJobDiscoveryInfo: returns the latest discovery
     * info for the cluster, cached by request URI and projected by the optional
     * {@code fields} parameter.
     */
    private Route getLatestJobDiscoveryInfo(String clusterName) {
        logger.trace("GET /api/v1/jobClusters/{}/latestJobDiscoveryInfo called", clusterName);
        return parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) ->
                cache(routeResultCache, getRequestUriKeyer, () ->
                        extractUri(uri -> {
                            logger.debug("GET latest job discovery info for {}", clusterName);
                            return completeAsync(
                                    jobClusterRouteHandler.getLatestJobDiscoveryInfo(new GetLatestJobDiscoveryInfoRequest(clusterName)),
                                    resp -> {
                                        HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                                        return complete(
                                                httpResponse.status(),
                                                resp.getDiscoveryInfo().orElse(null),
                                                Jackson.marshaller(super.parseFilter(fields.orElse(null),
                                                        null)));
                                    },
                                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO,
                                    HttpRequestMetrics.HttpVerb.GET);
                        })));
    }
    /** GET api/v1/jobClusters/{}: returns the cluster's details with optional projection. */
    private Route getJobClusterInstanceRoute(String clusterName) {
        logger.info("GET /api/v1/jobClusters/{} called", clusterName);
        return parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) ->
                completeAsync(
                        jobClusterRouteHandler.getJobClusterDetails(new GetJobClusterRequest(
                                clusterName)),
                        resp -> {
                            HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                            return complete(
                                    httpResponse.status(),
                                    resp.getJobCluster(),
                                    Jackson.marshaller(super.parseFilter(fields.orElse(null),
                                            null)));
                        },
                        HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                        HttpRequestMetrics.HttpVerb.GET));
    }
    /**
     * PUT api/v1/jobClusters/{}: validates the payload (non-null job definition whose
     * name matches the path segment), applies the update, then returns the refreshed
     * cluster details.
     * NOTE(review): toUpdateJobClusterRequest is invoked twice — once above (whose
     * result is only used for its requestId in error responses) and once for the actual
     * update; confirm the duplicate adapter call is intentional.
     */
    private Route putJobClusterInstanceRoute(String clusterName) {
        return entity(Jackson.unmarshaller(NamedJobDefinition.class), jobClusterDefn -> {
            logger.info("PUT /api/v1/jobClusters/{} called {}", clusterName, jobClusterDefn);
            final UpdateJobClusterRequest request = JobClusterProtoAdapter
                    .toUpdateJobClusterRequest(jobClusterDefn);
            CompletionStage<UpdateJobClusterResponse> updateResponse;
            if (jobClusterDefn.getJobDefinition() == null) {
                // if request payload is invalid
                CompletableFuture<UpdateJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new UpdateJobClusterResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                "Invalid request payload."));
                updateResponse = resp;
            } else if (!clusterName.equals(jobClusterDefn.getJobDefinition().getName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new UpdateJobClusterResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        jobClusterDefn.getJobDefinition().getName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.update(
                        JobClusterProtoAdapter.toUpdateJobClusterRequest(jobClusterDefn));
            }
            CompletionStage<GetJobClusterResponse> response = updateResponse
                    .thenCompose(t -> {
                        if (t.responseCode.getValue() >= 200 &&
                            t.responseCode.getValue() < 300) {
                            return jobClusterRouteHandler.getJobClusterDetails(
                                    new GetJobClusterRequest(clusterName));
                        } else {
                            CompletableFuture<GetJobClusterResponse> responseCompletableFuture = new CompletableFuture<>();
                            responseCompletableFuture.complete(
                                    new JobClusterManagerProto.GetJobClusterResponse(
                                            t.requestId,
                                            t.responseCode,
                                            t.message,
                                            Optional.empty()));
                            return responseCompletableFuture;
                        }
                    });
            return completeAsync(
                    response,
                    resp -> {
                        HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                        return complete(
                                httpResponse.status(),
                                resp.getJobCluster(),
                                Jackson.marshaller());
                    },
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                    HttpRequestMetrics.HttpVerb.PUT);
        });
    }
    /**
     * DELETE api/v1/jobClusters/{}: requires a {@code user} query parameter; returns
     * 202 ACCEPTED on success (deletion proceeds asynchronously).
     */
    private Route deleteJobClusterInstanceRoute(String clusterName) {
        return parameterOptional("user", user -> {
            logger.info("DELETE /api/v1/jobClusters/{} called", clusterName);
            String userStr = user.orElse(null);
            if (Strings.isNullOrEmpty(userStr)) {
                return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'user'");
            } else {
                return completeAsync(
                        jobClusterRouteHandler.delete(new DeleteJobClusterRequest(userStr, clusterName)),
                        resp -> complete(StatusCodes.ACCEPTED, ""),
                        HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                        HttpRequestMetrics.HttpVerb.DELETE
                );
            }
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/updateArtifact: validates the payload cluster
     * name against the path, then updates the cluster's artifact; 204 on success.
     */
    private Route updateClusterArtifactRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterArtifactRequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/updateArtifact called {}",
                    clusterName,
                    request);
            CompletionStage<UpdateJobClusterArtifactResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterArtifactResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new UpdateJobClusterArtifactResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        request.getClusterName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateArtifact(request);
            }
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/updateSchedulingInfo: updates the cluster's
     * scheduling info; 204 on success. Unlike the other actions, no payload/path
     * cluster-name match is enforced here — the path segment is passed to the handler.
     * NOTE(review): the metrics endpoint tag below reuses
     * JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT — this looks like a copy-paste;
     * confirm whether a dedicated updateSchedulingInfo endpoint constant was intended.
     */
    private Route updateClusterSchedulingInfo(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateSchedulingInfoRequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/updateSchedulingInfo called {}",
                    clusterName,
                    request);
            CompletionStage<UpdateSchedulingInfoResponse> updateResponse =
                    jobClusterRouteHandler.updateSchedulingInfo(clusterName, request);
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/updateSla: validates the payload cluster name
     * against the path, then updates the cluster's SLA; 204 on success.
     */
    private Route updateClusterSlaRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterSLARequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/updateSla called {}",
                    clusterName,
                    request);
            CompletionStage<UpdateJobClusterSLAResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterSLAResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new UpdateJobClusterSLAResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        request.getClusterName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateSLA(request);
            }
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/updateMigrationStrategy: validates the payload
     * cluster name against the path, then updates the worker migration strategy;
     * 204 on success.
     */
    private Route updateMigrationStrategyRoute(String clusterName) {
        return entity(
                Jackson.unmarshaller(UpdateJobClusterWorkerMigrationStrategyRequest.class),
                request -> {
                    logger.info(
                            "POST /api/v1/jobClusters/{}/actions/updateMigrationStrategy called {}",
                            clusterName,
                            request);
                    CompletionStage<UpdateJobClusterWorkerMigrationStrategyResponse> updateResponse;
                    if (!clusterName.equals(request.getClusterName())) {
                        // if cluster name specified in request payload does not match with what specified in
                        // the endpoint path segment
                        CompletableFuture<UpdateJobClusterWorkerMigrationStrategyResponse> resp = new CompletableFuture<>();
                        resp.complete(
                                new UpdateJobClusterWorkerMigrationStrategyResponse(
                                        request.requestId,
                                        BaseResponse.ResponseCode.CLIENT_ERROR,
                                        String.format(
                                                "Cluster name specified in request payload %s " +
                                                "does not match with what specified in resource path %s",
                                                request.getClusterName(),
                                                clusterName)));
                        updateResponse = resp;
                    } else {
                        // everything look ok so far, process the request!
                        updateResponse = jobClusterRouteHandler.updateWorkerMigrateStrategy(request);
                    }
                    return completeAsync(
                            updateResponse,
                            resp -> complete(StatusCodes.NO_CONTENT, ""),
                            HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY,
                            HttpRequestMetrics.HttpVerb.POST
                    );
                });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/updateLabel: validates the payload cluster
     * name against the path, then updates the cluster's labels; 204 on success.
     */
    private Route updateJobClusterLabelRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterLabelsRequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/updateLabel called {}",
                    clusterName,
                    request);
            CompletionStage<UpdateJobClusterLabelsResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterLabelsResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new UpdateJobClusterLabelsResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        request.getClusterName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateLabels(request);
            }
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/enableCluster: validates the payload cluster
     * name against the path, then enables the cluster; 204 on success.
     */
    private Route updateJobClusterStateEnableRoute(String clusterName) {
        return entity(Jackson.unmarshaller(EnableJobClusterRequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/enableCluster called {}",
                    clusterName,
                    request);
            CompletionStage<EnableJobClusterResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<EnableJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new EnableJobClusterResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        request.getClusterName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.enable(request);
            }
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
    /**
     * POST api/v1/jobClusters/{}/actions/disableCluster: validates the payload cluster
     * name against the path, then disables the cluster; 204 on success.
     */
    private Route updateJobClusterStateDisableRoute(String clusterName) {
        return entity(Jackson.unmarshaller(DisableJobClusterRequest.class), request -> {
            logger.info(
                    "POST /api/v1/jobClusters/{}/actions/disableCluster called {}",
                    clusterName,
                    request);
            CompletionStage<DisableJobClusterResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<DisableJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                        new DisableJobClusterResponse(
                                request.requestId,
                                BaseResponse.ResponseCode.CLIENT_ERROR,
                                String.format(
                                        "Cluster name specified in request payload %s " +
                                        "does not match with what specified in resource path %s",
                                        request.getClusterName(),
                                        clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.disable(request);
            }
            return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER,
                    HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
}
| 8,139 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/ParamName.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
/**
 * Shared HTTP query-parameter names used by the v1 API routes.
 *
 * <p>Pure constant holder; it is never instantiated. Fields are declared
 * {@code final} so the parameter names cannot be reassigned at runtime.
 */
public class ParamName {
    // Projection: restrict/shape the fields returned in a response.
    public static final String PROJECTION_FIELDS = "fields";
    public static final String PROJECTION_TARGET = "fromObj";

    // Sorting.
    public static final String SORT_BY = "sortBy";
    public static final String SORT_ASCENDING = "ascending";

    // Pagination.
    public static final String PAGINATION_LIMIT = "pageSize";
    public static final String PAGINATION_OFFSET = "offset";

    // Job / job-cluster filters and actions.
    public static final String JOB_COMPACT = "compact";
    public static final String JOB_FILTER_MATCH = "matching";
    public static final String JOBCLUSTER_FILTER_MATCH = "matching";
    public static final String REASON = "reason";
    public static final String USER = "user";
    public static final String SEND_HEARTBEAT = "sendHB";
    public static final String ARCHIVED = "archived";
    public static final String SERVER_FILTER_LIMIT = "limit";

    private ParamName() {
        // Utility class: prevent instantiation.
    }
}
| 8,140 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v1/AgentClustersRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.Route;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import java.io.IOException;
import java.util.List;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* Agent clusters route
* Defines the following end points:
* /api/v1/agentClusters (GET, POST)
* /api/v1/agentClusters/jobs (GET)
* /api/v1/agentClusters/autoScalePolicy (GET)
*/
/***
 * Agent clusters route.
 * Defines the following end points:
 *    /api/v1/agentClusters                  (GET, POST)
 *    /api/v1/agentClusters/jobs             (GET)
 *    /api/v1/agentClusters/autoScalePolicy  (GET)
 */
public class AgentClustersRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(AgentClustersRoute.class);
    private final AgentClusterOperations agentClusterOps;

    public AgentClustersRoute(final AgentClusterOperations agentClusterOperations) {
        this.agentClusterOps = agentClusterOperations;
    }

    private static final PathMatcher0 API_V1_AGENT_CLUSTER = segment("api").slash("v1")
            .slash("agentClusters");

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/agentClusters");
        return super.createRoute(routeFilter);
    }

    public Route constructRoutes() {
        return concat(
                pathPrefix(API_V1_AGENT_CLUSTER, () -> concat(
                        // api/v1/agentClusters
                        pathEndOrSingleSlash(() -> concat(
                                // GET - list all active agent clusters
                                get(this::getAgentClustersRoute),

                                // POST - activate/deactivate agent clusters
                                post(this::postAgentClustersRoute)
                        )),

                        // api/v1/agentClusters/jobs
                        path(
                                "jobs",
                                () -> pathEndOrSingleSlash(
                                        // GET - list jobs running on each agent cluster VM
                                        () -> get(this::getAgentClustersJobsRoute)
                                )
                        ),

                        // api/v1/agentClusters/autoScalePolicy
                        path(
                                "autoScalePolicy",
                                () -> pathEndOrSingleSlash(
                                        // GET - retrieve agent cluster auto-scale rules
                                        () -> get(this::getAgentClustersAutoScalePolicyRoute)
                                ))
                        )
                )
        );
    }

    /**
     * GET /api/v1/agentClusters - returns the attribute values of the
     * currently active agent clusters.
     */
    private Route getAgentClustersRoute() {
        logger.info("GET /api/v1/agentClusters called");
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
                new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
                new BasicTag("responseCode", String.valueOf(StatusCodes.OK.intValue())));

        return complete(
                StatusCodes.OK,
                agentClusterOps.getActiveVMsAttributeValues(),
                Jackson.marshaller());
    }

    /**
     * POST /api/v1/agentClusters - replaces the set of active agent clusters
     * with the list supplied in the request body.
     */
    private Route postAgentClustersRoute() {
        logger.info("POST /api/v1/agentClusters called");
        return entity(
                Jackson.unmarshaller(new TypeReference<List<String>>() {
                }),
                activeClustersList -> {
                    logger.info("POST {} called {}", API_V1_AGENT_CLUSTER, activeClustersList);
                    try {
                        agentClusterOps.setActiveVMsAttributeValues(activeClustersList);
                    } catch (IOException e) {
                        // BUGFIX: tag with the actual verb (POST); previously tagged GET.
                        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                                HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
                                new BasicTag("verb", HttpRequestMetrics.HttpVerb.POST.toString()),
                                new BasicTag("responseCode", String.valueOf(StatusCodes.INTERNAL_SERVER_ERROR.intValue())));
                        return complete(
                                StatusCodes.INTERNAL_SERVER_ERROR,
                                "Failed to set active clusters to " +
                                        activeClustersList.toString());
                    }
                    // BUGFIX: tag with the actual verb (POST); previously tagged GET.
                    HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                            HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
                            new BasicTag("verb", HttpRequestMetrics.HttpVerb.POST.toString()),
                            new BasicTag("responseCode", String.valueOf(StatusCodes.OK.intValue())));
                    return complete(StatusCodes.OK, "");
                });
    }

    /**
     * GET /api/v1/agentClusters/jobs - returns the jobs running on each VM.
     */
    private Route getAgentClustersJobsRoute() {
        logger.info("GET /api/v1/agentClusters/jobs called");
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                HttpRequestMetrics.Endpoints.AGENT_CLUSTERS_JOBS,
                new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
                new BasicTag("responseCode", String.valueOf(StatusCodes.OK.intValue())));
        return complete(
                StatusCodes.OK,
                agentClusterOps.getJobsOnVMs(),
                Jackson.marshaller());
    }

    /**
     * GET /api/v1/agentClusters/autoScalePolicy - returns the configured
     * agent cluster auto-scale rules.
     */
    private Route getAgentClustersAutoScalePolicyRoute() {
        logger.info("GET /api/v1/agentClusters/autoScalePolicy called");
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                HttpRequestMetrics.Endpoints.AGENT_CLUSTERS_AUTO_SCALE_POLICY,
                new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
                new BasicTag("responseCode", String.valueOf(StatusCodes.OK.intValue())));
        return complete(
                StatusCodes.OK,
                agentClusterOps.getAgentClusterAutoScaleRules(),
                Jackson.marshaller());
    }
}
| 8,141 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/pagination/ListObject.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.pagination;
import akka.http.javadsl.model.Uri;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.api.akka.route.v1.ParamName;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.shaded.com.google.common.collect.Maps;
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.MethodDescriptor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/***
* Generic ListObject to support pagination, sorting
* @param <T>
*/
/***
 * Generic ListObject to support pagination and sorting of API list responses.
 *
 * <p>Holds one page ({@code list}) of a sorted input collection plus
 * ready-to-use {@code prev}/{@code next} page URIs and the pre-pagination
 * {@code total}.
 *
 * @param <T> element type of the paged list
 */
public class ListObject<T> {
    private static final Logger logger = LoggerFactory.getLogger(ListObject.class);

    // Current page: a sub-list view of the (sorted) input.
    public List<T> list;
    // URI of the previous page, or null when on the first page / not paginated.
    public String prev;
    // URI of the next page, or null when on the last page / not paginated.
    public String next;
    // Total number of elements before pagination was applied.
    public int total;

    ListObject(List<T> objects, int limit, int offset, Comparator<T> sorter, Uri uri) {
        if (objects == null) {
            list = Lists.newArrayList();
            total = 0;
        } else {
            total = objects.size();
            if (sorter != null) {
                // NOTE: sorts the caller-supplied list in place.
                objects.sort(sorter);
            }
            // BUGFIX: compute in long arithmetic. With the Builder's default
            // limit of Integer.MAX_VALUE and a non-zero offset, (offset + limit)
            // overflowed int and subList() threw.
            int toIndex = (int) Math.min((long) offset + (long) limit, (long) objects.size());
            if (offset > toIndex) {
                // Offset beyond the end of the data: return an empty page.
                this.list = Lists.newArrayList();
            } else {
                this.list = objects.subList(offset, toIndex);
            }

            // Only emit prev/next links when pagination was actually requested.
            if (limit < Integer.MAX_VALUE && uri != null) {
                if (offset == 0) {
                    prev = null;
                } else {
                    int prevOffset = Math.max(offset - limit, 0);
                    prev = generateNewUri(uri, prevOffset);
                }

                if ((long) offset + (long) limit >= objects.size()) {
                    next = null;
                } else {
                    int nextOffset = offset + limit;
                    next = generateNewUri(uri, nextOffset);
                }
            }
        }
    }

    public List<T> getList() {
        return list;
    }

    public void setList(List<T> list) {
        this.list = list;
    }

    public String getNext() {
        return next;
    }

    public void setNext(String next) {
        this.next = next;
    }

    public String getPrev() {
        return prev;
    }

    public void setPrev(String prev) {
        this.prev = prev;
    }

    /**
     * Rebuilds the request URI with its offset query parameter replaced,
     * preserving all other query parameters.
     */
    private String generateNewUri(Uri originalUri, int offset) {
        Map<String, String> queryMap = Maps.newLinkedHashMap(originalUri.query().toMap());
        queryMap.put(ParamName.PAGINATION_OFFSET, String.valueOf(offset));

        StringBuilder stringBuilder = new StringBuilder(originalUri.path());
        String dividerChar = "?";
        for (Map.Entry<String, String> entry : queryMap.entrySet()) {
            stringBuilder.append(dividerChar);
            stringBuilder.append(entry.getKey());
            stringBuilder.append("=");
            stringBuilder.append(entry.getValue());
            dividerChar = "&";
        }
        return stringBuilder.toString();
    }

    /** Fluent builder for {@link ListObject}; all pagination knobs are optional. */
    public static class Builder<T> {
        private List<T> objects = null;
        private Class<T> targetType;
        private int limit = Integer.MAX_VALUE;
        private int offset = 0;
        private String sortField = null;
        private boolean sortAscending = true;
        private Uri uri = null;

        public Builder() {
        }

        public ListObject.Builder<T> withObjects(List<T> objects, Class<T> targetType) {
            this.objects = objects;
            this.targetType = targetType;
            return this;
        }

        public ListObject.Builder<T> withLimit(int limit) {
            this.limit = limit;
            return this;
        }

        public ListObject.Builder<T> withOffset(int offset) {
            this.offset = offset;
            return this;
        }

        public ListObject.Builder<T> withSortField(String sortField) {
            this.sortField = sortField;
            return this;
        }

        public ListObject.Builder<T> withSortAscending(boolean isAscending) {
            this.sortAscending = isAscending;
            return this;
        }

        public ListObject.Builder<T> withUri(Uri uri) {
            this.uri = uri;
            return this;
        }

        public ListObject<T> build() {
            Preconditions.checkNotNull(this.objects, "Objects cannot be null");
            Preconditions.checkNotNull(this.targetType, "Target type cannot be null");
            Preconditions.checkState(this.limit > 0, "limit needs to be greater than 0");
            Preconditions.checkState(offset >= 0, "offset has to be equal or greater than 0.");

            return new ListObject<>(
                    this.objects,
                    this.limit,
                    this.offset,
                    getSorter(),
                    this.uri);
        }

        /**
         * Builds a reflective comparator on {@code sortField}, or null when no
         * sort field was requested. Throws RuntimeException for an invalid field.
         */
        private Comparator<T> getSorter() {
            if (Strings.isNullOrEmpty(sortField)) {
                return null;
            }

            // Validate the field exists. getDeclaredField throws
            // NoSuchFieldException rather than returning null, so no null check
            // on its result is needed.
            try {
                targetType.getDeclaredField(sortField);
            } catch (NoSuchFieldException ex) {
                throw new RuntimeException(
                        String.format("Specified sort field is invalid. [%s]", sortField),
                        ex);
            }

            return (T t1, T t2) -> {
                int result;
                if (t1 == null && t2 == null) {
                    result = 0;
                } else if (t1 == null) {
                    result = -1;
                } else if (t2 == null) {
                    result = 1;
                } else {
                    Comparable f1 = getComparableFromFieldName(sortField, t1, targetType);
                    Comparable f2 = getComparableFromFieldName(sortField, t2, targetType);
                    // BUGFIX: the previous null branches invoked compareTo(null),
                    // which NPEs for standard Comparables. Order nulls first.
                    if (f1 == null && f2 == null) {
                        result = 0;
                    } else if (f1 == null) {
                        result = -1;
                    } else if (f2 == null) {
                        result = 1;
                    } else {
                        result = f1.compareTo(f2);
                    }
                }
                return sortAscending ? result : -result;
            };
        }

        /**
         * Extracts the value of {@code fieldName} from {@code val}, first via
         * direct field access and, when the field is inaccessible (private),
         * via the bean-style getter. Throws RuntimeException when the value
         * cannot be read or is not Comparable.
         */
        private static <T> Comparable getComparableFromFieldName(
                String fieldName,
                T val,
                Class<T> targetType) {
            try {
                Field field = targetType.getDeclaredField(fieldName);

                Object fieldValue = null;
                try {
                    fieldValue = field.get(val);
                } catch (IllegalAccessException ex) {
                    logger.warn(
                            "Unable to access field {}, trying Bean getter method instead...",
                            fieldName);
                }

                // field is private, try pojo/bean get method instead
                if (fieldValue == null) {
                    BeanInfo info = Introspector.getBeanInfo(targetType);
                    MethodDescriptor[] methods = info.getMethodDescriptors();
                    if (methods == null) {
                        throw new RuntimeException("Cannot access sort field. " + fieldName);
                    }
                    for (MethodDescriptor methodDescriptor : methods) {
                        if (methodDescriptor.getName().equalsIgnoreCase("get" + fieldName)) {
                            fieldValue = methodDescriptor.getMethod().invoke(val);
                            break;
                        }
                    }
                }

                if (fieldValue == null) {
                    throw new RuntimeException("Cannot access sort field. " + fieldName);
                }

                if (!(fieldValue instanceof Comparable)) {
                    throw new RuntimeException(
                            String.format("Specified sort field is invalid. [%s]", fieldName));
                }
                return (Comparable) fieldValue;
            } catch (NoSuchFieldException |
                    IllegalAccessException |
                    IntrospectionException |
                    InvocationTargetException ex) {
                throw new RuntimeException(
                        String.format("Specified sort field is invalid. [%s]", fieldName),
                        ex);
            }
        }
    }
}
| 8,142 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/AgentClusterRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import akka.japi.JavaPartialFunction;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Legacy (v0) agent cluster route backed by {@link AgentClusterOperations}.
 *
 * <p>End points (all under /api/vm/activevms):
 *    listactive        (GET)  - active agent cluster attribute values
 *    listjobsonvms     (GET)  - jobs per VM (response cached by URI)
 *    listagentclusters (GET)  - agent cluster auto-scale rules
 *    setactive         (POST) - replace the set of active agent clusters
 */
public class AgentClusterRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(AgentClusterRoute.class);
    private final AgentClusterOperations agentClusterOps;
    // Response cache keyed by request URI; only GET requests are cacheable.
    private final Cache<Uri, RouteResult> cache;
    private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() {
        public Uri apply(RequestContext in, boolean isCheck) {
            final HttpRequest request = in.getRequest();
            final boolean isGet = request.method() == HttpMethods.GET;
            if (isGet) {
                return request.getUri();
            } else {
                // Non-GET requests must never be served from (or stored in) the cache.
                throw noMatch();
            }
        }
    };

    private final Counter setActiveCount;
    private final Counter listActiveCount;
    private final Counter listJobsOnVMsCount;
    private final Counter listAgentClustersCount;

    public AgentClusterRoute(final AgentClusterOperations agentClusterOperations, final ActorSystem actorSystem) {
        this.agentClusterOps = agentClusterOperations;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        this.cache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(),
                config.getApiCacheTtlMilliseconds());

        Metrics m = new Metrics.Builder()
                .id("V0AgentClusterRoute")
                .addCounter("setActive")
                .addCounter("listActive")
                .addCounter("listJobsOnVMs")
                .addCounter("listAgentClusters")
                .build();
        this.setActiveCount = m.getCounter("setActive");
        this.listActiveCount = m.getCounter("listActive");
        this.listJobsOnVMsCount = m.getCounter("listJobsOnVMs");
        this.listAgentClustersCount = m.getCounter("listAgentClusters");
    }

    private static final PathMatcher0 API_VM_ACTIVEVMS = segment("api").slash("vm").slash("activevms");

    @VisibleForTesting
    public static final String LISTACTIVE = "listactive";
    @VisibleForTesting
    public static final String SETACTIVE = "setactive";
    @VisibleForTesting
    public static final String LISTJOBSONVMS = "listjobsonvms";
    @VisibleForTesting
    public static final String LISTAGENTCLUSTERS = "listagentclusters";

    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
            HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
            ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    private Route agentClusterRoutes() {
        return route(
                get(() -> route(
                        path(API_VM_ACTIVEVMS.slash(LISTACTIVE), () -> {
                            // BUGFIX: log message previously misspelled the path as "activems".
                            logger.debug("/api/vm/activevms/{} called", LISTACTIVE);
                            listActiveCount.increment();
                            // Fall back to an empty result when ops are not wired up.
                            return complete(StatusCodes.OK,
                                    agentClusterOps != null ? agentClusterOps.getActiveVMsAttributeValues() : Collections.emptySet(),
                                    Jackson.marshaller());
                        }),
                        path(API_VM_ACTIVEVMS.slash(LISTJOBSONVMS), () -> {
                            logger.debug("/api/vm/activevms/{} called", LISTJOBSONVMS);
                            listJobsOnVMsCount.increment();
                            // Cached by request URI (GET only, see requestUriKeyer).
                            return alwaysCache(cache, requestUriKeyer, () ->
                                    extractUri(uri -> complete(StatusCodes.OK,
                                            agentClusterOps != null ? agentClusterOps.getJobsOnVMs() : Collections.emptyMap(),
                                            Jackson.marshaller())));
                        }),
                        path(API_VM_ACTIVEVMS.slash(LISTAGENTCLUSTERS), () -> {
                            logger.debug("/api/vm/activevms/{} called", LISTAGENTCLUSTERS);
                            listAgentClustersCount.increment();
                            return complete(StatusCodes.OK,
                                    agentClusterOps != null ? agentClusterOps.getAgentClusterAutoScaleRules() : Collections.emptyMap(),
                                    Jackson.marshaller());
                        })
                )),
                post(() -> route(
                        path(API_VM_ACTIVEVMS.slash(SETACTIVE), () ->
                                decodeRequest(() ->
                                        entity(Unmarshaller.entityToString(), req -> {
                                            try {
                                                setActiveCount.increment();
                                                List<String> activeClustersList = Jackson.fromJSON(req, new TypeReference<List<String>>() {});
                                                logger.info("POST /api/vm/activevms/{} called {}", SETACTIVE, activeClustersList);
                                                if (agentClusterOps != null) {
                                                    agentClusterOps.setActiveVMsAttributeValues(activeClustersList);
                                                }
                                            } catch (IOException e) {
                                                return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                                        "Failed to set active clusters to " + req);
                                            }
                                            // Echo the accepted payload back to the caller.
                                            return complete(StatusCodes.OK, req);
                                        }))
                        )
                ))
        );
    }

    /**
     * Builds the route with CORS headers and a JSON error handler around it.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder()
                .match(Exception.class, x -> {
                    logger.error("got exception", x);
                    return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": \"" + x.getMessage() + "\"}");
                })
                .build();

        return
                respondWithHeaders(DEFAULT_RESPONSE_HEADERS,
                        () -> handleExceptions(jsonExceptionHandler,
                                () -> routeFilter.apply(agentClusterRoutes())));
    }
}
| 8,143 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobDiscoveryRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.NotUsed;
import akka.http.javadsl.marshalling.sse.EventStreamMarshalling;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.sse.ServerSentEvent;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.stream.javadsl.Source;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.api.akka.route.utils.StreamingUtils;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.domain.JobId;
import java.util.Arrays;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.RxReactiveStreams;
/**
 * Legacy (v0) job discovery routes, exposing server-sent-event streams:
 *    /assignmentresults/{jobId}   (GET) - scheduling info stream for a job
 *    /namedjobs/{jobCluster}      (GET) - last-submitted job id stream for a cluster
 *
 * <p>Both routes accept an optional {@code sendHB} boolean query parameter that
 * enables heartbeat events on the stream.
 */
public class JobDiscoveryRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryRoute.class);
    private final JobDiscoveryRouteHandler jobDiscoveryRouteHandler;

    private final Metrics metrics;
    private final Counter schedulingInfoStreamGET;
    private final Counter jobClusterInfoStreamGET;

    public JobDiscoveryRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) {
        this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler;
        Metrics m = new Metrics.Builder()
                .id("JobDiscoveryRoute")
                .addCounter("schedulingInfoStreamGET")
                .addCounter("jobClusterInfoStreamGET")
                .build();
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.schedulingInfoStreamGET = metrics.getCounter("schedulingInfoStreamGET");
        this.jobClusterInfoStreamGET = metrics.getCounter("jobClusterInfoStreamGET");
    }

    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
            HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
            ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    private Route getJobDiscoveryRoutes() {
        return route(
                get(() -> route(
                        path(segment("assignmentresults").slash(PathMatchers.segment()), (jobId) ->
                                parameterOptional(
                                        StringUnmarshallers.BOOLEAN,
                                        "sendHB",
                                        (sendHeartbeats) -> {
                                            logger.debug("/assignmentresults/{} called", jobId);
                                            schedulingInfoStreamGET.increment();
                                            // BUGFIX: validate the job id up front. The previous
                                            // unchecked Optional.get() turned a malformed id into
                                            // a NoSuchElementException and hence a 500 response.
                                            Optional<JobId> jobIdO = JobId.fromId(jobId);
                                            if (!jobIdO.isPresent()) {
                                                return complete(
                                                        StatusCodes.BAD_REQUEST,
                                                        "Invalid jobId " + jobId);
                                            }
                                            JobClusterManagerProto.GetJobSchedInfoRequest req =
                                                    new JobClusterManagerProto.GetJobSchedInfoRequest(
                                                            jobIdO.get());
                                            CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoRespCS =
                                                    jobDiscoveryRouteHandler.schedulingInfoStream(
                                                            req,
                                                            sendHeartbeats.orElse(false));
                                            return completeAsync(
                                                    schedulingInfoRespCS,
                                                    r -> {
                                                        Optional<Observable<JobSchedulingInfo>> schedInfoStreamO =
                                                                r.getSchedInfoStream();
                                                        if (schedInfoStreamO.isPresent()) {
                                                            // Bridge the Rx observable into an Akka
                                                            // SSE source, dropping events that
                                                            // cannot be rendered as SSE.
                                                            Source<ServerSentEvent, NotUsed> schedInfoSource =
                                                                    Source.fromPublisher(
                                                                            RxReactiveStreams.toPublisher(
                                                                                    schedInfoStreamO.get()))
                                                                            .map(j -> StreamingUtils.from(j)
                                                                                    .orElse(null))
                                                                            .filter(sse -> sse != null);
                                                            return completeOK(
                                                                    schedInfoSource,
                                                                    EventStreamMarshalling.toEventStream());
                                                        } else {
                                                            logger.warn(
                                                                    "Failed to get sched info stream for job {}",
                                                                    jobId);
                                                            return complete(
                                                                    StatusCodes.INTERNAL_SERVER_ERROR,
                                                                    "Failed to get sched info stream for job " +
                                                                            jobId);
                                                        }
                                                    });
                                        })
                        ),
                        path(segment("namedjobs").slash(PathMatchers.segment()), (jobCluster) ->
                                parameterOptional(
                                        StringUnmarshallers.BOOLEAN,
                                        "sendHB",
                                        (sendHeartbeats) -> {
                                            logger.debug("/namedjobs/{} called", jobCluster);
                                            jobClusterInfoStreamGET.increment();
                                            JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest req =
                                                    new JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest(
                                                            jobCluster);
                                            CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> jobClusterInfoRespCS =
                                                    jobDiscoveryRouteHandler.lastSubmittedJobIdStream(
                                                            req,
                                                            sendHeartbeats.orElse(false));
                                            return completeAsync(
                                                    jobClusterInfoRespCS,
                                                    r -> {
                                                        Optional<Observable<JobClusterInfo>> jobClusterInfoO =
                                                                r.getJobClusterInfoObs();
                                                        if (jobClusterInfoO.isPresent()) {
                                                            Source<ServerSentEvent, NotUsed> source =
                                                                    Source.fromPublisher(
                                                                            RxReactiveStreams.toPublisher(
                                                                                    jobClusterInfoO.get()))
                                                                            .map(j -> StreamingUtils.from(j)
                                                                                    .orElse(null))
                                                                            .filter(sse -> sse != null);
                                                            return completeOK(
                                                                    source,
                                                                    EventStreamMarshalling.toEventStream());
                                                        } else {
                                                            logger.warn(
                                                                    "Failed to get last submitted jobId stream for {}",
                                                                    jobCluster);
                                                            return complete(
                                                                    StatusCodes.INTERNAL_SERVER_ERROR,
                                                                    "Failed to get last submitted jobId stream for " +
                                                                            jobCluster);
                                                        }
                                                    });
                                        })
                        )
                ))
        );
    }

    /**
     * Builds the route with CORS headers and a JSON error handler around it.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler =
                ExceptionHandler.newBuilder()
                        .match(Exception.class, x -> {
                            logger.error("got exception", x);
                            return complete(
                                    StatusCodes.INTERNAL_SERVER_ERROR,
                                    "{\"error\": \"" + x.getMessage() + "\"}");
                        })
                        .build();
        return respondWithHeaders(
                DEFAULT_RESPONSE_HEADERS,
                () -> handleExceptions(
                        jsonExceptionHandler,
                        () -> routeFilter.apply(getJobDiscoveryRoutes())));
    }
}
| 8,144 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobIdsRequest;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobsRequest;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createWorkerStatusRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest.DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT;
import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import akka.japi.JavaPartialFunction;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.server.core.PostJobStatusRequest;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JobRoute extends BaseRoute {
private static final Logger logger = LoggerFactory.getLogger(JobRoute.class);
private final JobRouteHandler jobRouteHandler;
private final Metrics metrics;
private final Counter jobListGET;
private final Counter jobListJobIdGET;
private final Counter jobListRegexGET;
private final Counter jobListLabelMatchGET;
private final Counter jobArchivedWorkersGET;
private final Counter jobArchivedWorkersGETInvalid;
private final Counter workerHeartbeatStatusPOST;
private final Counter workerHeartbeatSkipped;
private final Cache<Uri, RouteResult> cache;
private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() {
public Uri apply(RequestContext in, boolean isCheck) {
final HttpRequest request = in.getRequest();
final boolean isGet = request.method() == HttpMethods.GET;
if (isGet) {
return request.getUri();
} else {
throw noMatch();
}
}
};
public JobRoute(final JobRouteHandler jobRouteHandler, final ActorSystem actorSystem) {
this.jobRouteHandler = jobRouteHandler;
MasterConfiguration config = ConfigurationProvider.getConfig();
this.cache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(),
config.getApiCacheTtlMilliseconds());
Metrics m = new Metrics.Builder()
.id("V0JobRoute")
.addCounter("jobListGET")
.addCounter("jobListJobIdGET")
.addCounter("jobListRegexGET")
.addCounter("jobListLabelMatchGET")
.addCounter("jobArchivedWorkersGET")
.addCounter("jobArchivedWorkersGETInvalid")
.addCounter("workerHeartbeatStatusPOST")
.addCounter("workerHeartbeatSkipped")
.build();
this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
this.jobListGET = metrics.getCounter("jobListGET");
this.jobListJobIdGET = metrics.getCounter("jobListJobIdGET");
this.jobListRegexGET = metrics.getCounter("jobListRegexGET");
this.jobListLabelMatchGET = metrics.getCounter("jobListLabelMatchGET");
this.jobArchivedWorkersGET = metrics.getCounter("jobArchivedWorkersGET");
this.jobArchivedWorkersGETInvalid = metrics.getCounter("jobArchivedWorkersGETInvalid");
this.workerHeartbeatStatusPOST = metrics.getCounter("workerHeartbeatStatusPOST");
this.workerHeartbeatSkipped = metrics.getCounter("workerHeartbeatSkipped");
}
    // Base path for all job endpoints: /api/jobs
    private static final PathMatcher0 API_JOBS = segment("api").slash("jobs");
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    // Permissive CORS header attached to every response of this route.
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    public static final String KILL_ENDPOINT = "kill";
    public static final String RESUBMIT_WORKER_ENDPOINT = "resubmitWorker";
    public static final String SCALE_STAGE_ENDPOINT = "scaleStage";
    // Worker status/heartbeat events are posted to /api/postjobstatus.
    public static final PathMatcher0 STATUS_ENDPOINT = segment("api").slash("postjobstatus");
    /**
     * Route that returns
     * - a list of Job Ids only if 'jobIdsOnly' query param is set
     * - a list of compact Job Infos if 'compact' query param is set
     * - a list of Job metadatas otherwise
     * The above lists are filtered and returned based on other criteria specified in the List request
     * like stageNumber, workerIndex, workerNumber, matchingLabels, regex, activeOnly, jobState, workerState, limit.
     * GET responses are cached per request URI via {@code alwaysCache}.
     *
     * @param regex the regex to match against Job IDs to return in response
     * @return Route job list route
     */
    private Route jobListRoute(final Optional<String> regex) {
        return parameterOptional(StringUnmarshallers.BOOLEAN, "jobIdsOnly", (jobIdsOnly) ->
            parameterOptional(StringUnmarshallers.BOOLEAN, "compact", (isCompact) ->
                parameterMultiMap(params -> {
                    // 'jobIdsOnly' takes precedence over 'compact'.
                    if (jobIdsOnly.isPresent() && jobIdsOnly.get()) {
                        logger.debug("/api/jobs/list jobIdsOnly called");
                        return alwaysCache(cache, requestUriKeyer, () ->
                            extractUri(uri -> completeAsync(
                                jobRouteHandler.listJobIds(createListJobIdsRequest(params, regex, true)),
                                resp -> completeOK(
                                    resp.getJobIds().stream()
                                        .map(jobId -> jobId.getJobId())
                                        .collect(Collectors.toList()),
                                    Jackson.marshaller()))));
                    }
                    if (isCompact.isPresent() && isCompact.get()) {
                        logger.debug("/api/jobs/list compact called");
                        // Compact form: project each job metadata view into a CompactJobInfo.
                        return alwaysCache(cache, requestUriKeyer, () ->
                            extractUri(uri -> completeAsync(
                                jobRouteHandler.listJobs(createListJobsRequest(params, regex, true)),
                                resp -> completeOK(
                                    resp.getJobList()
                                        .stream()
                                        .map(jobMetadataView -> JobClusterProtoAdapter.toCompactJobInfo(jobMetadataView))
                                        .collect(Collectors.toList()),
                                    Jackson.marshaller()))));
                    } else {
                        logger.debug("/api/jobs/list called");
                        // Default: full job metadata list.
                        return alwaysCache(cache, requestUriKeyer, () ->
                            extractUri(uri -> completeAsync(
                                jobRouteHandler.listJobs(createListJobsRequest(params, regex, true)),
                                resp -> completeOK(
                                    resp.getJobList(),
                                    Jackson.marshaller()))));
                    }
                })
            )
        );
    }
    /**
     * Builds the legacy (v0) job route tree:
     *   POST /api/postjobstatus          - worker heartbeat/status events
     *   POST /api/jobs/kill              - kill a job
     *   POST /api/jobs/resubmitWorker    - resubmit a single worker
     *   POST /api/jobs/scaleStage        - scale a job stage (bounded by config)
     *   GET  /api/jobs/list[...]         - job listings (several variants, cached)
     *   GET  /api/jobs/archived/<jobId>  - archived workers of a job (cached)
     */
    private Route getJobRoutes() {
        return route(
            path(STATUS_ENDPOINT, () ->
                post(() ->
                    decodeRequest(() ->
                        entity(Unmarshaller.entityToString(), req -> {
                            if (logger.isDebugEnabled()) {
                                logger.debug("/api/postjobstatus called {}", req);
                            }
                            try {
                                workerHeartbeatStatusPOST.increment();
                                PostJobStatusRequest postJobStatusRequest = Jackson.fromJSON(req, PostJobStatusRequest.class);
                                WorkerEvent workerStatusRequest = createWorkerStatusRequest(postJobStatusRequest);
                                // Heartbeats may be dropped entirely when heartbeat
                                // processing is disabled in master configuration.
                                if (workerStatusRequest instanceof WorkerHeartbeat) {
                                    if (!ConfigurationProvider.getConfig().isHeartbeatProcessingEnabled()) {
                                        // skip heartbeat processing
                                        if (logger.isTraceEnabled()) {
                                            logger.trace("skipped heartbeat event {}", workerStatusRequest);
                                        }
                                        workerHeartbeatSkipped.increment();
                                        return complete(StatusCodes.OK);
                                    }
                                }
                                return completeWithFuture(
                                    jobRouteHandler.workerStatus(workerStatusRequest)
                                        .thenApply(this::toHttpResponse));
                            } catch (IOException e) {
                                logger.warn("Error handling job status {}", req, e);
                                return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid JSON payload to post job status\"}");
                            }
                        })
                    ))),
            pathPrefix(API_JOBS, () -> route(
                post(() -> route(
                    path(KILL_ENDPOINT, () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), req -> {
                                logger.debug("/api/jobs/kill called {}", req);
                                try {
                                    final KillJobRequest killJobRequest = Jackson.fromJSON(req, KillJobRequest.class);
                                    return completeWithFuture(
                                        jobRouteHandler.kill(killJobRequest)
                                            .thenApply(resp -> {
                                                // Rewrite the response message to match the legacy master's format.
                                                if (resp.responseCode == BaseResponse.ResponseCode.SUCCESS) {
                                                    return new JobClusterManagerProto.KillJobResponse(resp.requestId, resp.responseCode,
                                                        resp.getState(), "[\""+ resp.getJobId().getId() +" Killed\"]", resp.getJobId(), resp.getUser());
                                                } else if (resp.responseCode == BaseResponse.ResponseCode.CLIENT_ERROR) {
                                                    // for backwards compatibility with old master
                                                    return new JobClusterManagerProto.KillJobResponse(resp.requestId, BaseResponse.ResponseCode.SUCCESS,
                                                        resp.getState(), "[\""+ resp.message +" \"]", resp.getJobId(), resp.getUser());
                                                }
                                                return resp;
                                            })
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on job kill {}", req, e);
                                    return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid json payload to kill job\"}");
                                }
                            })
                        )),
                    path(RESUBMIT_WORKER_ENDPOINT, () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), req -> {
                                logger.debug("/api/jobs/resubmitWorker called {}", req);
                                try {
                                    final ResubmitWorkerRequest resubmitWorkerRequest = Jackson.fromJSON(req, ResubmitWorkerRequest.class);
                                    return completeWithFuture(
                                        jobRouteHandler.resubmitWorker(resubmitWorkerRequest)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on worker resubmit {}", req, e);
                                    return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid json payload to resubmit worker\"}");
                                }
                            })
                        )),
                    path(SCALE_STAGE_ENDPOINT, () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), req -> {
                                logger.debug("/api/jobs/scaleStage called {}", req);
                                try {
                                    ScaleStageRequest scaleStageRequest = Jackson.fromJSON(req, ScaleStageRequest.class);
                                    int numWorkers = scaleStageRequest.getNumWorkers();
                                    // Reject scale requests above the configured per-stage worker cap.
                                    int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage();
                                    if (numWorkers > maxWorkersPerStage) {
                                        logger.warn("rejecting ScaleStageRequest {} with invalid num workers", scaleStageRequest);
                                        return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"num workers must be less than " + maxWorkersPerStage + "\"}");
                                    }
                                    return completeWithFuture(
                                        jobRouteHandler.scaleStage(scaleStageRequest)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error scaling stage {}", req, e);
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "{\"error\": \"invalid json payload to scale stage " + e.getMessage() +"\"}");
                                }
                            })
                        ))
                    // TODO path("updateScalingPolicy", () ->
                    //     entity(Jackson.unmarshaller(UpdateJobClusterRequest.class), req -> {
                    //         logger.info("/api/jobs/kill called {}", req);
                    //         return completeWithFuture(
                    //             jobRouteHandler.kill(req)
                    //                 .thenApply(this::toHttpResponse));
                    //     })
                    // )
                )),
                get(() -> route(
                    // Context from old mantis master:
                    // list all jobs activeOnly = true
                    // optional boolean 'compact' query param to return compact job infos if set
                    // For compact,
                    // - optional 'limit' query param
                    // - optional 'jobState' query param
                    // For non compact,
                    // - optional boolean 'jobIdsOnly' query param to return only the job Ids if set
                    // - optional int 'stageNumber' query param to filter for stage number
                    // - optional int 'workerIndex' query param to filter for worker index
                    // - optional int 'workerNumber' query param to filter for worker number
                    // - optional int 'workerState' query param to filter for worker state
                    // list/all - list all jobs activeOnly=false with above query parameters
                    // list/matching/<regex> - if optional regex param specified, propagate regex
                    //     else list all jobs activeOnly=false with above query parameters
                    // list/matchinglabels
                    // - optional labels query param
                    // - optional labels.op query param - default value is 'or' if not specified (other possible value is 'and')
                    path(segment("list"), () -> {
                        jobListGET.increment();
                        return jobListRoute(Optional.empty());
                    }),
                    path(segment("list").slash("matchinglabels"), () -> {
                        jobListLabelMatchGET.increment();
                        return jobListRoute(Optional.empty());
                    }),
                    path(segment("list").slash(PathMatchers.segment()), (jobId) -> {
                        logger.debug("/api/jobs/list/{} called", jobId);
                        jobListJobIdGET.increment();
                        return completeAsync(
                            jobRouteHandler.getJobDetails(new JobClusterManagerProto.GetJobDetailsRequest("masterAPI", jobId)),
                            resp -> {
                                // Wrap the metadata (if found) in a view with empty worker/stage lists.
                                Optional<MantisJobMetadataView> mantisJobMetadataView = resp.getJobMetadata()
                                    .map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false));
                                return completeOK(mantisJobMetadataView,
                                    Jackson.marshaller());
                            });
                    }),
                    path(segment("list").slash("matching").slash(PathMatchers.segment()), (regex) -> {
                        jobListRegexGET.increment();
                        // Empty regex is treated the same as no regex.
                        return jobListRoute(Optional.ofNullable(regex)
                            .filter(r -> !r.isEmpty()));
                    }),
                    path(segment("archived").slash(PathMatchers.segment()), (jobId) ->
                        parameterOptional(StringUnmarshallers.INTEGER, "limit", (limit) -> {
                            jobArchivedWorkersGET.increment();
                            Optional<JobId> jobIdO = JobId.fromId(jobId);
                            if (jobIdO.isPresent()) {
                                ListArchivedWorkersRequest req = new ListArchivedWorkersRequest(jobIdO.get(),
                                    limit.orElse(DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT));
                                return alwaysCache(cache, requestUriKeyer, () ->
                                    extractUri(uri -> completeAsync(
                                        jobRouteHandler.listArchivedWorkers(req),
                                        resp -> {
                                            List<MantisWorkerMetadataWritable> workers = resp.getWorkerMetadata().stream()
                                                .map(wm -> DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(wm))
                                                .collect(Collectors.toList());
                                            return completeOK(workers,
                                                Jackson.marshaller());
                                        })));
                            } else {
                                return complete(StatusCodes.BAD_REQUEST,
                                    "error: 'archived/<jobId>' request must include a valid jobId");
                            }
                        })
                    ),
                    path(segment("archived"), () -> {
                        jobArchivedWorkersGETInvalid.increment();
                        return complete(StatusCodes.BAD_REQUEST,
                            "error: 'archived' Request must include jobId");
                    })
                )))
        ));
    }
public Route createRoute(Function<Route, Route> routeFilter) {
logger.info("creating routes");
final ExceptionHandler genericExceptionHandler = ExceptionHandler.newBuilder()
.match(Exception.class, x -> {
logger.error("got exception", x);
return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": \"" + x.getMessage() + "\"}");
})
.build();
return respondWithHeaders(DEFAULT_RESPONSE_HEADERS, () -> handleExceptions(genericExceptionHandler, () -> routeFilter.apply(getJobRoutes())));
}
}
| 8,145 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/MasterDescriptionRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.Route;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Routes exposing the active master's description and selected configuration:
 *   GET /api/masterinfo    - master description as JSON
 *   GET /api/masterinfostr - pre-rendered JSON string of the master description
 *   GET /api/masterconfig  - list of {@link Configlet} configuration fragments
 */
public class MasterDescriptionRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(MasterDescriptionRoute.class);
    // Shared, thread-safe mapper; tolerant of unknown properties on deserialization.
    private static final ObjectMapper mapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    private final MasterDescription masterDesc;
    // JSON rendering of masterDesc, computed once at construction time.
    private final String masterDescStr;
    private final List<Configlet> configs = new ArrayList<>();

    /**
     * A named configuration fragment served by /api/masterconfig; the value is
     * itself a JSON string (e.g. the serialized constants of an enum type).
     */
    // Fix: ignoreUnknown only takes effect when applied at the class level; on a
    // constructor it is a no-op (see Jackson's JsonIgnoreProperties javadoc).
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class Configlet {
        private final String name;
        private final String value;

        @JsonCreator
        public Configlet(@JsonProperty("name") String name, @JsonProperty("value") String value) {
            this.name = name;
            this.value = value;
        }

        public String getName() {
            return name;
        }

        public String getValue() {
            return value;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final Configlet configlet = (Configlet) o;
            return Objects.equals(name, configlet.name) &&
                Objects.equals(value, configlet.value);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, value);
        }

        @Override
        public String toString() {
            return "Configlet{" +
                "name='" + name + '\'' +
                ", value='" + value + '\'' +
                '}';
        }
    }

    /** Upper bounds on worker machine resources, as configured on this master. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    static class WorkerResourceLimits {
        private final int maxCpuCores;
        private final int maxMemoryMB;
        private final int maxNetworkMbps;

        @JsonCreator
        public WorkerResourceLimits(@JsonProperty("maxCpuCores") final int maxCpuCores,
                                    @JsonProperty("maxMemoryMB") final int maxMemoryMB,
                                    @JsonProperty("maxNetworkMbps") final int maxNetworkMbps) {
            this.maxCpuCores = maxCpuCores;
            this.maxMemoryMB = maxMemoryMB;
            this.maxNetworkMbps = maxNetworkMbps;
        }

        public int getMaxCpuCores() {
            return maxCpuCores;
        }

        public int getMaxMemoryMB() {
            return maxMemoryMB;
        }

        public int getMaxNetworkMbps() {
            return maxNetworkMbps;
        }
    }

    /**
     * Pre-renders the master description and assembles the list of exposed
     * configuration fragments (enum value sets and worker resource limits).
     *
     * @param masterDescription description of this (active) Mantis master
     */
    public MasterDescriptionRoute(final MasterDescription masterDescription) {
        this.masterDesc = masterDescription;
        String descJson;
        try {
            descJson = mapper.writeValueAsString(masterDesc);
        } catch (JsonProcessingException e) {
            logger.error("failed to create json for master desc {}", masterDesc);
            // Fall back to toString() so /api/masterinfostr still returns something.
            descJson = masterDesc.toString();
        }
        this.masterDescStr = descJson;
        try {
            configs.add(new Configlet(JobConstraints.class.getSimpleName(), mapper.writeValueAsString(JobConstraints.values())));
            configs.add(new Configlet(StageScalingPolicy.ScalingReason.class.getSimpleName(), mapper.writeValueAsString(StageScalingPolicy.ScalingReason.values())));
            configs.add(new Configlet(WorkerMigrationConfig.MigrationStrategyEnum.class.getSimpleName(), mapper.writeValueAsString(WorkerMigrationConfig.MigrationStrategyEnum.values())));
            MasterConfiguration config = ConfigurationProvider.getConfig();
            int maxCpuCores = config.getWorkerMachineDefinitionMaxCpuCores();
            int maxMemoryMB = config.getWorkerMachineDefinitionMaxMemoryMB();
            int maxNetworkMbps = config.getWorkerMachineDefinitionMaxNetworkMbps();
            configs.add(new Configlet(WorkerResourceLimits.class.getSimpleName(), mapper.writeValueAsString(new WorkerResourceLimits(maxCpuCores, maxMemoryMB, maxNetworkMbps))));
        } catch (JsonProcessingException e) {
            logger.error(e.getMessage(), e);
        }
    }

    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    // Permissive CORS header attached to every response of this route.
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    public List<Configlet> getConfigs() {
        return configs;
    }

    private Route getMasterDescRoute() {
        return route(
            get(() -> route(
                path(segment("api").slash("masterinfo"), () -> completeOK(masterDesc, Jackson.marshaller())),
                path(segment("api").slash("masterinfostr"), () -> complete(StatusCodes.OK, masterDescStr)),
                path(segment("api").slash("masterconfig"), () -> completeOK(configs, Jackson.marshaller()))
            ))
        );
    }

    /**
     * Assembles the final route: applies the supplied filter, converts I/O
     * exceptions into 400 responses and attaches the default (CORS) headers.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder()
            .match(IOException.class, x -> {
                logger.error("got exception", x);
                return complete(StatusCodes.BAD_REQUEST, "caught exception " + x.getMessage());
            })
            .build();
        return respondWithHeaders(DEFAULT_RESPONSE_HEADERS, () -> handleExceptions(jsonExceptionHandler, () -> routeFilter.apply(getMasterDescRoute())));
    }
}
| 8,146 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/BaseRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import akka.actor.ActorSystem;
import akka.http.caching.LfuCache;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.caching.javadsl.LfuCacheSettings;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.server.directives.RouteAdapter;
import akka.japi.pf.PFBuilder;
import akka.pattern.AskTimeoutException;
import io.mantisrx.master.api.akka.route.MasterApiMetrics;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import scala.concurrent.duration.Duration;
@Slf4j
abstract class BaseRoute extends AllDirectives {
    /**
     * Translates a {@link BaseResponse} into an {@link HttpResponse}, mapping the
     * internal response code onto an HTTP status and counting it in
     * {@link MasterApiMetrics} (2xx/4xx/5xx).
     */
    protected HttpResponse toHttpResponse(final BaseResponse r) {
        switch (r.responseCode) {
            case SUCCESS:
            case SUCCESS_CREATED:
                MasterApiMetrics.getInstance().incrementResp2xx();
                return HttpResponse.create()
                    .withEntity(ContentTypes.APPLICATION_JSON, r.message)
                    .withStatus(StatusCodes.OK);
            case CLIENT_ERROR:
            case CLIENT_ERROR_NOT_FOUND:
            case CLIENT_ERROR_CONFLICT:
                MasterApiMetrics.getInstance().incrementResp4xx();
                return HttpResponse.create()
                    .withEntity(ContentTypes.APPLICATION_JSON, "{\"error\": \"" + r.message + "\"}")
                    .withStatus(StatusCodes.BAD_REQUEST);
            case OPERATION_NOT_ALLOWED:
                MasterApiMetrics.getInstance().incrementResp4xx();
                return HttpResponse.create()
                    .withEntity(ContentTypes.APPLICATION_JSON, "{\"error\": \"" + r.message + "\"}")
                    .withStatus(StatusCodes.METHOD_NOT_ALLOWED);
            case SERVER_ERROR:
            default:
                MasterApiMetrics.getInstance().incrementResp5xx();
                log.error("Non-matched response code error: {}", r.message);
                return HttpResponse.create()
                    .withEntity(ContentTypes.APPLICATION_JSON, "{\"error\": \"" + r.message + "\"}")
                    .withStatus(StatusCodes.INTERNAL_SERVER_ERROR);
        }
    }

    /**
     * Completes the request from the given stage, treating client errors as a
     * 400 with a JSON error body. Delegates to the three-argument overload.
     */
    protected <T extends BaseResponse> RouteAdapter completeAsync(final CompletionStage<T> stage,
                                                                  final Function<T, RouteAdapter> successTransform) {
        return completeAsync(stage,
            successTransform,
            r -> complete(StatusCodes.BAD_REQUEST, "{\"error\": \"" + r.message + "\"}"));
    }

    /**
     * Completes the request from the given stage, applying {@code successTransform}
     * on success and {@code clientFailureTransform} on client-error responses.
     * Server errors and exceptional completion (including ask timeouts) become
     * JSON 500 responses.
     *
     * @param stage                  async response from a route handler
     * @param successTransform       applied on SUCCESS / SUCCESS_CREATED
     * @param clientFailureTransform applied on CLIENT_ERROR* responses
     */
    protected <T extends BaseResponse> RouteAdapter completeAsync(final CompletionStage<T> stage,
                                                                  final Function<T, RouteAdapter> successTransform,
                                                                  final Function<T, RouteAdapter> clientFailureTransform) {
        return onComplete(
            stage,
            resp -> resp
                .map(r -> {
                    switch (r.responseCode) {
                        case SUCCESS:
                        case SUCCESS_CREATED:
                            MasterApiMetrics.getInstance().incrementResp2xx();
                            return successTransform.apply(r);
                        case CLIENT_ERROR:
                        case CLIENT_ERROR_NOT_FOUND:
                        case CLIENT_ERROR_CONFLICT:
                            // Fix: count client failures as 4xx, consistent with toHttpResponse;
                            // previously these responses were not counted at all.
                            MasterApiMetrics.getInstance().incrementResp4xx();
                            return clientFailureTransform.apply(r);
                        case SERVER_ERROR:
                        case OPERATION_NOT_ALLOWED:
                        default:
                            // NOTE(review): unlike toHttpResponse, OPERATION_NOT_ALLOWED maps to a
                            // 500 here; left unchanged to preserve the existing API behavior.
                            MasterApiMetrics.getInstance().incrementResp5xx();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR, r.message);
                    }
                })
                .recover(new PFBuilder<Throwable, Route>()
                    .match(AskTimeoutException.class, te -> {
                        MasterApiMetrics.getInstance().incrementAskTimeOutCount();
                        MasterApiMetrics.getInstance().incrementResp5xx();
                        return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                            "{\"error\": \"" + te.getMessage() + "\"}");
                    })
                    .matchAny(ex -> {
                        MasterApiMetrics.getInstance().incrementResp5xx();
                        log.error("Internal server error from completeAsync: ", ex);
                        return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                            "{\"error\": \"" + ex.getMessage() + "\"}");
                    })
                    .build()).get());
    }

    /**
     * Builds an LFU response cache for GET routes.
     *
     * @param initialCapacity initial cache capacity
     * @param maxCapacity     maximum number of cached entries
     * @param ttlMillis       time-to-live per entry, in milliseconds
     */
    protected Cache<Uri, RouteResult> createCache(ActorSystem actorSystem, int initialCapacity, int maxCapacity, int ttlMillis) {
        final CachingSettings defaultCachingSettings = CachingSettings.create(actorSystem);
        final LfuCacheSettings lfuCacheSettings = defaultCachingSettings.lfuCacheSettings()
            .withInitialCapacity(initialCapacity)
            .withMaxCapacity(maxCapacity)
            .withTimeToLive(Duration.create(ttlMillis, TimeUnit.MILLISECONDS));
        final CachingSettings cachingSettings = defaultCachingSettings.withLfuCacheSettings(lfuCacheSettings);
        return LfuCache.create(cachingSettings);
    }
}
| 8,147 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobClusterRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobIdsRequest;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import akka.actor.ActorSystem;
import akka.http.caching.LfuCache;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.caching.javadsl.LfuCacheSettings;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import akka.japi.JavaPartialFunction;
import akka.japi.Pair;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.shaded.com.google.common.base.Strings;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
/**
 * Legacy (v0) job cluster routes served under /api/namedjob: create/update,
 * delete, enable/disable, quick-update and listing endpoints for job clusters.
 */
public class JobClusterRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobClusterRoute.class);
    private final JobClusterRouteHandler jobClusterRouteHandler;
    private final JobRouteHandler jobRouteHandler;
    // Response cache for GET routes, keyed by request URI.
    private final Cache<Uri, RouteResult> cache;
    // Partial function producing the cache key (the request URI) for GET requests
    // only; non-GET requests throw noMatch() so they bypass the cache entirely.
    private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() {
        public Uri apply(RequestContext in, boolean isCheck) {
            final HttpRequest request = in.getRequest();
            final boolean isGet = request.method() == HttpMethods.GET;
            if (isGet) {
                return request.getUri();
            } else {
                throw noMatch();
            }
        }
    };
    private final Metrics metrics;
    // Per-endpoint request and error counters for the v0 job cluster API.
    private final Counter jobClusterSubmit;
    private final Counter jobClusterSubmitError;
    private final Counter jobClusterCreate;
    private final Counter jobClusterCreateError;
    private final Counter jobClusterCreateUpdate;
    private final Counter jobClusterCreateUpdateError;
    private final Counter jobClusterDelete;
    private final Counter jobClusterDeleteError;
    private final Counter jobClusterDisable;
    private final Counter jobClusterDisableError;
    private final Counter jobClusterEnable;
    private final Counter jobClusterEnableError;
    private final Counter jobClusterQuickupdate;
    private final Counter jobClusterQuickupdateError;
    private final Counter jobClusterUpdateLabel;
    private final Counter jobClusterUpdateSla;
    private final Counter jobClusterUpdateSlaError;
    private final Counter jobClusterUpdateLabelError;
    private final Counter jobClusterListGET;
    private final Counter jobClusterListJobIdGET;
    private final Counter jobClusterListClusterGET;
    /**
     * Creates the v0 job cluster route, wiring both handlers, the GET response
     * cache and the per-endpoint counters.
     *
     * @param jobClusterRouteHandler handler for job cluster control-plane requests
     * @param jobRouteHandler handler for job-level requests (listings, job IDs)
     * @param actorSystem actor system used to build the LFU response cache
     */
    public JobClusterRoute(final JobClusterRouteHandler jobClusterRouteHandler,
                           final JobRouteHandler jobRouteHandler,
                           final ActorSystem actorSystem) {
        this.jobClusterRouteHandler = jobClusterRouteHandler;
        this.jobRouteHandler = jobRouteHandler;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        // Cache capacity and TTL come from master configuration (BaseRoute#createCache).
        this.cache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(),
            config.getApiCacheTtlMilliseconds());
        // Register all endpoint counters under a single metric group ("V0JobClusterRoute").
        Metrics m = new Metrics.Builder()
            .id("V0JobClusterRoute")
            .addCounter("jobClusterSubmit")
            .addCounter("jobClusterSubmitError")
            .addCounter("jobClusterCreate")
            .addCounter("jobClusterCreateError")
            .addCounter("jobClusterCreateUpdate")
            .addCounter("jobClusterCreateUpdateError")
            .addCounter("jobClusterDelete")
            .addCounter("jobClusterDeleteError")
            .addCounter("jobClusterDisable")
            .addCounter("jobClusterDisableError")
            .addCounter("jobClusterEnable")
            .addCounter("jobClusterEnableError")
            .addCounter("jobClusterQuickupdate")
            .addCounter("jobClusterQuickupdateError")
            .addCounter("jobClusterUpdateLabel")
            .addCounter("jobClusterUpdateLabelError")
            .addCounter("jobClusterListGET")
            .addCounter("jobClusterListJobIdGET")
            .addCounter("jobClusterListClusterGET")
            .addCounter("jobClusterUpdateSla")
            .addCounter("jobClusterUpdateSlaError")
            .build();
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.jobClusterSubmit = metrics.getCounter("jobClusterSubmit");
        this.jobClusterSubmitError = metrics.getCounter("jobClusterSubmitError");
        this.jobClusterCreate = metrics.getCounter("jobClusterCreate");
        this.jobClusterCreateError = metrics.getCounter("jobClusterCreateError");
        this.jobClusterCreateUpdate = metrics.getCounter("jobClusterCreateUpdate");
        this.jobClusterCreateUpdateError = metrics.getCounter("jobClusterCreateUpdateError");
        this.jobClusterDelete = metrics.getCounter("jobClusterDelete");
        this.jobClusterDeleteError = metrics.getCounter("jobClusterDeleteError");
        this.jobClusterDisable = metrics.getCounter("jobClusterDisable");
        this.jobClusterDisableError = metrics.getCounter("jobClusterDisableError");
        this.jobClusterEnable = metrics.getCounter("jobClusterEnable");
        this.jobClusterEnableError = metrics.getCounter("jobClusterEnableError");
        this.jobClusterQuickupdate = metrics.getCounter("jobClusterQuickupdate");
        this.jobClusterQuickupdateError = metrics.getCounter("jobClusterQuickupdateError");
        this.jobClusterUpdateLabel = metrics.getCounter("jobClusterUpdateLabel");
        this.jobClusterUpdateLabelError = metrics.getCounter("jobClusterUpdateLabelError");
        this.jobClusterListGET = metrics.getCounter("jobClusterListGET");
        this.jobClusterListJobIdGET = metrics.getCounter("jobClusterListJobIdGET");
        this.jobClusterListClusterGET = metrics.getCounter("jobClusterListClusterGET");
        this.jobClusterUpdateSla = metrics.getCounter("jobClusterUpdateSla");
        this.jobClusterUpdateSlaError = metrics.getCounter("jobClusterUpdateSlaError");
    }
    // Builds a small fixed-size LFU cache (5/50 entries, 1s TTL).
    // NOTE(review): appears superseded by the configurable
    // BaseRoute#createCache(ActorSystem, int, int, int) — the constructor uses the
    // base-class variant. Confirm there are no remaining callers before removing.
    private Cache<Uri, RouteResult> createCache(ActorSystem actorSystem) {
        final CachingSettings defaultCachingSettings = CachingSettings.create(actorSystem);
        final LfuCacheSettings lfuCacheSettings = defaultCachingSettings.lfuCacheSettings()
            .withInitialCapacity(5)
            .withMaxCapacity(50)
            .withTimeToLive(Duration.create(1, TimeUnit.SECONDS));
        final CachingSettings cachingSettings = defaultCachingSettings.withLfuCacheSettings(lfuCacheSettings);
        // Created outside the route to potentially allow using
        // the same cache across multiple calls
        final Cache<Uri, RouteResult> jobClustersListCache = LfuCache.create(cachingSettings);
        return jobClustersListCache;
    }
    // Base path for all v0 job cluster endpoints: /api/namedjob
    private static final PathMatcher0 API_V0_JOBCLUSTER = segment("api").slash("namedjob");
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    // Permissive CORS header attached to every response of this route.
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);
    /**
     * Route that returns
     * - a list of Job Ids only if 'jobIdsOnly' query param is set
     * - a list of JobIdInfo objects otherwise
     * The above lists are filtered and returned based on other criteria specified in the List request
     * like stageNumber, workerIndex, workerNumber, matchingLabels, regex, activeOnly, jobState, workerState, limit
     *
     * @param jobCluster the job cluster name; when non-empty it is anchored into the
     *                   regex '^name$' to restrict results to that cluster's Job IDs
     * @return Route job list route
     */
    private Route jobClusterListRoute(final String jobCluster) {
        return parameterOptional(StringUnmarshallers.BOOLEAN, "jobIdsOnly", (jobIdsOnly) ->
                parameterMultiMap(params -> {
                    // Both branches cache the response keyed by request URI (see createCache()
                    // for capacity/TTL) to shield the master from repeated identical calls.
                    if (jobIdsOnly.isPresent() && jobIdsOnly.get()) {
                        logger.debug("/api/namedjob/listJobIds jobIdsOnly called");
                        // jobIdsOnly: flatten the response to bare job-id strings.
                        return alwaysCache(cache, requestUriKeyer, () ->
                            extractUri(uri -> completeAsync(
                                jobRouteHandler.listJobIds(createListJobIdsRequest(params,
                                    (Strings.isNullOrEmpty(jobCluster)) ? Optional.empty() : Optional.of("^" + jobCluster + "$"),
                                    true)),
                                resp -> completeOK(
                                    resp.getJobIds().stream()
                                        .map(jobId -> jobId.getJobId())
                                        .collect(Collectors.toList()),
                                    Jackson.marshaller())
                            )
                        )
                        );
                    }
                    logger.debug("/api/namedjob/listJobIds/{} called", jobCluster);
                    // Default branch: return the full JobIdInfo objects; on failure
                    // fall back to an empty list instead of an error payload.
                    return alwaysCache(cache, requestUriKeyer, () ->
                        extractUri(uri -> {
                            return completeAsync(
                                jobRouteHandler.listJobIds(createListJobIdsRequest(params,
                                    (Strings.isNullOrEmpty(jobCluster)) ? Optional.empty() : Optional.of("^" + jobCluster + "$"),
                                    false)),
                                resp -> completeOK(
                                    resp.getJobIds(),
                                    Jackson.marshaller()),
                                resp -> completeOK(Collections.emptyList(), Jackson.marshaller())
                            );
                        })
                    );
                })
        );
    }
/**
* @return true to indicate valid, false otherwise. The String holds the error message when the request is invalid
*/
private Pair<Boolean, String> validateSubmitJobRequest(MantisJobDefinition mjd) {
if (mjd.getName() == null ||
mjd.getName().length() == 0) {
logger.info("rejecting job submit request, must include name {}", mjd);
return Pair.apply(false, "Job definition must include name");
}
SchedulingInfo schedulingInfo = mjd.getSchedulingInfo();
if (schedulingInfo != null) {
Map<Integer, StageSchedulingInfo> stages = schedulingInfo.getStages();
if (stages == null) {
return Pair.apply(true, "");
}
for (StageSchedulingInfo stageSchedInfo : stages.values()) {
double cpuCores = stageSchedInfo.getMachineDefinition().getCpuCores();
int maxCpuCores = ConfigurationProvider.getConfig().getWorkerMachineDefinitionMaxCpuCores();
if (cpuCores > maxCpuCores) {
logger.info("rejecting job submit request, requested CPU {} > max for {} (user: {}) (stage: {})",
cpuCores, mjd.getName(), mjd.getUser(), stages);
return Pair.apply(false, "requested CPU cannot be more than max CPU per worker "+maxCpuCores);
}
double memoryMB = stageSchedInfo.getMachineDefinition().getMemoryMB();
int maxMemoryMB = ConfigurationProvider.getConfig().getWorkerMachineDefinitionMaxMemoryMB();
if (memoryMB > maxMemoryMB) {
logger.info("rejecting job submit request, requested memory {} > max for {} (user: {}) (stage: {})",
memoryMB, mjd.getName(), mjd.getUser(), stages);
return Pair.apply(false, "requested memory cannot be more than max memoryMB per worker "+maxMemoryMB);
}
double networkMbps = stageSchedInfo.getMachineDefinition().getNetworkMbps();
int maxNetworkMbps = ConfigurationProvider.getConfig().getWorkerMachineDefinitionMaxNetworkMbps();
if (networkMbps > maxNetworkMbps) {
logger.info("rejecting job submit request, requested network {} > max for {} (user: {}) (stage: {})",
networkMbps, mjd.getName(), mjd.getUser(), stages);
return Pair.apply(false, "requested network cannot be more than max networkMbps per worker "+maxNetworkMbps);
}
int numberOfInstances = stageSchedInfo.getNumberOfInstances();
int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage();
if (numberOfInstances > maxWorkersPerStage) {
logger.info("rejecting job submit request, requested num instances {} > max for {} (user: {}) (stage: {})",
numberOfInstances, mjd.getName(), mjd.getUser(), stages);
return Pair.apply(false, "requested number of instances per stage cannot be more than " + maxWorkersPerStage);
}
StageScalingPolicy scalingPolicy = stageSchedInfo.getScalingPolicy();
if (scalingPolicy != null) {
if (scalingPolicy.getMax() > maxWorkersPerStage) {
logger.info("rejecting job submit request, requested num instances in scaling policy {} > max for {} (user: {}) (stage: {})",
numberOfInstances, mjd.getName(), mjd.getUser(), stages);
return Pair.apply(false, "requested number of instances per stage in scaling policy cannot be more than " + maxWorkersPerStage);
}
}
}
}
return Pair.apply(true, "");
}
    /**
     * Builds the legacy (v0) job-cluster route tree: POST /api/submit, the mutating
     * POST /api/namedjob/* endpoints (create, update, delete, disable, enable,
     * quickupdate, updatelabels, updatesla, migratestrategy, quicksubmit) and the
     * read-only GET list/listJobIds endpoints. Handlers delegate to
     * jobClusterRouteHandler and increment the matching success/error counters.
     */
    private Route getJobClusterRoutes() {
        return route(
            // POST /api/submit: validate against configured resource limits before
            // forwarding to the job-cluster actor.
            path(segment("api").slash("submit"), () ->
                decodeRequest(() ->
                    entity(Unmarshaller.entityToString(), request -> {
                        logger.debug("/api/submit called {}", request);
                        try {
                            MantisJobDefinition mjd = Jackson.fromJSON(request, MantisJobDefinition.class);
                            logger.debug("job submit request {}", mjd);
                            mjd.validate(true);
                            Pair<Boolean, String> validationResult = validateSubmitJobRequest(mjd);
                            if (!validationResult.first()) {
                                jobClusterSubmitError.increment();
                                return complete(StatusCodes.BAD_REQUEST,
                                    "{\"error\": \"" + validationResult.second() + "\"}");
                            }
                            jobClusterSubmit.increment();
                            return completeWithFuture(
                                jobClusterRouteHandler.submit(JobClusterProtoAdapter.toSubmitJobClusterRequest(mjd))
                                    .thenApply(this::toHttpResponse));
                        } catch (Exception e) {
                            logger.warn("exception in submit job request {}", request, e);
                            jobClusterSubmitError.increment();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                "{\"error\": \""+e.getMessage()+ "\"}");
                        }
                    })
                )
            ),
            // Mutating endpoints under POST /api/namedjob/*.
            pathPrefix(API_V0_JOBCLUSTER, () -> route(
                post(() -> route(
                    path("create", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), jobClusterDefn -> {
                                logger.debug("/api/namedjob/create called {}", jobClusterDefn);
                                try {
                                    final NamedJobDefinition namedJobDefinition = Jackson.fromJSON(jobClusterDefn, NamedJobDefinition.class);
                                    if (namedJobDefinition == null ||
                                        namedJobDefinition.getJobDefinition() == null ||
                                        namedJobDefinition.getJobDefinition().getJobJarFileLocation() == null ||
                                        namedJobDefinition.getJobDefinition().getName() == null ||
                                        namedJobDefinition.getJobDefinition().getName().isEmpty()) {
                                        logger.warn("JobCluster create request must include name and URL {}", jobClusterDefn);
                                        return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"Job definition must include name and URL\"}");
                                    }
                                    final CompletionStage<CreateJobClusterResponse> response =
                                        jobClusterRouteHandler.create(JobClusterProtoAdapter.toCreateJobClusterRequest(namedJobDefinition));
                                    jobClusterCreate.increment();
                                    return completeWithFuture(response
                                        .thenApply(r -> {
                                            // NOTE(review): "already exists" client errors are remapped to a
                                            // server error here — presumably for v0 client compatibility; confirm.
                                            if ((r.responseCode == CLIENT_ERROR || r.responseCode == CLIENT_ERROR_CONFLICT)
                                                && r.message.contains("already exists")) {
                                                return new CreateJobClusterResponse(r.requestId, SERVER_ERROR, r.message, r.getJobClusterName());
                                            }
                                            return r;
                                        })
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error creating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't read valid json in request: "+e.getMessage());
                                } catch (Exception e) {
                                    logger.warn("Error creating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateError.increment();
                                    return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": "+e.getMessage()+"}");
                                }
                            })
                        )
                    ),
                    path("update", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), jobClusterDefn -> {
                                logger.debug("/api/namedjob/update called {}", jobClusterDefn);
                                try {
                                    final NamedJobDefinition namedJobDefinition = Jackson.fromJSON(jobClusterDefn, NamedJobDefinition.class);
                                    if (namedJobDefinition == null ||
                                        namedJobDefinition.getJobDefinition() == null ||
                                        namedJobDefinition.getJobDefinition().getJobJarFileLocation() == null ||
                                        namedJobDefinition.getJobDefinition().getName() == null ||
                                        namedJobDefinition.getJobDefinition().getName().isEmpty()) {
                                        logger.warn("JobCluster update request must include name and URL {}", jobClusterDefn);
                                        jobClusterCreateUpdateError.increment();
                                        return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"Job definition must include name and URL\"}");
                                    }
                                    final CompletionStage<UpdateJobClusterResponse> response =
                                        jobClusterRouteHandler.update(JobClusterProtoAdapter.toUpdateJobClusterRequest(namedJobDefinition));
                                    jobClusterCreateUpdate.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateUpdateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't read valid json in request: "+e.getMessage());
                                } catch (Exception e) {
                                    logger.warn("Error updating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateUpdateError.increment();
                                    return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": "+e.getMessage()+"}");
                                }
                            })
                        )
                    ),
                    path("delete", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), deleteReq -> {
                                logger.debug("/api/namedjob/delete called {}", deleteReq);
                                try {
                                    final DeleteJobClusterRequest deleteJobClusterRequest = Jackson.fromJSON(deleteReq, DeleteJobClusterRequest.class);
                                    final CompletionStage<DeleteJobClusterResponse> response =
                                        jobClusterRouteHandler.delete(deleteJobClusterRequest);
                                    jobClusterDelete.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error deleting JobCluster {}", deleteReq, e);
                                    jobClusterDeleteError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("disable", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/disable called {}", request);
                                try {
                                    final DisableJobClusterRequest disableJobClusterRequest = Jackson.fromJSON(request, DisableJobClusterRequest.class);
                                    final CompletionStage<DisableJobClusterResponse> response =
                                        jobClusterRouteHandler.disable(disableJobClusterRequest);
                                    jobClusterDisable.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error disabling JobCluster {}", request, e);
                                    jobClusterDisableError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("enable", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/enable called {}", request);
                                try {
                                    final EnableJobClusterRequest enableJobClusterRequest = Jackson.fromJSON(request, EnableJobClusterRequest.class);
                                    final CompletionStage<EnableJobClusterResponse> response =
                                        jobClusterRouteHandler.enable(enableJobClusterRequest);
                                    jobClusterEnable.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error enabling JobCluster {}", request, e);
                                    jobClusterEnableError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("quickupdate", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/quickupdate called {}", request);
                                try {
                                    final UpdateJobClusterArtifactRequest updateJobClusterArtifactRequest = Jackson.fromJSON(request, UpdateJobClusterArtifactRequest.class);
                                    final CompletionStage<UpdateJobClusterArtifactResponse> response =
                                        jobClusterRouteHandler.updateArtifact(updateJobClusterArtifactRequest);
                                    jobClusterQuickupdate.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on quickupdate for JobCluster {}", request, e);
                                    jobClusterQuickupdateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("updatelabels", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/updatelabels called {}", request);
                                try {
                                    final UpdateJobClusterLabelsRequest updateJobClusterLabelsRequest = Jackson.fromJSON(request, UpdateJobClusterLabelsRequest.class);
                                    jobClusterUpdateLabel.increment();
                                    return completeWithFuture(jobClusterRouteHandler.updateLabels(updateJobClusterLabelsRequest)
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating labels for JobCluster {}", request, e);
                                    jobClusterUpdateLabelError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("updatesla", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/updatesla called {}", request);
                                jobClusterUpdateSla.increment();
                                try {
                                    final UpdateJobClusterSLARequest updateJobClusterSLARequest = Jackson.fromJSON(request, UpdateJobClusterSLARequest.class);
                                    return completeWithFuture(jobClusterRouteHandler.updateSLA(updateJobClusterSLARequest)
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating SLA for JobCluster {}", request, e);
                                    jobClusterUpdateSlaError.increment();
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    // NOTE(review): unlike the other mutations, migratestrategy and
                    // quicksubmit increment no success/error counters — confirm whether
                    // that is intentional or an omission.
                    path("migratestrategy", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/migratestrategy called {}", request);
                                try {
                                    final UpdateJobClusterWorkerMigrationStrategyRequest updateMigrateStrategyReq =
                                        Jackson.fromJSON(request, UpdateJobClusterWorkerMigrationStrategyRequest.class);
                                    return completeWithFuture(jobClusterRouteHandler.updateWorkerMigrateStrategy(updateMigrateStrategyReq)
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating migrate strategy for JobCluster {}", request, e);
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("quicksubmit", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/quicksubmit called {}", request);
                                try {
                                    final JobClusterManagerProto.SubmitJobRequest submitJobRequest = Jackson.fromJSON(request, JobClusterManagerProto.SubmitJobRequest.class);
                                    return completeWithFuture(jobClusterRouteHandler.submit(submitJobRequest)
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on quick submit for JobCluster {}", request, e);
                                    return complete(StatusCodes.BAD_REQUEST, "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    )
                )),
                // Read-only endpoints; list responses are cached (see createCache()).
                get(() -> route(
                    pathPrefix("list", () -> route(
                        pathEndOrSingleSlash(() -> {
                            logger.debug("/api/namedjob/list called");
                            jobClusterListGET.increment();
                            return alwaysCache(cache, requestUriKeyer, () ->
                                extractUri(uri -> completeAsync(
                                    jobClusterRouteHandler.getAllJobClusters(new ListJobClustersRequest()),
                                    resp -> completeOK(
                                        resp.getJobClusters()
                                            .stream()
                                            .map(jobClusterMetadataView -> JobClusterProtoAdapter.toJobClusterInfo(jobClusterMetadataView))
                                            .collect(Collectors.toList())
                                        ,
                                        Jackson.marshaller()),
                                    resp -> completeOK(Collections.emptyList(), Jackson.marshaller()))));
                        }),
                        path(PathMatchers.segment(), (jobCluster) -> {
                            if (logger.isDebugEnabled()) {
                                logger.debug("/api/namedjob/list/{} called", jobCluster);
                            }
                            jobClusterListClusterGET.increment();
                            return completeAsync(
                                jobClusterRouteHandler.getJobClusterDetails(new JobClusterManagerProto.GetJobClusterRequest(jobCluster)),
                                resp -> completeOK(
                                    resp.getJobCluster().map(jc -> Arrays.asList(jc)).orElse(Collections.emptyList()),
                                    Jackson.marshaller()),
                                resp -> completeOK(Collections.emptyList(), Jackson.marshaller())
                            );
                        })
                    )),
                    path(segment("listJobIds").slash(PathMatchers.segment()), (jobCluster) -> {
                        logger.debug("/api/namedjob/listJobIds/{} called", jobCluster);
                        jobClusterListJobIdGET.increment();
                        return jobClusterListRoute(jobCluster);
                    }),
                    // listJobIds without a cluster name is rejected with guidance.
                    path("listJobIds", () ->
                    {
                        logger.debug("/api/namedjob/listJobIds called");
                        return complete(StatusCodes.BAD_REQUEST,
                            "Specify the Job cluster name '/api/namedjob/listJobIds/<JobClusterName>' to list the job Ids");
                    })
                )))
        ));
    }
public Route createRoute(Function<Route, Route> routeFilter) {
logger.info("creating routes");
final ExceptionHandler genericExceptionHandler = ExceptionHandler.newBuilder()
.match(Exception.class, e -> {
logger.error("got exception", e);
return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": \"" + e.getMessage() + "\"}");
})
.build();
return respondWithHeaders(DEFAULT_RESPONSE_HEADERS, () -> handleExceptions(genericExceptionHandler, () -> routeFilter.apply(getJobClusterRoutes())));
}
}
| 8,148 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobStatusRoute.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static akka.http.javadsl.server.PathMatchers.segment;
import akka.NotUsed;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.ws.Message;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.stream.javadsl.Flow;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import java.io.IOException;
import java.util.Arrays;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * v0 route exposing per-job status streams over websockets at /job/status/{jobId}.
 * Each connection is upgraded to a websocket backed by the handler's message flow.
 */
public class JobStatusRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobStatusRoute.class);

    // CORS header attached to every response from this route.
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    private final JobStatusRouteHandler jobStatusRouteHandler;

    public JobStatusRoute(final JobStatusRouteHandler jobStatusRouteHandler) {
        this.jobStatusRouteHandler = jobStatusRouteHandler;
    }

    /** GET /job/status/{jobId}: upgrade to a websocket streaming that job's status. */
    private Route getJobStatusRoutes() {
        return route(
            get(() -> route(
                path(segment("job").slash("status").slash(PathMatchers.segment()), (jobId) -> {
                    logger.info("/job/status/{} called", jobId);
                    final Flow<Message, Message, NotUsed> statusFlow = jobStatusRouteHandler.jobStatus(jobId);
                    return handleWebSocketMessages(statusFlow);
                })
            ))
        );
    }

    /**
     * Wraps the status routes with the default CORS headers and an IOException
     * handler that reports a 400 with the exception message.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder()
            .match(IOException.class, ioe -> {
                logger.error("got exception", ioe);
                return complete(StatusCodes.BAD_REQUEST, "caught exception " + ioe.getMessage());
            })
            .build();
        final Route filtered = routeFilter.apply(getJobStatusRoutes());
        return respondWithHeaders(
            DEFAULT_RESPONSE_HEADERS,
            () -> handleExceptions(jsonExceptionHandler, () -> filtered));
    }
}
| 8,149 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobClusterInfo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
public class JobClusterInfo {
private final String name;
private final String jobId;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown=true)
public JobClusterInfo(@JsonProperty("name") String name,
@JsonProperty("jobId") String jobId) {
this.name = name;
this.jobId = jobId;
}
public String getName() {
return name;
}
public String getJobId() {
return jobId;
}
} | 8,150 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobClusterProtoAdapter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.proto;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.LabelManager.SystemLabels;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.server.master.http.api.CompactJobInfo;
import io.mantisrx.server.master.http.api.JobClusterInfo;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisStageMetadata;
import io.mantisrx.server.master.store.MantisWorkerMetadata;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class JobClusterProtoAdapter {
    // Static utility class: private constructor prevents instantiation.
    private JobClusterProtoAdapter() {}
public static final CreateJobClusterRequest toCreateJobClusterRequest(final NamedJobDefinition njd) {
MantisJobDefinition jd = njd.getJobDefinition();
final CreateJobClusterRequest request = new CreateJobClusterRequest(new JobClusterDefinitionImpl(
jd.getName(),
Arrays.asList(new JobClusterConfig(jd.getJobJarFileLocation().toString(),
System.currentTimeMillis(),
jd.getVersion(),
jd.getSchedulingInfo()
)),
njd.getOwner(),
jd.getUser(),
new SLA(jd.getSlaMin(),
jd.getSlaMax(),
jd.getCronSpec(),
jd.getCronPolicy() == NamedJobDefinition.CronPolicy.KEEP_EXISTING ?
IJobClusterDefinition.CronPolicy.KEEP_EXISTING : IJobClusterDefinition.CronPolicy.KEEP_NEW),
jd.getMigrationConfig(),
jd.getIsReadyForJobMaster(),
jd.getParameters(),
processLabels(jd)
)
, "user"
);
return request;
}
// public static final JobSla toJobSla(final io.mantisrx.master.core.proto.JobSla protoSla) {
// return new JobSla(protoSla.getRuntimeLimitSecs(),
// protoSla.getMinRuntimeSecs(),
// JobSla.StreamSLAType.valueOf(protoSla.getSlaType().name()),
// MantisJobDurationType.valueOf(protoSla.getDurationType().name()),
// protoSla.getUserProvidedType());
// }
//
// public static final MachineDefinition toMachineDefinition(final io.mantisrx.master.core.proto.MachineDefinition md) {
// return new MachineDefinition(md.getCpuCores(),
// md.getMemoryMB(), md.getNetworkMbps(), md.getDiskMB(), md.getNumPorts());
// }
// public static final StageScalingPolicy.Strategy toStageScalingStrategy(final io.mantisrx.master.core.proto.StageScalingPolicy.Strategy s) {
// return new StageScalingPolicy.Strategy(
// StageScalingPolicy.ScalingReason.valueOf(s.getReason().name()),
// s.getScaleDownBelowPct(),
// s.getScaleUpAbovePct(),
// s.hasRollingCount() ?
// new StageScalingPolicy.RollingCount(
// s.getRollingCount().getCount(),
// s.getRollingCount().getOf()) :
// null
// );
// }
// public static final StageScalingPolicy toStageScalingPolicy(final io.mantisrx.master.core.proto.StageScalingPolicy p) {
// return new StageScalingPolicy(
// p.getStage(),
// p.getMin(),
// p.getMax(),
// p.getIncrement(),
// p.getDecrement(),
// p.getCoolDownSecs(),
// p.getStrategiesMap().entrySet().stream().collect(
// Collectors.toMap(
// e -> StageScalingPolicy.ScalingReason.valueOf(e.getKey()),
// e -> toStageScalingStrategy(e.getValue())
// )
// )
// );
// }
//
// private static final StageSchedulingInfo toStageSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo.StageSchedulingInfo s) {
// return new StageSchedulingInfo(
// s.getNumberOfInstances(),
// toMachineDefinition(s.getMachineDefinition()),
// s.getHardConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()),
// s.getSoftConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()),
// s.hasScalingPolicy() ? toStageScalingPolicy(s.getScalingPolicy()) : null,
// s.getScalable()
// );
// }
// private static final SchedulingInfo toSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo s) {
//
// return new SchedulingInfo(
// s.getStagesMap().entrySet().stream()
// .collect(Collectors.toMap(e -> e.getKey(),
// e -> toStageSchedulingInfo(e.getValue())))
// );
// }
// private static final WorkerMigrationConfig toMigrationConfig(final io.mantisrx.master.core.proto.WorkerMigrationConfig cfg) {
// return new WorkerMigrationConfig(
// WorkerMigrationConfig.MigrationStrategyEnum.valueOf(cfg.getStrategy().name()),
// cfg.getConfigString()
// );
// }
// public static final MantisJobDefinition toMantisJobDefinition(final JobSubmitRequest jsr) throws MalformedURLException {
//
// return new MantisJobDefinition(jsr.getName(),
// jsr.getUser(),
// new URL(jsr.getUrl()),
// jsr.getVersion(),
// jsr.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()),
// jsr.hasJobSla() ? toJobSla(jsr.getJobSla()) : null,
// jsr.getSubscriptionTimeoutSecs(),
// jsr.hasSchedulingInfo() ? toSchedulingInfo(jsr.getSchedulingInfo()) : null,
// jsr.getSlaMin(),
// jsr.getSlaMax(),
// jsr.getCronSpec(),
// NamedJobDefinition.CronPolicy.valueOf(jsr.getCronPolicy().name()),
// true,
// jsr.hasMigrationConfig() ? toMigrationConfig(jsr.getMigrationConfig()) : WorkerMigrationConfig.DEFAULT,
// jsr.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList())
// );
// }
public static final UpdateJobClusterRequest toUpdateJobClusterRequest(final NamedJobDefinition njd) {
MantisJobDefinition jd = njd.getJobDefinition();
final UpdateJobClusterRequest request = new UpdateJobClusterRequest(new JobClusterDefinitionImpl(
jd.getName(),
Arrays.asList(new JobClusterConfig(jd.getJobJarFileLocation().toString(),
System.currentTimeMillis(),
jd.getVersion(),
jd.getSchedulingInfo()
)),
njd.getOwner(),
jd.getUser(),
new SLA(jd.getSlaMin(),
jd.getSlaMax(),
jd.getCronSpec(),
jd.getCronPolicy() == NamedJobDefinition.CronPolicy.KEEP_EXISTING ?
IJobClusterDefinition.CronPolicy.KEEP_EXISTING : IJobClusterDefinition.CronPolicy.KEEP_NEW),
jd.getMigrationConfig(),
jd.getIsReadyForJobMaster(),
jd.getParameters(),
processLabels(jd)
),
"user");
return request;
}
// public static final JobClusterManagerProto.SubmitJobRequest toSubmitJobClusterRequest(final SubmitJobRequest jd)
// throws InvalidJobException {
//
// final JobClusterManagerProto.SubmitJobRequest request = new JobClusterManagerProto.SubmitJobRequest(
// jd.getName(),
// jd.getUser(),
// Optional.of(
// new JobDefinition(
// jd.getName(),
// jd.getUser(),
// (DataFormatAdapter.extractArtifactName(jd.getJobJarFileLocation())).orElse(""),
// jd.getVersion(),
// jd.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()),
// jd.hasJobSla() ? toJobSla(jd.getJobSla()) : null,
// jd.getSubscriptionTimeoutSecs(),
// jd.hasSchedulingInfo() ? toSchedulingInfo(jd.getSchedulingInfo()) : null,
// jd.getSchedulingInfo() == null ? -1 : jd.getSchedulingInfo().getStagesMap().size(),
// jd.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList()))
// ));
//
// return request;
// }
public static final JobClusterManagerProto.SubmitJobRequest toSubmitJobClusterRequest(final MantisJobDefinition jd)
throws InvalidJobException {
final JobClusterManagerProto.SubmitJobRequest request = new JobClusterManagerProto.SubmitJobRequest(
jd.getName(),
jd.getUser(),
new JobDefinition(
jd.getName(),
jd.getUser(),
(DataFormatAdapter.extractArtifactName(jd.getJobJarFileLocation())).orElse(""),
jd.getVersion(),
jd.getParameters(),
jd.getJobSla(),
jd.getSubscriptionTimeoutSecs(),
jd.getSchedulingInfo(),
jd.getSchedulingInfo() == null ? -1 : jd.getSchedulingInfo().getStages().size(),
processLabels(jd),
jd.getDeploymentStrategy()));
return request;
}
public static JobClusterInfo toJobClusterInfo(MantisJobClusterMetadataView jobClusterMetadataView) {
List<JobClusterInfo.JarInfo> jarInfoList = DataFormatAdapter.convertNamedJobJarListToJarInfoList(jobClusterMetadataView.getJars());
JobClusterInfo jobClusterInfo = new JobClusterInfo(jobClusterMetadataView.getName(),
jobClusterMetadataView.getSla(),
jobClusterMetadataView.getOwner(),
jobClusterMetadataView.isDisabled(),
jobClusterMetadataView.isCronActive(),
jarInfoList,
jobClusterMetadataView.getParameters(),
jobClusterMetadataView.getLabels());
return jobClusterInfo;
}
protected static List<Label> processLabels(MantisJobDefinition jd) {
Map<String, Label> labelMap = new HashMap<>();
jd.getLabels().forEach(l -> labelMap.put(l.getName(), l));
if (jd.getDeploymentStrategy() != null &&
!Strings.isNullOrEmpty(jd.getDeploymentStrategy().getResourceClusterId())) {
Label rcLabel = new Label(
SystemLabels.MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label,
jd.getDeploymentStrategy().getResourceClusterId());
labelMap.put(rcLabel.getName(), rcLabel);
}
return ImmutableList.copyOf(labelMap.values());
}
    /**
     * Immutable summary of a job instance (id, version, state, timestamps and submitting user)
     * as exposed by list-job-id style API responses.
     */
    public static class JobIdInfo {
        private final String jobId;
        private final String version;
        private final MantisJobState state;
        // Millis-since-epoch rendered as a string; empty string when not set (see Builder).
        private final String submittedAt;
        // Empty string while the job is still running (Builder maps -1 to "").
        private final String terminatedAt;
        private final String user;
        // NOTE(review): @JsonIgnoreProperties is normally a type-level annotation; its effect when
        // placed on a creator constructor may be a no-op — confirm against the Jackson version in use.
        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public JobIdInfo(@JsonProperty("jobId") String jobId,
                         @JsonProperty("version") String version,
                         @JsonProperty("state") MantisJobState state,
                         @JsonProperty("submittedAt") String submittedAt,
                         @JsonProperty("terminatedAt") String terminatedAt,
                         @JsonProperty("user") String user) {
            this.jobId = jobId;
            this.version = version;
            this.state = state;
            this.submittedAt = submittedAt;
            this.terminatedAt = terminatedAt;
            this.user = user;
        }
        public String getJobId() {
            return jobId;
        }
        public String getVersion() {
            return version;
        }
        public MantisJobState getState() {
            return state;
        }
        public String getSubmittedAt() {
            return submittedAt;
        }
        public String getTerminatedAt() {
            return terminatedAt;
        }
        public String getUser() {
            return user;
        }
        @Override
        public String toString() {
            return "JobIdInfo{" +
                "jobId='" + jobId + '\'' +
                ", version='" + version + '\'' +
                ", state=" + state +
                ", submittedAt='" + submittedAt + '\'' +
                ", terminatedAt='" + terminatedAt + '\'' +
                ", user='" + user + '\'' +
                '}';
        }
        /**
         * Fluent builder for {@link JobIdInfo}. The timestamp and user fields default to
         * empty strings; jobId, version and state default to null if never set.
         */
        public static class Builder {
            private String jobId;
            private String version;
            private MantisJobState state;
            private String submittedAt = "";
            private String terminatedAt = "";
            private String user = "";
            public Builder() {
            }
            public Builder withJobIdStr(String jobId) {
                this.jobId = jobId;
                return this;
            }
            public Builder withJobId(JobId jId) {
                jobId = jId.getId();
                return this;
            }
            // Translates the internal JobState into the externally visible MantisJobState.
            public Builder withJobState(JobState st) {
                state = toJobState(st);
                return this;
            }
            public Builder withVersion(String version) {
                this.version = version;
                return this;
            }
            public Builder withSubmittedAt(long time) {
                submittedAt = Long.toString(time);
                return this;
            }
            // -1 is the "not terminated" sentinel; it leaves terminatedAt as the empty string.
            public Builder withTerminatedAt(long time) {
                if(time != -1) {
                    terminatedAt = Long.toString(time);
                }
                return this;
            }
            public Builder withUser(String user) {
                this.user = user;
                return this;
            }
            public JobIdInfo build() {
                return new JobIdInfo(jobId,version,state,submittedAt,terminatedAt,user);
            }
        }
    }
private static MantisJobState toJobState(final JobState state) {
switch (state) {
case Accepted:
return MantisJobState.Accepted;
case Launched:
return MantisJobState.Launched;
case Terminating_normal:
case Completed:
return MantisJobState.Completed;
case Terminating_abnormal:
case Failed:
return MantisJobState.Failed;
case Noop:
return MantisJobState.Noop;
default:
throw new IllegalArgumentException("cannot translate JobState to MantisJobState " + state);
}
}
public static final CompactJobInfo toCompactJobInfo(final MantisJobMetadataView view) {
MantisJobMetadata jm = view.getJobMetadata();
int workers=0;
double totCPUs = 0.0;
double totMem = 0.0;
Map<String, Integer> stSmry = new HashMap<>();
for (MantisStageMetadata s: view.getStageMetadataList()) {
workers += s.getNumWorkers();
totCPUs += s.getNumWorkers() * s.getMachineDefinition().getCpuCores();
totMem += s.getNumWorkers() * s.getMachineDefinition().getMemoryMB();
}
for (MantisWorkerMetadata w: view.getWorkerMetadataList()) {
final Integer prevVal = stSmry.get(w.getState() + "");
if (prevVal == null) {
stSmry.put(w.getState() + "", 1);
} else {
stSmry.put(w.getState() + "", prevVal + 1);
}
}
return new CompactJobInfo(
jm.getJobId(),
(jm.getJarUrl() != null) ? jm.getJarUrl().toString() : "",
jm.getSubmittedAt(),
view.getTerminatedAt(),
jm.getUser(),
jm.getState(),
jm.getSla() != null ? jm.getSla().getDurationType() : MantisJobDurationType.Transient,
jm.getNumStages(),
workers,
totCPUs,
totMem,
stSmry,
jm.getLabels()
);
}
}
| 8,151 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.proto;
import io.mantisrx.server.core.Status;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
/**
 * API wrapper around a single worker {@link Status} update, as streamed to job status
 * subscribers. Equality is defined solely by the wrapped status.
 */
public class JobStatus {
    private final Status status;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobStatus(@JsonProperty("status") final Status status) {
        this.status = status;
    }

    public Status getStatus() {
        return status;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final JobStatus other = (JobStatus) o;
        return Objects.equals(status, other.status);
    }

    @Override
    public int hashCode() {
        return Objects.hash(status);
    }

    @Override
    public String toString() {
        return "JobStatus{" + "status=" + status + '}';
    }
}
| 8,152 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobDiscoveryRouteProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.proto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.core.JobSchedulingInfo;
import java.util.Optional;
import rx.Observable;
/**
 * Response protocol objects for the job discovery routes. Each response optionally carries
 * an observable stream; the stream is absent on error responses.
 */
public class JobDiscoveryRouteProto {

    /** Response carrying an optional stream of {@link JobSchedulingInfo} updates. */
    public static class SchedInfoResponse extends BaseResponse {
        private final Optional<Observable<JobSchedulingInfo>> schedInfoStream;

        public SchedInfoResponse(final long requestId,
                                 final ResponseCode responseCode,
                                 final String message,
                                 final Observable<JobSchedulingInfo> schedInfoStream) {
            super(requestId, responseCode, message);
            this.schedInfoStream = Optional.ofNullable(schedInfoStream);
        }

        /** Stream-less variant, used for error responses. */
        public SchedInfoResponse(final long requestId,
                                 final ResponseCode responseCode,
                                 final String message) {
            this(requestId, responseCode, message, null);
        }

        public Optional<Observable<JobSchedulingInfo>> getSchedInfoStream() {
            return schedInfoStream;
        }
    }

    /** Response carrying an optional stream of {@link JobClusterInfo} updates. */
    public static class JobClusterInfoResponse extends BaseResponse {
        private final Optional<Observable<JobClusterInfo>> jobClusterInfoObs;

        public JobClusterInfoResponse(final long requestId,
                                      final ResponseCode responseCode,
                                      final String message,
                                      final Observable<JobClusterInfo> jobClusterInfoObservable) {
            super(requestId, responseCode, message);
            this.jobClusterInfoObs = Optional.ofNullable(jobClusterInfoObservable);
        }

        /** Stream-less variant, used for error responses. */
        public JobClusterInfoResponse(final long requestId,
                                      final ResponseCode responseCode,
                                      final String message) {
            this(requestId, responseCode, message, null);
        }

        public Optional<Observable<JobClusterInfo>> getJobClusterInfoObs() {
            return jobClusterInfoObs;
        }
    }
}
| 8,153 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/utils/JobRouteUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.utils;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValue;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValueAsBool;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValueAsInt;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValuesAsInt;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValuesAsMetaState;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.PostJobStatusRequest;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helpers that translate HTTP query parameters and posted worker status payloads into
 * job-cluster-manager protocol requests and scheduler events.
 */
public class JobRouteUtils {
    private static final Logger logger = LoggerFactory.getLogger(JobRouteUtils.class);

    public static final String QUERY_PARAM_LIMIT = "limit";
    public static final String QUERY_PARAM_JOB_STATE = "jobState";
    public static final String QUERY_PARAM_STAGE_NUM = "stageNumber";
    public static final String QUERY_PARAM_WORKER_INDEX = "workerIndex";
    public static final String QUERY_PARAM_WORKER_NUM = "workerNumber";
    public static final String QUERY_PARAM_WORKER_STATE = "workerState";
    public static final String QUERY_PARAM_ACTIVE_ONLY = "activeOnly";
    public static final String QUERY_PARAM_LABELS_QUERY = "labels";
    public static final String QUERY_PARAM_LABELS_OPERAND = "labels.op";

    /**
     * Translates a worker's posted status into a scheduler {@link WorkerEvent}:
     * heartbeats become {@link WorkerHeartbeat}, everything else a {@link WorkerStatus}.
     *
     * @param req the posted job status payload
     * @return the corresponding worker event
     */
    public static WorkerEvent createWorkerStatusRequest(final PostJobStatusRequest req) {
        final Status status = req.getStatus();
        if (status.getType() == Status.TYPE.HEARTBEAT) {
            return new WorkerHeartbeat(status);
        }
        final WorkerId workerId = new WorkerId(req.getJobId(), status.getWorkerIndex(), status.getWorkerNumber());
        if (logger.isTraceEnabled()) {
            logger.trace("forwarding worker status type {} from worker {}", status.getType().name(), workerId);
        }
        return new WorkerStatus(status);
    }

    /**
     * Builds a {@link JobClusterManagerProto.ListJobsRequest} from query parameters.
     *
     * @param params parsed query-parameter multimap; may be null, in which case only the
     *               regex (if present) is applied
     * @param regex optional job-id regex filter
     * @param activeOnlyDefault value of activeOnly when the query parameter is absent
     * @return the list-jobs request
     */
    public static JobClusterManagerProto.ListJobsRequest createListJobsRequest(final Map<String, List<String>> params,
                                                                               final Optional<String> regex,
                                                                               final boolean activeOnlyDefault) {
        if (params == null) {
            if (regex.isPresent()) {
                return new JobClusterManagerProto.ListJobsRequest(regex.get());
            } else {
                return new JobClusterManagerProto.ListJobsRequest();
            }
        }
        final Optional<Integer> limit = paramValueAsInt(params, QUERY_PARAM_LIMIT);
        final Optional<JobState.MetaState> jobState =
            paramValue(params, QUERY_PARAM_JOB_STATE).map(JobState.MetaState::valueOf);
        final List<Integer> stageNumber = paramValuesAsInt(params, QUERY_PARAM_STAGE_NUM);
        final List<Integer> workerIndex = paramValuesAsInt(params, QUERY_PARAM_WORKER_INDEX);
        final List<Integer> workerNumber = paramValuesAsInt(params, QUERY_PARAM_WORKER_NUM);
        final List<WorkerState.MetaState> workerState = paramValuesAsMetaState(params, QUERY_PARAM_WORKER_STATE);
        final Optional<Boolean> activeOnly =
            Optional.of(paramValueAsBool(params, QUERY_PARAM_ACTIVE_ONLY).orElse(activeOnlyDefault));
        final Optional<String> labelsQuery = paramValue(params, QUERY_PARAM_LABELS_QUERY);
        final Optional<String> labelsOperand = paramValue(params, QUERY_PARAM_LABELS_OPERAND);
        return new JobClusterManagerProto.ListJobsRequest(new JobClusterManagerProto.ListJobCriteria(limit,
            jobState,
            stageNumber,
            workerIndex,
            workerNumber,
            workerState,
            activeOnly,
            regex,
            labelsQuery,
            labelsOperand));
    }

    /**
     * Builds a {@link JobClusterManagerProto.ListJobIdsRequest} from query parameters.
     *
     * @param params parsed query-parameter multimap; may be null
     * @param regex optional job-id regex filter
     * @param activeOnlyDefault value of activeOnly when the query parameter is absent
     * @return the list-job-ids request
     */
    public static JobClusterManagerProto.ListJobIdsRequest createListJobIdsRequest(final Map<String, List<String>> params, final Optional<String> regex,
                                                                                   final boolean activeOnlyDefault) {
        if (params == null) {
            // NOTE(review): unlike createListJobsRequest, the null-params branch ignores the
            // regex argument — confirm whether that asymmetry is intentional.
            return new JobClusterManagerProto.ListJobIdsRequest();
        }
        final Optional<Integer> limit = paramValueAsInt(params, QUERY_PARAM_LIMIT);
        final Optional<JobState.MetaState> jobState =
            paramValue(params, QUERY_PARAM_JOB_STATE).map(JobState.MetaState::valueOf);
        // list job ids is used on job cluster detail page, the UI does not set this flag explicitly but expects to see completed jobs as well
        final Optional<Boolean> activeOnly =
            Optional.of(paramValueAsBool(params, QUERY_PARAM_ACTIVE_ONLY).orElse(activeOnlyDefault));
        final Optional<String> labelsQuery = paramValue(params, QUERY_PARAM_LABELS_QUERY);
        final Optional<String> labelsOperand = paramValue(params, QUERY_PARAM_LABELS_OPERAND);
        return new JobClusterManagerProto.ListJobIdsRequest(limit, jobState, activeOnly, regex, labelsQuery, labelsOperand);
    }
}
| 8,154 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/utils/JobDiscoveryHeartbeats.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.utils;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.server.core.JobSchedulingInfo;
/**
 * Shared heartbeat sentinel instances emitted on job discovery streams to keep SSE
 * connections alive. Consumers recognize them via {@link JobSchedulingInfo#HB_JobId}.
 */
public class JobDiscoveryHeartbeats {
    public static final JobClusterInfo JOB_CLUSTER_INFO_HB_INSTANCE = new JobClusterInfo(JobSchedulingInfo.HB_JobId, null);
    public static final JobSchedulingInfo SCHED_INFO_HB_INSTANCE = new JobSchedulingInfo(JobSchedulingInfo.HB_JobId, null);

    private JobDiscoveryHeartbeats() {
        // constants holder; not instantiable
    }
}
| 8,155 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/utils/StreamingUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.utils;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.JOB_CLUSTER_INFO_HB_INSTANCE;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.SCHED_INFO_HB_INSTANCE;
import akka.http.javadsl.model.sse.ServerSentEvent;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Serializes job discovery payloads into server-sent events. Heartbeat payloads are
 * serialized once at class load and the cached SSE instance is reused thereafter.
 */
public class StreamingUtils {
    private static final Logger logger = LoggerFactory.getLogger(StreamingUtils.class);
    private static final ObjectMapper mapper = new ObjectMapper().configure(
        DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // Pre-serialized heartbeat events; remain empty if serialization fails at class load.
    private static volatile Optional<ServerSentEvent> cachedSchedInfoHbEvent = Optional.empty();
    private static volatile Optional<ServerSentEvent> cachedJobClusterInfoHbEvent = Optional.empty();

    static {
        try {
            cachedJobClusterInfoHbEvent = Optional.of(ServerSentEvent.create(mapper.writeValueAsString(
                JOB_CLUSTER_INFO_HB_INSTANCE)));
            cachedSchedInfoHbEvent = Optional.of(ServerSentEvent.create(mapper.writeValueAsString(
                SCHED_INFO_HB_INSTANCE)));
        } catch (JsonProcessingException e) {
            logger.error("Failed to cache serialized Heartbeat event", e);
        }
    }

    /**
     * Serializes {@code payload} to an SSE; on failure logs {@code warnTemplate} (an SLF4J
     * template with one {} placeholder for the payload) and returns empty.
     */
    private static Optional<ServerSentEvent> serialize(final Object payload, final String warnTemplate) {
        try {
            return Optional.of(ServerSentEvent.create(mapper.writeValueAsString(payload)));
        } catch (JsonProcessingException e) {
            logger.warn(warnTemplate, payload);
            return Optional.empty();
        }
    }

    /**
     * Converts a scheduling-info update to an SSE; heartbeat instances reuse the cached event.
     */
    public static Optional<ServerSentEvent> from(final JobSchedulingInfo jsi) {
        if (jsi.getJobId().equals(JobSchedulingInfo.HB_JobId) && cachedSchedInfoHbEvent.isPresent()) {
            return cachedSchedInfoHbEvent;
        }
        return serialize(jsi, "failed to serialize Job Scheduling Info {}");
    }

    /**
     * Converts a job-cluster-info update to an SSE; heartbeat instances reuse the cached event.
     */
    public static Optional<ServerSentEvent> from(final JobClusterInfo jci) {
        if (jci.getName().equals(JobSchedulingInfo.HB_JobId) && cachedJobClusterInfoHbEvent.isPresent()) {
            return cachedJobClusterInfoHbEvent;
        }
        return serialize(jci, "failed to serialize Job Cluster Info {}");
    }
}
| 8,156 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/utils/QueryParamUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.utils;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helpers for reading typed values out of parsed HTTP query-parameter multimaps.
 * A missing key is treated the same as an empty value list.
 */
public class QueryParamUtils {
    private static final Logger logger = LoggerFactory.getLogger(QueryParamUtils.class);

    /**
     * Returns the first value for {@code key}, if any.
     */
    public static Optional<String> paramValue(final Map<String, List<String>> params, final String key) {
        return Optional.ofNullable(params.get(key))
            .filter(vs -> !vs.isEmpty())
            .map(vs -> vs.get(0));
    }

    /**
     * Returns the first value for {@code key} parsed as an int; empty when the key is
     * absent or the value is not a valid integer.
     */
    public static Optional<Integer> paramValueAsInt(final Map<String, List<String>> params, final String key) {
        return Optional.ofNullable(params.get(key))
            .filter(vs -> !vs.isEmpty())
            .map(vs -> {
                try {
                    return Integer.parseInt(vs.get(0));
                } catch (NumberFormatException e) {
                    // returning null from map() yields Optional.empty, silently dropping bad input
                    return null;
                }
            });
    }

    /**
     * Returns the first value for {@code key} parsed as a boolean; empty when absent.
     * Any text other than (case-insensitive) "true" parses as false.
     */
    public static Optional<Boolean> paramValueAsBool(final Map<String, List<String>> params, final String key) {
        // Boolean.valueOf(String) never throws, so the NumberFormatException catch that
        // previously wrapped it was dead code and has been removed.
        return Optional.ofNullable(params.get(key))
            .filter(vs -> !vs.isEmpty())
            .map(vs -> Boolean.valueOf(vs.get(0)));
    }

    /**
     * Parses every value of {@code key} as an int, preserving order. Unparsable entries
     * become null elements (preserving the historical behavior of this helper).
     */
    public static List<Integer> paramValuesAsInt(final Map<String, List<String>> params, final String key) {
        final List<String> values = params.get(key);
        if (values == null) {
            return Collections.emptyList();
        }
        return values.stream()
            .map(s -> {
                try {
                    return Integer.parseInt(s);
                } catch (NumberFormatException e) {
                    return null;
                }
            })
            .collect(Collectors.toList());
    }

    /**
     * Parses the values of {@code key} as {@link WorkerState.MetaState} names, de-duplicated.
     * The result's ordering is unspecified (values pass through a Set).
     *
     * @throws IllegalArgumentException if any value is not a valid MetaState name
     */
    public static List<WorkerState.MetaState> paramValuesAsMetaState(final Map<String, List<String>> params, final String key) {
        final List<String> values = params.get(key);
        if (values == null) {
            return Collections.emptyList();
        }
        return new ArrayList<>(values.stream()
            .map(WorkerState.MetaState::valueOf)
            .collect(Collectors.toSet()));
    }
}
| 8,157 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobArtifactRouteHandlerImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import io.mantisrx.master.jobcluster.proto.JobArtifactProto;
import io.mantisrx.server.core.domain.JobArtifact;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import lombok.extern.slf4j.Slf4j;
/**
 * Persistence-backed implementation of {@link JobArtifactRouteHandler}: each operation
 * delegates to the {@link IMantisPersistenceProvider} and wraps the outcome (or the
 * IOException message) in the corresponding protocol response.
 *
 * NOTE(review): every method runs the (likely blocking) persistence call via
 * CompletableFuture.supplyAsync on the default ForkJoinPool.commonPool — confirm the
 * provider is fast/non-blocking enough, or consider supplying a dedicated executor.
 */
@Slf4j
public class JobArtifactRouteHandlerImpl implements JobArtifactRouteHandler {
    // Metadata store used for all artifact reads and writes.
    private final IMantisPersistenceProvider mantisStorageProvider;
    public JobArtifactRouteHandlerImpl(IMantisPersistenceProvider mantisStorageProvider) {
        this.mantisStorageProvider = mantisStorageProvider;
    }
    // Looks up artifacts by name (and optional version); IO failures map to SERVER_ERROR
    // with an empty result list rather than a failed future.
    @Override
    public CompletionStage<JobArtifactProto.SearchJobArtifactsResponse> search(JobArtifactProto.SearchJobArtifactsRequest request) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                final List<JobArtifact> jobArtifactList = mantisStorageProvider.listJobArtifacts(request.getName(), request.getVersion());
                return new JobArtifactProto.SearchJobArtifactsResponse(request.requestId, SUCCESS, "", jobArtifactList);
            } catch (IOException e) {
                log.warn("Error while fetching job artifacts. Traceback: {}", e.getMessage(), e);
                return new JobArtifactProto.SearchJobArtifactsResponse(request.requestId, SERVER_ERROR, e.getMessage(), Collections.emptyList());
            }
        });
    }
    // Name-only lookup (prefix/contains filters); same SERVER_ERROR mapping as search().
    @Override
    public CompletionStage<JobArtifactProto.ListJobArtifactsByNameResponse> listArtifactsByName(JobArtifactProto.ListJobArtifactsByNameRequest request) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                final List<String> artifactNames = mantisStorageProvider.listJobArtifactsByName(request.getPrefix(), request.getContains());
                return new JobArtifactProto.ListJobArtifactsByNameResponse(request.requestId, SUCCESS, "", artifactNames);
            } catch (IOException e) {
                log.warn("Error while searching job artifact names. Traceback: {}", e.getMessage(), e);
                return new JobArtifactProto.ListJobArtifactsByNameResponse(request.requestId, SERVER_ERROR, e.getMessage(), Collections.emptyList());
            }
        });
    }
    // Persists a new artifact record; the response echoes the artifact id in both the
    // success and the error case.
    @Override
    public CompletionStage<JobArtifactProto.UpsertJobArtifactResponse> upsert(JobArtifactProto.UpsertJobArtifactRequest request) {
        final JobArtifact jobArtifact = request.getJobArtifact();
        return CompletableFuture.supplyAsync(() -> {
            try {
                mantisStorageProvider.addNewJobArtifact(jobArtifact);
                return new JobArtifactProto.UpsertJobArtifactResponse(request.requestId, SUCCESS, "", jobArtifact.getArtifactID());
            } catch (IOException e) {
                log.warn("Error while storing new job artifact. Traceback: {}", e.getMessage(), e);
                return new JobArtifactProto.UpsertJobArtifactResponse(request.requestId, SERVER_ERROR, e.getMessage(), jobArtifact.getArtifactID());
            }
        });
    }
}
| 8,158 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/ResourceClusterRouteHandler.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import io.mantisrx.master.resourcecluster.proto.GetResourceClusterSpecRequest;
import io.mantisrx.master.resourcecluster.proto.ListResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateAllResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateResourceClusterScaleRuleRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.concurrent.CompletionStage;
/**
 * Async route handler for resource cluster lifecycle operations: listing, provisioning,
 * deletion, scaling, container upgrades and scale-rule management.
 */
public interface ResourceClusterRouteHandler {
    /** Lists registered resource clusters matching the request. */
    CompletionStage<ListResourceClustersResponse> get(final ListResourceClusterRequest request);
    /** Provisions a new resource cluster from the given spec and returns its resulting state. */
    CompletionStage<GetResourceClusterResponse> create(final ProvisionResourceClusterRequest request);
    /** Deletes the resource cluster identified by {@code clusterId}. */
    CompletionStage<DeleteResourceClusterResponse> delete(final ClusterID clusterId);
    /** Fetches the spec/state of a single resource cluster. */
    CompletionStage<GetResourceClusterResponse> get(final GetResourceClusterSpecRequest request);
    /** Scales a cluster's SKU capacity per the request. */
    CompletionStage<ScaleResourceResponse> scale(final ScaleResourceRequest request);
    /** Triggers a container upgrade for the cluster. */
    CompletionStage<UpgradeClusterContainersResponse> upgrade(final UpgradeClusterContainersRequest request);
    /** Creates or replaces a single scale rule; returns the cluster's resulting rule set. */
    CompletionStage<GetResourceClusterScaleRulesResponse> createSingleScaleRule(
        CreateResourceClusterScaleRuleRequest request);
    /** Replaces the cluster's entire scale-rule set; returns the resulting rules. */
    CompletionStage<GetResourceClusterScaleRulesResponse> createAllScaleRule(
        CreateAllResourceClusterScaleRulesRequest rule);
    /** Fetches the current scale rules for a cluster. */
    CompletionStage<GetResourceClusterScaleRulesResponse> getClusterScaleRules(
        GetResourceClusterScaleRulesRequest request);
}
| 8,159 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobArtifactRouteHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import io.mantisrx.master.jobcluster.proto.JobArtifactProto;
import java.util.concurrent.CompletionStage;
/**
 * Async route handler for job artifact metadata: upserting artifact records and searching
 * them by name and version.
 */
public interface JobArtifactRouteHandler {
    /**
     * Upserts the given job artifact into the metadata store.
     */
    CompletionStage<JobArtifactProto.UpsertJobArtifactResponse> upsert(final JobArtifactProto.UpsertJobArtifactRequest request);
    /**
     * Searches job artifacts by name and, optionally, version.
     * If no version is provided, the result contains all job artifacts matching the name.
     */
    CompletionStage<JobArtifactProto.SearchJobArtifactsResponse> search(final JobArtifactProto.SearchJobArtifactsRequest request);
    /**
     * Searches job artifact names by prefix. Returns only the names, for faster lookups.
     */
    CompletionStage<JobArtifactProto.ListJobArtifactsByNameResponse> listArtifactsByName(final JobArtifactProto.ListJobArtifactsByNameRequest request);
}
| 8,160 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobClusterRouteHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoResponse;
import java.util.concurrent.CompletionStage;
/**
 * Async handlers backing the job-cluster HTTP routes: lifecycle operations
 * (create/update/delete/enable/disable), partial updates, job submission and
 * read-only queries. Each method completes with the matching response proto.
 */
public interface JobClusterRouteHandler {
/** Creates a new job cluster. */
CompletionStage<JobClusterManagerProto.CreateJobClusterResponse> create(final JobClusterManagerProto.CreateJobClusterRequest request);
/** Updates an existing job cluster's definition. */
CompletionStage<JobClusterManagerProto.UpdateJobClusterResponse> update(final JobClusterManagerProto.UpdateJobClusterRequest request);
/** Deletes a job cluster. */
CompletionStage<JobClusterManagerProto.DeleteJobClusterResponse> delete(final JobClusterManagerProto.DeleteJobClusterRequest request);
/** Disables a job cluster. */
CompletionStage<JobClusterManagerProto.DisableJobClusterResponse> disable(final JobClusterManagerProto.DisableJobClusterRequest request);
/** Enables a previously disabled job cluster. */
CompletionStage<JobClusterManagerProto.EnableJobClusterResponse> enable(final JobClusterManagerProto.EnableJobClusterRequest request);
/** Updates only the artifact associated with a job cluster. */
CompletionStage<JobClusterManagerProto.UpdateJobClusterArtifactResponse> updateArtifact(final JobClusterManagerProto.UpdateJobClusterArtifactRequest request);
/** Updates only the scheduling info of the named job cluster. */
CompletionStage<UpdateSchedulingInfoResponse> updateSchedulingInfo(
String clusterName,
final UpdateSchedulingInfoRequest request);
/** Updates only the SLA of a job cluster. */
CompletionStage<JobClusterManagerProto.UpdateJobClusterSLAResponse> updateSLA(final JobClusterManagerProto.UpdateJobClusterSLARequest request);
/** Updates only the worker migration strategy of a job cluster. */
CompletionStage<JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse> updateWorkerMigrateStrategy(final JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest request);
/** Updates only the labels attached to a job cluster. */
CompletionStage<JobClusterManagerProto.UpdateJobClusterLabelsResponse> updateLabels(final JobClusterManagerProto.UpdateJobClusterLabelsRequest request);
/** Submits a new job to a job cluster. */
CompletionStage<JobClusterManagerProto.SubmitJobResponse> submit(final JobClusterManagerProto.SubmitJobRequest request);
/** Fetches the details of a single job cluster. */
CompletionStage<JobClusterManagerProto.GetJobClusterResponse> getJobClusterDetails(final JobClusterManagerProto.GetJobClusterRequest request);
/** Fetches the latest job discovery info for a job cluster. */
CompletionStage<JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse> getLatestJobDiscoveryInfo(final JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);
/** Lists all known job clusters. */
CompletionStage<JobClusterManagerProto.ListJobClustersResponse> getAllJobClusters(final JobClusterManagerProto.ListJobClustersRequest request);
}
| 8,161 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobClusterRouteHandlerAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static akka.pattern.PatternsCS.ask;
import akka.actor.ActorRef;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.JobClustersManagerActor.UpdateSchedulingInfo;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoResponse;
import io.mantisrx.server.master.config.ConfigurationProvider;
import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JobClusterRouteHandlerAkkaImpl implements JobClusterRouteHandler {
private static final Logger logger = LoggerFactory.getLogger(JobClusterRouteHandlerAkkaImpl.class);
private final ActorRef jobClustersManagerActor;
private final Counter allJobClustersGET;
private final Duration timeout;
public JobClusterRouteHandlerAkkaImpl(ActorRef jobClusterManagerActor) {
this.jobClustersManagerActor = jobClusterManagerActor;
long timeoutMs = Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs()).orElse(1000L);
this.timeout = Duration.ofMillis(timeoutMs);
Metrics m = new Metrics.Builder()
.id("JobClusterRouteHandler")
.addCounter("allJobClustersGET")
.build();
Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
allJobClustersGET = metrics.getCounter("allJobClustersGET");
}
@Override
public CompletionStage<JobClusterManagerProto.CreateJobClusterResponse> create(final JobClusterManagerProto.CreateJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.CreateJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.CreateJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.UpdateJobClusterResponse> update(JobClusterManagerProto.UpdateJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.UpdateJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.UpdateJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.DeleteJobClusterResponse> delete(JobClusterManagerProto.DeleteJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.DeleteJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.DeleteJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.DisableJobClusterResponse> disable(JobClusterManagerProto.DisableJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.DisableJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.DisableJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.EnableJobClusterResponse> enable(JobClusterManagerProto.EnableJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.EnableJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.EnableJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.UpdateJobClusterArtifactResponse> updateArtifact(JobClusterManagerProto.UpdateJobClusterArtifactRequest request) {
CompletionStage<JobClusterManagerProto.UpdateJobClusterArtifactResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.UpdateJobClusterArtifactResponse.class::cast);
return response;
}
@Override
public CompletionStage<UpdateSchedulingInfoResponse> updateSchedulingInfo(String clusterName, UpdateSchedulingInfoRequest request) {
CompletionStage<UpdateSchedulingInfoResponse> response =
ask(
jobClustersManagerActor,
new UpdateSchedulingInfo(request.requestId, clusterName, request.getSchedulingInfo(),
request.getVersion()),
timeout)
.thenApply(UpdateSchedulingInfoResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.UpdateJobClusterSLAResponse> updateSLA(JobClusterManagerProto.UpdateJobClusterSLARequest request) {
CompletionStage<JobClusterManagerProto.UpdateJobClusterSLAResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.UpdateJobClusterSLAResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse> updateWorkerMigrateStrategy(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest request) {
CompletionStage<JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.UpdateJobClusterLabelsResponse> updateLabels(JobClusterManagerProto.UpdateJobClusterLabelsRequest request) {
CompletionStage<JobClusterManagerProto.UpdateJobClusterLabelsResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.UpdateJobClusterLabelsResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.SubmitJobResponse> submit(JobClusterManagerProto.SubmitJobRequest request) {
CompletionStage<JobClusterManagerProto.SubmitJobResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.SubmitJobResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.GetJobClusterResponse> getJobClusterDetails(JobClusterManagerProto.GetJobClusterRequest request) {
CompletionStage<JobClusterManagerProto.GetJobClusterResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.GetJobClusterResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.ListJobClustersResponse> getAllJobClusters(JobClusterManagerProto.ListJobClustersRequest request) {
allJobClustersGET.increment();
CompletionStage<JobClusterManagerProto.ListJobClustersResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.ListJobClustersResponse.class::cast);
return response;
}
@Override
public CompletionStage<JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse> getLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request) {
CompletionStage<JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse> response = ask(jobClustersManagerActor, request, timeout)
.thenApply(JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse.class::cast);
return response; }
}
| 8,162 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/ResourceClusterRouteHandlerAkkaImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static akka.pattern.Patterns.ask;
import akka.actor.ActorRef;
import io.mantisrx.master.resourcecluster.proto.GetResourceClusterSpecRequest;
import io.mantisrx.master.resourcecluster.proto.ListResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateAllResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateResourceClusterScaleRuleRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import lombok.extern.slf4j.Slf4j;
@Slf4j
/**
 * Akka-backed {@link ResourceClusterRouteHandler}: each operation is an
 * {@code ask} to the resource-clusters host-manager actor, with the reply cast
 * to the expected response type. Slow operations (scale, upgrade) use a longer
 * ask timeout than plain metadata reads.
 */
public class ResourceClusterRouteHandlerAkkaImpl implements ResourceClusterRouteHandler {
    /** Actor that manages resource cluster hosts; all requests are forwarded to it. */
    private final ActorRef resourceClustersHostManagerActor;
    /** Ask timeout for regular (fast) operations. */
    private final Duration timeout;
    /** Ask timeout for long-running operations such as scaling or container upgrades. */
    private final Duration longOperationTimeout;

    public ResourceClusterRouteHandlerAkkaImpl(ActorRef resourceClustersHostManagerActor) {
        this.resourceClustersHostManagerActor = resourceClustersHostManagerActor;
        this.timeout = Duration.ofMillis(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs());
        this.longOperationTimeout = Duration.ofMillis(
            ConfigurationProvider.getConfig().getMasterApiLongOperationAskTimeoutMs());
    }

    @Override
    public CompletionStage<ListResourceClustersResponse> get(ListResourceClusterRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(ListResourceClustersResponse.class::cast);
    }

    @Override
    public CompletionStage<GetResourceClusterResponse> create(
        ProvisionResourceClusterRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(GetResourceClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<DeleteResourceClusterResponse> delete(ClusterID clusterId) {
        // Only the cluster id is needed; build the request message here.
        DeleteResourceClusterRequest deleteRequest =
            DeleteResourceClusterRequest.builder().clusterId(clusterId).build();
        return ask(this.resourceClustersHostManagerActor, deleteRequest, timeout)
            .thenApply(DeleteResourceClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<GetResourceClusterResponse> get(GetResourceClusterSpecRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(GetResourceClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<ScaleResourceResponse> scale(ScaleResourceRequest request) {
        // Scaling can take a while; use the long-operation timeout.
        return ask(this.resourceClustersHostManagerActor, request, this.longOperationTimeout)
            .thenApply(ScaleResourceResponse.class::cast);
    }

    @Override
    public CompletionStage<UpgradeClusterContainersResponse> upgrade(UpgradeClusterContainersRequest request) {
        // Container upgrades can take a while; use the long-operation timeout.
        return ask(this.resourceClustersHostManagerActor, request, this.longOperationTimeout)
            .thenApply(UpgradeClusterContainersResponse.class::cast);
    }

    @Override
    public CompletionStage<GetResourceClusterScaleRulesResponse> createSingleScaleRule(
        CreateResourceClusterScaleRuleRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(GetResourceClusterScaleRulesResponse.class::cast);
    }

    @Override
    public CompletionStage<GetResourceClusterScaleRulesResponse> createAllScaleRule(
        CreateAllResourceClusterScaleRulesRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(GetResourceClusterScaleRulesResponse.class::cast);
    }

    @Override
    public CompletionStage<GetResourceClusterScaleRulesResponse> getClusterScaleRules(
        GetResourceClusterScaleRulesRequest request) {
        return ask(this.resourceClustersHostManagerActor, request, timeout)
            .thenApply(GetResourceClusterScaleRulesResponse.class::cast);
    }
}
| 8,163 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobStatusRouteHandlerAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.model.ws.Message;
import akka.http.scaladsl.model.ws.TextMessage;
import akka.stream.OverflowStrategy;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import io.mantisrx.master.events.JobStatusConnectedWSActor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Akka-streams backed {@link JobStatusRouteHandler}: serves job status updates
 * over a WebSocket by bridging an actor-backed source into the connection's flow.
 */
public class JobStatusRouteHandlerAkkaImpl implements JobStatusRouteHandler {
private static final Logger logger = LoggerFactory.getLogger(JobStatusRouteHandlerAkkaImpl.class);
// Broker actor that fans out status events; each WS connection registers with it.
private final ActorRef statusEventBrokerActor;
private final ActorSystem actorSystem;
public JobStatusRouteHandlerAkkaImpl(final ActorSystem actorSystem,
final ActorRef statusEventBrokerActor) {
this.actorSystem = actorSystem;
this.statusEventBrokerActor = statusEventBrokerActor;
}
/**
 * Based on https://markatta.com/codemonkey/blog/2016/04/18/chat-with-akka-http-websockets/
 * @param jobId job for which job status is requested
 * @return a flow that ignores the incoming messages from the WS client, and
 * creates a akka Source to emit a stream of JobStatus messages to the WS client
 */
@Override
public Flow<Message, Message, NotUsed> jobStatus(final String jobId) {
// One connected-WS actor per connection; the timestamp suffix keeps actor names unique
// when multiple clients watch the same job.
ActorRef jobStatusConnectedWSActor = actorSystem.actorOf(JobStatusConnectedWSActor.props(jobId, statusEventBrokerActor),
"JobStatusConnectedWSActor-" + jobId + "-" + System.currentTimeMillis());
// Inbound frames from the client carry no information for us; drain and discard them.
Sink<Message, NotUsed> incomingMessagesIgnored = Flow.<Message>create().to(Sink.ignore());
// Outbound: an actor-backed source buffering up to 100 events, dropping the oldest on
// overflow. On materialization the source's actor ref is handed to the connected-WS
// actor so it can start forwarding JobStatus events; each event is serialized to a
// JSON text frame.
Source<Message, NotUsed> backToWebSocket = Source.<JobStatus>actorRef(100, OverflowStrategy.dropHead())
.mapMaterializedValue((ActorRef outgoingActor) -> {
jobStatusConnectedWSActor.tell(
new JobStatusConnectedWSActor.Connected(outgoingActor),
ActorRef.noSender()
);
return NotUsed.getInstance();
})
.map(js -> new TextMessage.Strict(Jackson.toJson(js)));
return Flow.fromSinkAndSource(incomingMessagesIgnored, backToWebSocket);
}
}
| 8,164 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobDiscoveryRouteHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto.SchedInfoResponse;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import java.util.concurrent.CompletionStage;
/**
 * Async handlers backing the job-discovery routes, which expose streaming
 * views of job scheduling info and the last submitted job id per cluster.
 */
public interface JobDiscoveryRouteHandler {
/**
 * Returns a response carrying a stream of scheduling info updates for a job.
 * When {@code sendHeartbeats} is true the stream is interleaved with periodic
 * heartbeat elements to keep idle connections alive.
 */
CompletionStage<SchedInfoResponse> schedulingInfoStream(final JobClusterManagerProto.GetJobSchedInfoRequest request,
final boolean sendHeartbeats);
/**
 * Returns a response carrying a stream of last-submitted-job-id updates for a
 * job cluster, optionally interleaved with heartbeats (see above).
 */
CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> lastSubmittedJobIdStream(final JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request,
final boolean sendHeartbeats);
}
| 8,165 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobDiscoveryRouteHandlerAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.JOB_CLUSTER_INFO_HB_INSTANCE;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.SCHED_INFO_HB_INSTANCE;
import akka.actor.ActorRef;
import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.JobId;
import java.time.Duration;
import java.util.HashMap;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.subjects.BehaviorSubject;
/**
 * Akka-backed {@link JobDiscoveryRouteHandler}. Requests are asked of the
 * JobClustersManager actor, with short-lived caches so a burst of identical
 * discovery requests results in a single actor round-trip. The returned
 * observables are optionally interleaved with heartbeat elements so idle SSE
 * connections are kept alive.
 */
public class JobDiscoveryRouteHandlerAkkaImpl implements JobDiscoveryRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryRouteHandlerAkkaImpl.class);

    /** Actor that owns job cluster state; discovery requests are forwarded to it. */
    private final ActorRef jobClustersManagerActor;
    private final Duration askTimeout;
    // We want to heartbeat at least once before the idle conn timeout to keep the SSE stream conn alive
    private final Duration serverIdleConnectionTimeout;
    private final Counter schedInfoStreamErrors;
    private final Counter lastSubmittedJobIdStreamErrors;
    // 5s/500-entry caches deduplicating concurrent identical requests into one actor ask.
    private final AsyncLoadingCache<GetJobSchedInfoRequest, GetJobSchedInfoResponse> schedInfoCache;
    private final AsyncLoadingCache<GetLastSubmittedJobIdStreamRequest, GetLastSubmittedJobIdStreamResponse> lastSubmittedJobIdStreamRespCache;

    /**
     * @param jobClustersManagerActor actor handling discovery requests
     * @param serverIdleTimeout server idle connection timeout; heartbeats are emitted
     *        just under this interval so the connection is never considered idle
     */
    public JobDiscoveryRouteHandlerAkkaImpl(ActorRef jobClustersManagerActor, Duration serverIdleTimeout) {
        this.jobClustersManagerActor = jobClustersManagerActor;
        long timeoutMs = Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs()).orElse(1000L);
        this.askTimeout = Duration.ofMillis(timeoutMs);
        this.serverIdleConnectionTimeout = serverIdleTimeout;
        schedInfoCache = Caffeine.newBuilder()
            .expireAfterWrite(5, TimeUnit.SECONDS)
            .maximumSize(500)
            .buildAsync(this::jobSchedInfo);
        lastSubmittedJobIdStreamRespCache = Caffeine.newBuilder()
            .expireAfterWrite(5, TimeUnit.SECONDS)
            .maximumSize(500)
            .buildAsync(this::lastSubmittedJobId);
        Metrics m = new Metrics.Builder()
            .id("JobDiscoveryRouteHandlerAkkaImpl")
            .addCounter("schedInfoStreamErrors")
            .addCounter("lastSubmittedJobIdStreamErrors")
            .build();
        // Register with the central registry so the counters are actually published
        // (consistent with JobClusterRouteHandlerAkkaImpl); previously the counters
        // were built but never registered.
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.schedInfoStreamErrors = metrics.getCounter("schedInfoStreamErrors");
        this.lastSubmittedJobIdStreamErrors = metrics.getCounter("lastSubmittedJobIdStreamErrors");
    }

    /** Cache loader: asks the manager actor for a job's scheduling-info response. */
    private CompletableFuture<GetJobSchedInfoResponse> jobSchedInfo(final GetJobSchedInfoRequest request, Executor executor) {
        return ask(jobClustersManagerActor, request, askTimeout)
            .thenApply(GetJobSchedInfoResponse.class::cast)
            .toCompletableFuture();
    }

    @Override
    public CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoStream(final GetJobSchedInfoRequest request,
                                                                                          final boolean sendHeartbeats) {
        CompletionStage<GetJobSchedInfoResponse> response = schedInfoCache.get(request);
        try {
            AtomicBoolean isJobCompleted = new AtomicBoolean(false);
            final String jobId = request.getJobId().getId();
            // Empty scheduling info emitted after job completion so clients learn there are no workers.
            final JobSchedulingInfo completedJobSchedulingInfo = new JobSchedulingInfo(jobId, new HashMap<>());
            CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> jobSchedInfoObsCS = response
                .thenApply(getJobSchedInfoResp -> {
                    Optional<BehaviorSubject<JobSchedulingInfo>> jobStatusSubjectO = getJobSchedInfoResp.getJobSchedInfoSubject();
                    if (getJobSchedInfoResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS) && jobStatusSubjectO.isPresent()) {
                        BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoObs = jobStatusSubjectO.get();
                        // Heartbeat ticks slightly faster than the idle timeout; after the job
                        // completes the tick payload switches to the "completed" sentinel.
                        Observable<JobSchedulingInfo> heartbeats =
                            Observable.interval(5, serverIdleConnectionTimeout.getSeconds() - 1, TimeUnit.SECONDS)
                                .map(x -> {
                                    if (!isJobCompleted.get()) {
                                        return SCHED_INFO_HB_INSTANCE;
                                    } else {
                                        return completedJobSchedulingInfo;
                                    }
                                })
                                .takeWhile(x -> sendHeartbeats);
                        // Job SchedulingInfo obs completes on job shutdown. Use the do On completed as a signal to inform the user that there are no workers to connect to.
                        // TODO For future a more explicit key in the payload saying the job is completed.
                        Observable<JobSchedulingInfo> jobSchedulingInfoWithHBObs = Observable.merge(jobSchedulingInfoObs.doOnCompleted(() -> isJobCompleted.set(true)), heartbeats);
                        return new JobDiscoveryRouteProto.SchedInfoResponse(
                            getJobSchedInfoResp.requestId,
                            getJobSchedInfoResp.responseCode,
                            getJobSchedInfoResp.message,
                            jobSchedulingInfoWithHBObs
                        );
                    } else {
                        logger.info("Failed to get Sched info stream for {}", request.getJobId().getId());
                        schedInfoStreamErrors.increment();
                        return new JobDiscoveryRouteProto.SchedInfoResponse(
                            getJobSchedInfoResp.requestId,
                            getJobSchedInfoResp.responseCode,
                            getJobSchedInfoResp.message
                        );
                    }
                });
            return jobSchedInfoObsCS;
        } catch (Exception e) {
            // Guards against synchronous failures while wiring the stream; async failures
            // surface through the returned CompletionStage.
            logger.error("caught exception fetching sched info stream for {}", request.getJobId().getId(), e);
            schedInfoStreamErrors.increment();
            return CompletableFuture.completedFuture(new JobDiscoveryRouteProto.SchedInfoResponse(
                0,
                BaseResponse.ResponseCode.SERVER_ERROR,
                "Failed to get SchedulingInfo stream for jobId " + request.getJobId().getId() + " error: " + e.getMessage()
            ));
        }
    }

    /** Cache loader: asks the manager actor for a cluster's last-submitted-job-id response. */
    private CompletableFuture<GetLastSubmittedJobIdStreamResponse> lastSubmittedJobId(final GetLastSubmittedJobIdStreamRequest request, Executor executor) {
        return ask(jobClustersManagerActor, request, askTimeout)
            .thenApply(GetLastSubmittedJobIdStreamResponse.class::cast)
            .toCompletableFuture();
    }

    @Override
    public CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> lastSubmittedJobIdStream(final GetLastSubmittedJobIdStreamRequest request,
                                                                                                   final boolean sendHeartbeats) {
        CompletionStage<GetLastSubmittedJobIdStreamResponse> response = lastSubmittedJobIdStreamRespCache.get(request);
        try {
            return response
                .thenApply(lastSubmittedJobIdResp -> {
                    Optional<BehaviorSubject<JobId>> jobIdSubjectO = lastSubmittedJobIdResp.getjobIdBehaviorSubject();
                    if (lastSubmittedJobIdResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS) && jobIdSubjectO.isPresent()) {
                        Observable<JobClusterInfo> jobClusterInfoObs = jobIdSubjectO.get().map(jobId -> new JobClusterInfo(jobId.getCluster(), jobId.getId()));
                        Observable<JobClusterInfo> heartbeats =
                            Observable.interval(5, serverIdleConnectionTimeout.getSeconds() - 1, TimeUnit.SECONDS)
                                .map(x -> JOB_CLUSTER_INFO_HB_INSTANCE)
                                .takeWhile(x -> sendHeartbeats);
                        Observable<JobClusterInfo> jobClusterInfoWithHB = Observable.merge(jobClusterInfoObs, heartbeats);
                        return new JobDiscoveryRouteProto.JobClusterInfoResponse(
                            lastSubmittedJobIdResp.requestId,
                            lastSubmittedJobIdResp.responseCode,
                            lastSubmittedJobIdResp.message,
                            jobClusterInfoWithHB
                        );
                    } else {
                        logger.info("Failed to get lastSubmittedJobId stream for job cluster {}", request.getClusterName());
                        lastSubmittedJobIdStreamErrors.increment();
                        return new JobDiscoveryRouteProto.JobClusterInfoResponse(
                            lastSubmittedJobIdResp.requestId,
                            lastSubmittedJobIdResp.responseCode,
                            lastSubmittedJobIdResp.message
                        );
                    }
                });
        } catch (Exception e) {
            // Guards against synchronous failures while wiring the stream; async failures
            // surface through the returned CompletionStage.
            logger.error("caught exception fetching lastSubmittedJobId stream for {}", request.getClusterName(), e);
            lastSubmittedJobIdStreamErrors.increment();
            return CompletableFuture.completedFuture(new JobDiscoveryRouteProto.JobClusterInfoResponse(
                0,
                BaseResponse.ResponseCode.SERVER_ERROR,
                "Failed to get last submitted jobId stream for " + request.getClusterName() + " error: " + e.getMessage()
            ));
        }
    }
}
| 8,166 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobRouteHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.util.concurrent.CompletionStage;
/**
 * Async handlers backing the per-job HTTP routes: kill, worker resubmission,
 * stage scaling, worker status updates and read-only job queries.
 */
public interface JobRouteHandler {
/** Kills a running job. */
CompletionStage<JobClusterManagerProto.KillJobResponse> kill(final JobClusterManagerProto.KillJobRequest request);
/** Resubmits (replaces) a single worker of a job. */
CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> resubmitWorker(final JobClusterManagerProto.ResubmitWorkerRequest request);
/** Scales the number of workers of one stage of a job. */
CompletionStage<JobClusterManagerProto.ScaleStageResponse> scaleStage(final JobClusterManagerProto.ScaleStageRequest request);
/** Processes a worker status event reported for a job's worker. */
CompletionStage<BaseResponse> workerStatus(final WorkerEvent event);
//TODO CompletionStage<JobClusterManagerProto.ScaleStageResponse> updateScalingPolicy(final JobClusterManagerProto.Update request);
/** Fetches the details of a single job. */
CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> getJobDetails(final JobClusterManagerProto.GetJobDetailsRequest request);
/** Lists jobs matching the request's filters. */
CompletionStage<JobClusterManagerProto.ListJobsResponse> listJobs(final JobClusterManagerProto.ListJobsRequest request);
/** Lists only the ids of jobs matching the request's filters. */
CompletionStage<JobClusterManagerProto.ListJobIdsResponse> listJobIds(final JobClusterManagerProto.ListJobIdsRequest request);
/** Lists archived (no longer active) workers of a job. */
CompletionStage<JobClusterManagerProto.ListArchivedWorkersResponse> listArchivedWorkers(final JobClusterManagerProto.ListArchivedWorkersRequest request);
}
| 8,167 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobStatusRouteHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import akka.NotUsed;
import akka.http.javadsl.model.ws.Message;
import akka.stream.javadsl.Flow;
/** Contract for serving a job's status events over a websocket connection. */
public interface JobStatusRouteHandler {
    /**
     * Returns an Akka Streams {@link Flow} that bridges a websocket (inbound/outbound
     * {@link Message}s) to the status events of the given job.
     *
     * @param jobId id of the job whose status stream is requested
     */
    Flow<Message, Message, NotUsed> jobStatus(final String jobId);
}
| 8,168 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobRouteHandlerAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.handlers;
import static akka.pattern.PatternsCS.ask;
import akka.actor.ActorRef;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link JobRouteHandler} implementation that serves every request by asking the
 * JobClustersManager actor and casting its reply to the expected response type.
 */
public class JobRouteHandlerAkkaImpl implements JobRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobRouteHandlerAkkaImpl.class);
    private static final long DEFAULT_ASK_TIMEOUT_MS = 1000L;

    private final ActorRef jobClustersManagerActor;
    private final Duration timeout;
    private final Counter listAllJobs;
    private final Counter listJobIds;
    private final Counter listArchivedWorkers;

    public JobRouteHandlerAkkaImpl(ActorRef jobClusterManagerActor) {
        this.jobClustersManagerActor = jobClusterManagerActor;
        // Ask timeout comes from master config, falling back to 1s when unset.
        this.timeout = Duration.ofMillis(
                Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs())
                        .orElse(DEFAULT_ASK_TIMEOUT_MS));
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(
                new Metrics.Builder()
                        .id("JobRouteHandler")
                        .addCounter("listAllJobs")
                        .addCounter("listJobIds")
                        .addCounter("listArchivedWorkers")
                        .build());
        this.listAllJobs = registered.getCounter("listAllJobs");
        this.listJobIds = registered.getCounter("listJobIds");
        this.listArchivedWorkers = registered.getCounter("listArchivedWorkers");
    }

    /** Asks the JobClustersManager actor and casts its reply to {@code responseType}. */
    private <R> CompletionStage<R> askManager(Object request, Class<R> responseType) {
        return ask(jobClustersManagerActor, request, timeout).thenApply(responseType::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.KillJobResponse> kill(JobClusterManagerProto.KillJobRequest request) {
        return askManager(request, JobClusterManagerProto.KillJobResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> resubmitWorker(JobClusterManagerProto.ResubmitWorkerRequest request) {
        return askManager(request, JobClusterManagerProto.ResubmitWorkerResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ScaleStageResponse> scaleStage(JobClusterManagerProto.ScaleStageRequest request) {
        return askManager(request, JobClusterManagerProto.ScaleStageResponse.class);
    }

    @Override
    public CompletionStage<BaseResponse> workerStatus(final WorkerEvent request) {
        // Fire-and-forget: the manager actor processes the event asynchronously,
        // so the caller gets an immediate success acknowledgement.
        jobClustersManagerActor.tell(request, ActorRef.noSender());
        return CompletableFuture.completedFuture(
                new BaseResponse(0L, BaseResponse.ResponseCode.SUCCESS, "forwarded worker status"));
    }

    @Override
    public CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> getJobDetails(final JobClusterManagerProto.GetJobDetailsRequest request) {
        return askManager(request, JobClusterManagerProto.GetJobDetailsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListJobsResponse> listJobs(JobClusterManagerProto.ListJobsRequest request) {
        logger.debug("request {}", request);
        listAllJobs.increment();
        return askManager(request, JobClusterManagerProto.ListJobsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListJobIdsResponse> listJobIds(JobClusterManagerProto.ListJobIdsRequest request) {
        logger.debug("request {}", request);
        listJobIds.increment();
        return askManager(request, JobClusterManagerProto.ListJobIdsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListArchivedWorkersResponse> listArchivedWorkers(JobClusterManagerProto.ListArchivedWorkersRequest request) {
        listArchivedWorkers.increment();
        return askManager(request, JobClusterManagerProto.ListArchivedWorkersResponse.class);
    }
}
| 8,169 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/WorkerRegistryV2.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import static java.util.stream.Collectors.toMap;
import akka.actor.Props;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This Actor holds a registry of all running workers for all jobs in the system.
* The Job Actor sends a message with a complete snapshot of running workers to the LifeCycleEventPublisher
* The LifeCycleEventPublisher then forwards them to this Actor.
*/
public class WorkerRegistryV2 implements WorkerRegistry, WorkerEventSubscriber {
    private final Logger logger = LoggerFactory.getLogger(WorkerRegistryV2.class);

    // Latest worker snapshot per job; each WorkerListChangedEvent replaces a job's entry wholesale.
    private final ConcurrentMap<JobId, List<IMantisWorkerMetadata>> jobToWorkerInfoMap = new ConcurrentHashMap<>();

    public static final WorkerRegistryV2 INSTANCE = new WorkerRegistryV2();

    // NOTE(review): this class does not extend an Akka Actor, yet props() builds
    // Props for it -- confirm whether this factory is still used anywhere.
    public static Props props() {
        return Props.create(WorkerRegistryV2.class);
    }

    WorkerRegistryV2() {
        logger.info("WorkerRegistryV2 created");
    }

    /**
     * Counts workers in a running state across all jobs that belong to the given
     * resource cluster. A null cluster matches workers that have no resource cluster.
     *
     * @param resourceCluster cluster to filter by, or null for workers without one
     * @return number of running workers for that cluster
     */
    @Override
    public int getNumRunningWorkers(@Nullable ClusterID resourceCluster) {
        if(logger.isDebugEnabled()) { logger.debug("In getNumRunningWorkers"); }
        final Optional<ClusterID> cluster = Optional.ofNullable(resourceCluster);
        // Stream all workers once and count, instead of materializing a filtered
        // list per job only to read its size and sum with a boxed reduce.
        int cnt = (int) jobToWorkerInfoMap.values().stream()
                .flatMap(List::stream)
                .filter(wm -> cluster.equals(wm.getResourceCluster()))
                .filter(wm -> WorkerState.isRunningState(wm.getState()))
                .count();
        if(logger.isDebugEnabled()) { logger.debug("Returning {} from getNumRunningWorkers", cnt); }
        return cnt;
    }

    /**
     * Returns the ids of all running workers for the given resource cluster.
     * A null cluster matches workers that have no resource cluster.
     */
    @Override
    public Set<WorkerId> getAllRunningWorkers(@Nullable ClusterID resourceCluster) {
        final Optional<ClusterID> cluster = Optional.ofNullable(resourceCluster);
        return jobToWorkerInfoMap.values().stream()
                .flatMap(List::stream)
                .filter(wm -> cluster.equals(wm.getResourceCluster()))
                .filter(wm -> WorkerState.isRunningState(wm.getState()))
                .map(IMantisWorkerMetadata::getWorkerId)
                .collect(Collectors.toSet());
    }

    /**
     * Maps every running worker's id to its slave id for the given resource cluster.
     * A null cluster matches workers that have no resource cluster.
     */
    @Override
    public Map<WorkerId, String> getAllRunningWorkerSlaveIdMappings(@Nullable ClusterID resourceCluster) {
        final Optional<ClusterID> cluster = Optional.ofNullable(resourceCluster);
        return jobToWorkerInfoMap.values().stream()
                .flatMap(List::stream)
                .filter(wm -> cluster.equals(wm.getResourceCluster()))
                .filter(wm -> WorkerState.isRunningState(wm.getState()))
                .collect(toMap(
                        IMantisWorkerMetadata::getWorkerId,
                        IMantisWorkerMetadata::getSlaveID,
                        // On a (theoretically duplicate) key, keep the first non-null slave id.
                        (s1, s2) -> (s1 != null) ? s1 : s2));
    }

    /**
     * Checks whether the given worker id refers to a worker currently known for its job.
     *
     * @param workerId worker to validate
     * @return true iff the job exists in the registry and its snapshot contains this worker
     */
    @Override
    public boolean isWorkerValid(WorkerId workerId) {
        if(logger.isDebugEnabled()) { logger.debug("In isWorkerValid event {}", workerId); }
        Optional<JobId> jIdOp = JobId.fromId(workerId.getJobId());
        if(!jIdOp.isPresent()) {
            logger.warn("Invalid job Id {}", workerId.getJobId());
            return false;
        }
        List<IMantisWorkerMetadata> mantisWorkerMetadataList = jobToWorkerInfoMap.get(jIdOp.get());
        if(mantisWorkerMetadataList == null) {
            logger.warn("No such job {} found in job To worker map ", jIdOp.get());
            return false;
        }
        return mantisWorkerMetadataList.stream().anyMatch(mData -> mData.getWorkerId().equals(workerId));
    }

    /**
     * Looks up the acceptedAt timestamp for the given worker.
     *
     * @param workerId worker to look up
     * @return the acceptedAt time, or empty if the job id is malformed or the worker is unknown
     */
    @Override
    public Optional<Long> getAcceptedAt(WorkerId workerId) {
        if(logger.isDebugEnabled()) { logger.debug("In getAcceptedAt for worker {}", workerId); }
        Optional<JobId> jId = JobId.fromId(workerId.getJobId());
        if(!jId.isPresent()) {
            return Optional.empty();
        }
        List<IMantisWorkerMetadata> mantisWorkerMetadataList = jobToWorkerInfoMap.get(jId.get());
        if(mantisWorkerMetadataList != null) {
            Optional<IMantisWorkerMetadata> mantisWorkerMetadata = mantisWorkerMetadataList.stream()
                    .filter(mData -> mData.getWorkerId().equals(workerId))
                    .findAny();
            if (mantisWorkerMetadata.isPresent()) {
                logger.info("Found worker {} return acceptedAt {}", workerId, mantisWorkerMetadata.get().getAcceptedAt());
                return Optional.of(mantisWorkerMetadata.get().getAcceptedAt());
            }
        }
        return Optional.empty();
    }

    /**
     * Removes all registry state for a job.
     *
     * @return true if the job was present and removed
     */
    private boolean deregisterJob(JobId jobId) {
        logger.info("De-registering {}", jobId);
        return jobToWorkerInfoMap.remove(jobId) != null;
    }

    /** Replaces the job's worker snapshot with the latest list carried by the event. */
    @Override
    public void process(LifecycleEventsProto.WorkerListChangedEvent event) {
        if(logger.isDebugEnabled()) { logger.debug("on WorkerListChangedEvent for job {} with workers {}", event.getWorkerInfoListHolder().getJobId(), event.getWorkerInfoListHolder().getWorkerMetadataList().size()); }
        JobId jId = event.getWorkerInfoListHolder().getJobId();
        jobToWorkerInfoMap.put(jId, event.getWorkerInfoListHolder().getWorkerMetadataList());
    }

    /** Drops the job from the registry once it transitions to a terminal state. */
    @Override
    public void process(LifecycleEventsProto.JobStatusEvent statusEvent) {
        if(logger.isDebugEnabled()) { logger.debug("In JobStatusEvent {}", statusEvent); }
        JobState jobState = statusEvent.getJobState();
        if(JobState.isTerminalState(jobState)) {
            final JobId jobId = statusEvent.getJobId();
            deregisterJob(jobId);
        }
    }

    /** Worker-level status events carry no registry state change; intentionally a no-op. */
    @Override
    public void process(WorkerStatusEvent workerStatusEvent) {
    }
}
| 8,170 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/WorkerMetricsCollector.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import com.netflix.spectator.api.Tag;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.Timer;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.events.LifecycleEventsProto.JobStatusEvent;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerListChangedEvent;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.google.common.util.concurrent.AbstractScheduledService;
import io.mantisrx.shaded.org.apache.curator.shaded.com.google.common.base.Preconditions;
import io.netty.util.internal.ConcurrentSet;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
/**
* The goal of this service is to keep emitting metrics around how long it took for the worker to be
* scheduled, to be prepared and how long it was running for.
*/
@RequiredArgsConstructor
@Slf4j
public class WorkerMetricsCollector extends AbstractScheduledService implements
    WorkerEventSubscriber {

    // Latest worker metadata per job, keyed by worker id; replaced wholesale on
    // each WorkerListChangedEvent.
    private final ConcurrentMap<JobId, Map<WorkerId, IMantisWorkerMetadata>> jobWorkers =
        new ConcurrentHashMap<>();

    // Per-resource-cluster metric holders, created lazily on first use.
    private final ConcurrentMap<String, WorkerMetrics> clusterWorkersMetrics =
        new ConcurrentHashMap<>();

    // Terminal jobs pending state cleanup, each with its expiry time.
    // NOTE(review): netty's ConcurrentSet is deprecated; consider ConcurrentHashMap.newKeySet().
    private final ConcurrentSet<CleanupJobEvent> jobsToBeCleaned = new ConcurrentSet<>();

    // How long a terminal job's worker state is retained before cleanup.
    private final Duration cleanupInterval;
    // Period between cleanup sweeps.
    private final Duration epochDuration;
    // Injected for testable time.
    private final Clock clock;
    private final WorkerMetricsCollectorMetrics workerMetricsCollectorMetrics = new WorkerMetricsCollectorMetrics();

    /** One sweep: drop worker state for every job whose cleanup expiry has passed. */
    @Override
    protected void runOneIteration() {
        Instant current = clock.instant();
        Iterator<CleanupJobEvent> iterator = jobsToBeCleaned.iterator();
        while (iterator.hasNext()) {
            CleanupJobEvent event = iterator.next();
            if (current.isAfter(event.getExpiry())) {
                jobWorkers.remove(event.getJobId());
                iterator.remove();
            }
        }
        workerMetricsCollectorMetrics.reportJobWorkersSize(jobWorkers.size());
    }

    /** Runs the cleanup sweep on a fixed delay of {@code epochDuration}. */
    @Override
    protected Scheduler scheduler() {
        return Scheduler.newFixedDelaySchedule(
            epochDuration.toMillis(),
            epochDuration.toMillis(),
            TimeUnit.MILLISECONDS);
    }

    /** Records the job's latest worker list so later status events can be timed against it. */
    @Override
    public void process(WorkerListChangedEvent event) {
        final JobId jobId = event.getWorkerInfoListHolder().getJobId();
        final List<IMantisWorkerMetadata> workers =
            event.getWorkerInfoListHolder().getWorkerMetadataList();
        jobWorkers.put(jobId,
            workers.stream().collect(Collectors.toMap(IMantisWorkerMetadata::getWorkerId, m -> m)));
        workerMetricsCollectorMetrics.reportJobWorkersSize(jobWorkers.size());
    }

    /** Schedules cleanup of the job's state once the job reaches a terminal state. */
    @Override
    public void process(JobStatusEvent statusEvent) {
        if (statusEvent.getJobState().isTerminal()) {
            cleanUp(statusEvent.getJobId());
        }
    }

    /** Returns (creating on first use) the metric holder for the given cluster. */
    private WorkerMetrics getWorkerMetrics(String clusterName) {
        return clusterWorkersMetrics.computeIfAbsent(
            clusterName, dontCare -> new WorkerMetrics(clusterName));
    }

    /**
     * Translates a worker state transition into a duration metric:
     * Launched records scheduling time, Started records preparation time,
     * Failed/Completed record running time. Any failure is logged and swallowed
     * so event delivery is never disrupted.
     */
    @Override
    public void process(WorkerStatusEvent workerStatusEvent) {
        try {
            final WorkerState workerState =
                workerStatusEvent.getWorkerState();
            final JobId jobId = JobId.fromId(workerStatusEvent.getWorkerId().getJobId()).get();
            final WorkerId workerId = workerStatusEvent.getWorkerId();
            // Read the job's worker map exactly once: a second jobWorkers.get(jobId)
            // could race with the cleanup sweep in runOneIteration() and NPE after
            // the null check had already passed.
            final Map<WorkerId, IMantisWorkerMetadata> workersById = jobWorkers.get(jobId);
            Preconditions.checkNotNull(workersById);
            final IMantisWorkerMetadata metadata = workersById.get(workerId);
            if (metadata == null) {
                log.warn("Unknown workerId: {} for metrics collector in job: {}", workerId, jobId);
                return;
            }
            final WorkerMetrics workerMetrics = getWorkerMetrics(
                metadata.getResourceCluster().map(ClusterID::getResourceID).orElse("mesos"));
            switch (workerState) {
                case Accepted:
                    // do nothing; This is the initial state
                    break;
                case Launched:
                    // this represents the scheduling time
                    workerMetrics.reportSchedulingDuration(
                        Math.max(0L, workerStatusEvent.getTimestamp() - metadata.getAcceptedAt()));
                    break;
                case StartInitiated:
                    // do nothing; this event gets sent when the worker has received the request;
                    // it's too granular and expected to be really low - so there's no point in measuring it.
                    break;
                case Started:
                    workerMetrics.reportWorkerPreparationDuration(
                        Math.max(0L, workerStatusEvent.getTimestamp() - metadata.getLaunchedAt()));
                    break;
                case Failed:
                case Completed:
                    workerMetrics.reportRunningDuration(
                        Math.max(0L, workerStatusEvent.getTimestamp() - metadata.getStartedAt()));
                    break;
                case Noop:
                    break;
                case Unknown:
                    log.error("Unknown WorkerStatusEvent {}", workerStatusEvent);
                    break;
            }
        } catch (Exception e) {
            log.error("Failed to process worker status event {}", workerStatusEvent, e);
        }
    }

    /** Marks the job's state for removal after {@code cleanupInterval} has elapsed. */
    private void cleanUp(JobId jobId) {
        jobsToBeCleaned.add(new CleanupJobEvent(jobId, clock.instant().plus(cleanupInterval)));
    }

    /** Per-cluster timers for scheduling, preparation, and running durations. */
    private static class WorkerMetrics {
        public static final String SCHEDULING_DURATION = "schedulingDuration";
        public static final String PREPARATION_DURATION = "preparationDuration";
        public static final String RUNNING_DURATION = "runningDuration";

        private final Timer schedulingDuration;
        private final Timer preparationDuration;
        private final Timer runningDuration;

        public WorkerMetrics(final String clusterName) {
            MetricGroupId metricGroupId =
                new MetricGroupId(
                    "WorkerMetricsCollector",
                    Tag.of("cluster", clusterName),
                    Tag.of("resourceCluster", clusterName));
            Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addTimer(SCHEDULING_DURATION)
                .addTimer(PREPARATION_DURATION)
                .addTimer(RUNNING_DURATION)
                .build();
            m = MetricsRegistry.getInstance().registerAndGet(m);
            this.schedulingDuration = m.getTimer(SCHEDULING_DURATION);
            this.preparationDuration = m.getTimer(PREPARATION_DURATION);
            this.runningDuration = m.getTimer(RUNNING_DURATION);
        }

        private void reportSchedulingDuration(long durationInMillis) {
            this.schedulingDuration.record(durationInMillis, TimeUnit.MILLISECONDS);
        }

        private void reportWorkerPreparationDuration(long durationInMillis) {
            this.preparationDuration.record(durationInMillis, TimeUnit.MILLISECONDS);
        }

        private void reportRunningDuration(long durationInMillis) {
            this.runningDuration.record(durationInMillis, TimeUnit.MILLISECONDS);
        }
    }

    /** Gauge tracking how many jobs are currently held in the worker map. */
    private static class WorkerMetricsCollectorMetrics {
        public static final String JOB_WORKERS_MAP_SIZE = "jobWorkersMapSize";

        private final Gauge jobWorkersMapSize;

        public WorkerMetricsCollectorMetrics() {
            MetricGroupId metricGroupId = new MetricGroupId("WorkerMetricsCollector");
            Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addGauge(JOB_WORKERS_MAP_SIZE)
                .build();
            // Register with the registry so the gauge is actually published,
            // mirroring how WorkerMetrics registers its timers above.
            m = MetricsRegistry.getInstance().registerAndGet(m);
            this.jobWorkersMapSize = m.getGauge(JOB_WORKERS_MAP_SIZE);
        }

        private void reportJobWorkersSize(int size) {
            jobWorkersMapSize.set(size);
        }
    }

    /** Schedule entry: drop {@code jobId}'s worker state once {@code expiry} passes. */
    @Value
    private static class CleanupJobEvent {
        JobId jobId;
        Instant expiry;
    }
}
| 8,171 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/LifecycleEventsProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;
import io.mantisrx.master.jobcluster.WorkerInfoListHolder;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobId;
import java.util.Optional;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
 * Protocol objects for lifecycle events — audit records, status events at job
 * cluster / job / worker granularity, and worker-list change notifications —
 * plus a translator from status events to the legacy {@link Status} type.
 */
public class LifecycleEventsProto {

    /** Immutable audit record describing an operation performed on a cluster, job, worker or agent cluster. */
    public static final class AuditEvent {
        /** Enumerates every auditable operation, grouped by the entity it concerns. */
        public enum AuditEventType {
            // job cluster events
            JOB_CLUSTER_CREATE,
            JOB_CLUSTER_EXISTS,
            JOB_CLUSTER_FAILURE,
            JOB_CLUSTER_UPDATE,
            JOB_CLUSTER_DELETE,
            JOB_CLUSTER_DISABLED,
            JOB_CLUSTER_ENABLED,

            // job events
            JOB_SUBMIT,
            JOB_START,
            JOB_TERMINATE,
            JOB_SHUTDOWN,
            JOB_DELETE,
            JOB_SCALE_UP,
            JOB_SCALE_DOWN,
            JOB_SCALE_UPDATE,
            JOB_FAILURE,

            // worker events
            WORKER_START,
            WORKER_TERMINATE,
            WORKER_RESUBMIT,
            WORKER_RESUBMITS_LIMIT,
            WORKER_STATUS_HB,

            // agent cluster events
            CLUSTER_SCALE_UP,
            CLUSTER_SCALE_DOWN,
            CLUSTER_ACTIVE_VMS,

            //actor events
            JOB_CLUSTER_ACTOR_CREATE,
            JOB_CLUSTER_ACTOR_TERMINATE,
        }

        private final AuditEventType auditEventType;
        // Identifier of the entity the operation acted upon.
        private final String operand;
        // Free-form payload describing the operation.
        private final String data;

        public AuditEvent(AuditEventType auditEventType, String operand, String data) {
            this.auditEventType = auditEventType;
            this.operand = operand;
            this.data = data;
        }

        public AuditEventType getAuditEventType() {
            return auditEventType;
        }

        public String getOperand() {
            return operand;
        }

        public String getData() {
            return data;
        }

        @Override
        public String toString() {
            return "AuditEvent{" +
                    "auditEventType=" + auditEventType +
                    ", operand='" + operand + '\'' +
                    ", data='" + data + '\'' +
                    '}';
        }
    }

    /**
     * Base class for status events. Subclasses add the entity (job cluster, job,
     * or worker) the event pertains to.
     */
    @Getter
    @EqualsAndHashCode
    @AllArgsConstructor
    @ToString
    public static class StatusEvent {
        /** Severity / category of a status event. */
        public enum StatusEventType {
            ERROR, WARN, INFO, DEBUG, HEARTBEAT
        }

        protected final StatusEventType statusEventType;
        protected final String message;
        // Event time in epoch millis.
        protected final long timestamp;

        /** Creates an event timestamped with the current wall-clock time. */
        public StatusEvent(StatusEventType type, String message) {
            this(type, message, System.currentTimeMillis());
        }
    }

    /** Status event for a single worker, identified by stage number and worker id. */
    @Getter
    @ToString
    public static final class WorkerStatusEvent extends StatusEvent {
        private final int stageNum;
        private final WorkerId workerId;
        private final WorkerState workerState;
        // Host the worker runs on, when known.
        private final Optional<String> hostName;

        /** Creates an event with the current time and no host name. */
        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState) {
            super(type, message);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = empty();
        }

        /** Creates an event with an explicit timestamp and no host name. */
        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final long ts) {
            super(type, message, ts);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = empty();
        }

        /** Creates an event with an explicit timestamp and a (possibly null) host name. */
        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final String hostName,
                                 final long ts) {
            super(type, message, ts);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = ofNullable(hostName);
        }

        /** Creates an event with the current time and an optional host name. */
        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final Optional<String> hostName) {
            super(type, message);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = hostName;
        }
    }

    /** Status event for a job, carrying the job's id and its current state. */
    @ToString
    @Getter
    public static final class JobStatusEvent extends StatusEvent {
        private final JobId jobId;
        private final JobState jobState;

        public JobStatusEvent(final StatusEventType type,
                              final String message,
                              final JobId jobId,
                              final JobState jobState) {
            super(type, message);
            this.jobId = jobId;
            this.jobState = jobState;
        }
    }

    /** Status event for a job cluster, identified by the cluster name. */
    @ToString
    public static final class JobClusterStatusEvent extends StatusEvent {
        private final String jobCluster;

        public JobClusterStatusEvent(final StatusEventType type,
                                     final String message,
                                     final String jobCluster) {
            super(type, message);
            this.jobCluster = jobCluster;
        }

        public String getJobCluster() {
            return jobCluster;
        }
    }

    /**
     * Translates a {@link StatusEvent} (or one of its subclasses) into the legacy
     * {@link Status} representation, mapping the event type to a Status.TYPE and
     * filling job / stage / worker fields according to the concrete subclass.
     *
     * @throws IllegalArgumentException if the event type has no Status.TYPE mapping
     */
    public static Status from(final StatusEvent ev) {
        Status.TYPE type;
        switch (ev.statusEventType) {
            case INFO:
                type = Status.TYPE.INFO;
                break;
            case WARN:
                type = Status.TYPE.WARN;
                break;
            case DEBUG:
                type = Status.TYPE.DEBUG;
                break;
            case ERROR:
                type = Status.TYPE.ERROR;
                break;
            case HEARTBEAT:
                type = Status.TYPE.HEARTBEAT;
                break;
            default:
                throw new IllegalArgumentException("status event type cannot be translated to Status Type "+ ev.statusEventType.name());
        }
        // Placeholder returned only when ev is a bare StatusEvent (none of the subclasses below).
        Status status = new Status("None", -1, -1, -1, Status.TYPE.DEBUG, "Invalid", MantisJobState.Noop);
        if (ev instanceof LifecycleEventsProto.JobStatusEvent) {
            JobStatusEvent jse = (JobStatusEvent) ev;
            status = new Status(jse.jobId.getId(), -1, -1, -1, type, jse.getJobId() + " " + jse.message,
                    DataFormatAdapter.convertToMantisJobState(jse.jobState));
        } else if (ev instanceof LifecycleEventsProto.JobClusterStatusEvent) {
            JobClusterStatusEvent jcse = (JobClusterStatusEvent) ev;
            status = new Status(jcse.jobCluster, -1, -1, -1, type, jcse.getJobCluster() + " " + jcse.message,
                    MantisJobState.Noop);
        } else if (ev instanceof LifecycleEventsProto.WorkerStatusEvent) {
            WorkerStatusEvent wse = (WorkerStatusEvent) ev;
            status = new Status(wse.workerId.getJobId(), wse.stageNum, wse.workerId.getWorkerIndex(), wse.workerId.getWorkerNum(), type,
                    wse.getWorkerId().getId() + " " + wse.message,
                    DataFormatAdapter.convertWorkerStateToMantisJobState(wse.workerState));
        }
        return status;
    }

    /** Notification that a job's complete worker list has been replaced. */
    public static class WorkerListChangedEvent {
        private final WorkerInfoListHolder workerInfoListHolder;

        public WorkerListChangedEvent(WorkerInfoListHolder workerInfoListHolder) {
            this.workerInfoListHolder = workerInfoListHolder;
        }

        public WorkerInfoListHolder getWorkerInfoListHolder() {
            return workerInfoListHolder;
        }

        @Override
        public String toString() {
            return "WorkerListChangedEvent{" +
                    "workerInfoListHolder=" + workerInfoListHolder +
                    '}';
        }
    }
}
| 8,172 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/LifecycleEventPublisherImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class LifecycleEventPublisherImpl implements LifecycleEventPublisher {
    private final AuditEventSubscriber auditEventSubscriber;
    private final StatusEventSubscriber statusEventSubscriber;
    private final WorkerEventSubscriber workerEventSubscriber;

    public LifecycleEventPublisherImpl(final AuditEventSubscriber auditEventSubscriber,
                                       final StatusEventSubscriber statusEventSubscriber,
                                       final WorkerEventSubscriber workerEventSubscriber) {
        this.auditEventSubscriber = auditEventSubscriber;
        this.statusEventSubscriber = statusEventSubscriber;
        this.workerEventSubscriber = workerEventSubscriber;
    }

    /** Delivers the audit event to the audit subscriber. */
    @Override
    public void publishAuditEvent(final LifecycleEventsProto.AuditEvent auditEvent) {
        auditEventSubscriber.process(auditEvent);
    }

    /**
     * Delivers the status event to the status subscriber and, for job- or
     * worker-level events, also to the worker event subscriber. Subscriber
     * failures are logged and swallowed.
     */
    @Override
    public void publishStatusEvent(final LifecycleEventsProto.StatusEvent statusEvent) {
        try {
            statusEventSubscriber.process(statusEvent);
            forwardToWorkerSubscriber(statusEvent);
        } catch (Exception e) {
            log.error("Failed to publish the event={}; Ignoring the failure as this is just a listener interface", statusEvent, e);
        }
    }

    /** Routes job-level and worker-level status events to the worker event subscriber. */
    private void forwardToWorkerSubscriber(final LifecycleEventsProto.StatusEvent statusEvent) {
        if (statusEvent instanceof LifecycleEventsProto.JobStatusEvent) {
            workerEventSubscriber.process((LifecycleEventsProto.JobStatusEvent) statusEvent);
        } else if (statusEvent instanceof WorkerStatusEvent) {
            workerEventSubscriber.process((WorkerStatusEvent) statusEvent);
        }
    }

    /** Delivers the worker-list change notification to the worker event subscriber. */
    @Override
    public void publishWorkerListChangedEvent(LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent) {
        workerEventSubscriber.process(workerListChangedEvent);
    }
}
| 8,173 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/StatusEventBrokerActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.Terminated;
import akka.dispatch.BoundedMessageQueueSemantics;
import akka.dispatch.RequiresMessageQueue;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.shaded.com.google.common.collect.EvictingQueue;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* JobStatus Broker that receives StatusEvents from all actors and demultiplexes to client connections interested in
* events for a specific jobId
*/
public class StatusEventBrokerActor extends AbstractActor
    implements RequiresMessageQueue<BoundedMessageQueueSemantics> {
    // Logger is stateless and thread-safe; a static instance avoids per-actor allocation.
    private static final Logger logger = LoggerFactory.getLogger(StatusEventBrokerActor.class);

    // jobId -> connection actors currently subscribed to that job's status events.
    private final Map<String, Set<ActorRef>> jobIdToActorMap = new HashMap<>();
    // Reverse index used to clean up subscriptions when a connection actor terminates.
    private final Map<ActorRef, String> actorToJobIdMap = new HashMap<>();
    private final ActorRef agentsErrorMonitorActorRef;
    // Bounded per-job event history, replayed to newly connected subscribers.
    private final Map<String, EvictingQueue<Status>> jobIdToStatusEventsBuf = new HashMap<>();

    /** Maximum number of status events buffered per job for replay on connect. */
    public static final int MAX_STATUS_HISTORY_PER_JOB = 100;

    public static Props props(ActorRef agentsErrorMonitorActorRef) {
        return Props.create(StatusEventBrokerActor.class, agentsErrorMonitorActorRef);
    }

    public StatusEventBrokerActor(ActorRef agentsErrorMonitorActorRef) {
        this.agentsErrorMonitorActorRef = agentsErrorMonitorActorRef;
    }

    /** Request message asking this broker to stream status events for {@code jobId} to the sender. */
    public static class JobStatusRequest {
        private final String jobId;

        public JobStatusRequest(final String jobId) {
            this.jobId = jobId;
        }

        public String getJobId() {
            return jobId;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final JobStatusRequest that = (JobStatusRequest) o;
            return Objects.equals(jobId, that.jobId);
        }

        @Override
        public int hashCode() {
            return Objects.hash(jobId);
        }

        @Override
        public String toString() {
            return "JobStatusRequest{" +
                "jobId='" + jobId + '\'' +
                '}';
        }
    }

    /**
     * Registers the sender as a subscriber for the requested job and replays any buffered
     * status history to it.
     */
    private void onJobStatusRequest(final JobStatusRequest jsr) {
        logger.debug("got request {}", jsr);
        ActorRef sender = sender();
        jobIdToActorMap.computeIfAbsent(jsr.jobId, (jobId) -> new HashSet<>()).add(sender);
        actorToJobIdMap.put(sender, jsr.jobId);
        // Watch the connection actor so onTerminated can clean up its state.
        getContext().watch(sender);
        // replay buffered status events on new connection
        EvictingQueue<Status> statusEventsBuf = jobIdToStatusEventsBuf.get(jsr.jobId);
        if (statusEventsBuf != null) {
            statusEventsBuf.forEach(se -> sender.tell(new JobStatus(se), ActorRef.noSender()));
        }
    }

    /** Drops the buffered history for a job once it reaches a terminal state. */
    private void cleanupIfTerminalState(final LifecycleEventsProto.StatusEvent se) {
        if (se instanceof LifecycleEventsProto.JobStatusEvent) {
            LifecycleEventsProto.JobStatusEvent jse = (LifecycleEventsProto.JobStatusEvent) se;
            if (JobState.isTerminalState(jse.getJobState())) {
                jobIdToStatusEventsBuf.remove(jse.getJobId());
            }
        }
    }

    // sends JobStatus messages to active connections by jobId
    private void onStatusEvent(final LifecycleEventsProto.StatusEvent se) {
        Status status = LifecycleEventsProto.from(se);
        String jobId = status.getJobId();
        // add Status to job event history (EvictingQueue drops the oldest entry once full)
        jobIdToStatusEventsBuf
            .computeIfAbsent(jobId, (j) -> EvictingQueue.create(MAX_STATUS_HISTORY_PER_JOB))
            .add(status);
        cleanupIfTerminalState(se);
        Set<ActorRef> jobStatusActiveConnections = jobIdToActorMap.get(jobId);
        if (jobStatusActiveConnections != null && !jobStatusActiveConnections.isEmpty()) {
            logger.debug("Sending job status {}", se);
            jobStatusActiveConnections.forEach(connActor -> connActor.tell(new JobStatus(status), self()));
        } else {
            logger.debug("Job status dropped, no active subscribers for {}", jobId);
        }
        if (se instanceof LifecycleEventsProto.WorkerStatusEvent) {
            this.agentsErrorMonitorActorRef.tell(se, getSelf());
        }
    }

    /**
     * Removes all subscription state for a terminated connection actor.
     *
     * <p>Fix: the jobId entry in {@code jobIdToActorMap} is removed once its subscriber set is
     * empty; previously empty sets accumulated forever, leaking a map entry per jobId ever
     * subscribed to.
     */
    private void onTerminated(final Terminated t) {
        logger.info("actor terminated {}", t);
        ActorRef terminatedActor = t.actor();
        String jobId = actorToJobIdMap.remove(terminatedActor);
        if (jobId != null) {
            Set<ActorRef> subscribers = jobIdToActorMap.get(jobId);
            if (subscribers != null) {
                subscribers.remove(terminatedActor);
                if (subscribers.isEmpty()) {
                    jobIdToActorMap.remove(jobId);
                }
            }
        }
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
            .match(JobStatusRequest.class, this::onJobStatusRequest)
            .match(LifecycleEventsProto.StatusEvent.class, this::onStatusEvent)
            .match(Terminated.class, this::onTerminated)
            .build();
    }
}
| 8,174 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/LifecycleEventPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.events.LifecycleEventsProto.AuditEvent;
import io.mantisrx.master.events.LifecycleEventsProto.StatusEvent;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerListChangedEvent;
/**
 * Publishes master lifecycle events (audit, status, worker-list changes) to the configured
 * subscribers.
 */
public interface LifecycleEventPublisher {
    /**
     * Publishes an audit event.
     *
     * @param auditEvent the audit event to publish
     */
    void publishAuditEvent(LifecycleEventsProto.AuditEvent auditEvent);

    /**
     * Publishes a status event.
     *
     * @param statusEvent the status event to publish
     */
    void publishStatusEvent(LifecycleEventsProto.StatusEvent statusEvent);

    /**
     * Publishes an event describing a change in a job's worker list.
     *
     * @param workerListChangedEvent the change event to publish
     */
    void publishWorkerListChangedEvent(
        LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent);

    /** Returns a shared publisher that discards every event. */
    public static LifecycleEventPublisher noop() {
        return NOOP;
    }

    // Single shared no-op instance (interface fields are implicitly public static final).
    static LifecycleEventPublisher NOOP = new NoopLifecycleEventPublisher();

    /** {@link LifecycleEventPublisher} implementation that ignores all events. */
    class NoopLifecycleEventPublisher implements LifecycleEventPublisher {
        @Override
        public void publishAuditEvent(AuditEvent auditEvent) {
        }

        @Override
        public void publishStatusEvent(StatusEvent statusEvent) {
        }

        @Override
        public void publishWorkerListChangedEvent(WorkerListChangedEvent workerListChangedEvent) {
        }
    }
}
| 8,175 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/WorkerEventSubscriber.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.events.LifecycleEventsProto.JobStatusEvent;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerListChangedEvent;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.util.Collection;
/**
 * Consumer of worker-related lifecycle events: worker-list changes, job status events and
 * worker status events.
 */
public interface WorkerEventSubscriber {
    void process(final LifecycleEventsProto.WorkerListChangedEvent event);

    void process(final LifecycleEventsProto.JobStatusEvent statusEvent);

    void process(final LifecycleEventsProto.WorkerStatusEvent workerStatusEvent);

    /** Returns a subscriber that forwards every event to both {@code this} and {@code other}. */
    default WorkerEventSubscriber and(WorkerEventSubscriber other) {
        return allOf(ImmutableList.of(this, other));
    }

    /**
     * Returns a composite subscriber that forwards every event to each subscriber in
     * {@code subscriberList}, in iteration order.
     */
    static WorkerEventSubscriber allOf(Collection<WorkerEventSubscriber> subscriberList) {
        return new WorkerEventSubscriber() {
            @Override
            public void process(WorkerListChangedEvent event) {
                for (WorkerEventSubscriber delegate : subscriberList) {
                    delegate.process(event);
                }
            }

            @Override
            public void process(JobStatusEvent statusEvent) {
                for (WorkerEventSubscriber delegate : subscriberList) {
                    delegate.process(statusEvent);
                }
            }

            @Override
            public void process(WorkerStatusEvent workerStatusEvent) {
                for (WorkerEventSubscriber delegate : subscriberList) {
                    delegate.process(workerStatusEvent);
                }
            }
        };
    }
}
| 8,176 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/JobRegistryImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobId;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Simple in-memory {@link JobRegistry} backed by concurrent maps.
 *
 * <p>Several mutation methods are unimplemented stubs, as in the original; see the TODOs below.
 */
public class JobRegistryImpl implements JobRegistry {
    // Job cluster name -> cluster metadata.
    final ConcurrentMap<String, IJobClusterMetadata> jobClusterMap = new ConcurrentHashMap<>();
    // Job id -> job metadata.
    final ConcurrentMap<JobId, IMantisJobMetadata> jobMap = new ConcurrentHashMap<>();
    // Cluster name -> jobs belonging to that cluster. Values are concurrent sets because they
    // are mutated (addAll) outside of any map-level lock; see addJobs.
    final ConcurrentMap<String, Set<IMantisJobMetadata>> clusterToJobsMap = new ConcurrentHashMap<>();

    /** Registers all given job clusters, keyed by cluster name. */
    @Override
    public void addClusters(List<IJobClusterMetadata> jobClusters) {
        jobClusters.forEach(jc -> jobClusterMap.put(jc.getJobClusterDefinition().getName(), jc));
    }

    /** Replaces the stored metadata for the given cluster. */
    @Override
    public void updateCluster(IJobClusterMetadata clusterMetadata) {
        jobClusterMap.put(clusterMetadata.getJobClusterDefinition().getName(), clusterMetadata);
    }

    /** Removes the cluster with the given name (its jobs remain in the job maps). */
    @Override
    public void deleteJobCluster(String clusterName) {
        jobClusterMap.remove(clusterName);
    }

    /** Registers all given jobs and associates them with {@code clusterName}. */
    @Override
    public void addJobs(String clusterName, List<IMantisJobMetadata> jobList) {
        jobList.forEach(jb -> jobMap.put(jb.getJobId(), jb));
        // Fix: the addAll below runs outside the map's compute lock, so the per-cluster set must
        // itself be thread-safe; the original plain HashSet was not.
        clusterToJobsMap.computeIfAbsent(clusterName, x -> ConcurrentHashMap.newKeySet())
            .addAll(jobList);
    }

    @Override
    public void addCompletedJobs(List<JobClusterDefinitionImpl.CompletedJob> completedJobList) {
        // TODO(review): unimplemented in the original; completed jobs are not tracked yet.
    }

    @Override
    public void updateJob(IMantisJobMetadata jobMetadata) {
        // TODO(review): unimplemented in the original.
    }

    @Override
    public void removeJob(JobId jobId) {
        // TODO(review): unimplemented in the original.
    }
}
| 8,177 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/WorkerEventSubscriberLoggingImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link WorkerEventSubscriber} that simply writes every received event to the log.
 */
public class WorkerEventSubscriberLoggingImpl implements WorkerEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(WorkerEventSubscriberLoggingImpl.class);

    /** Logs worker-list change events at INFO. */
    @Override
    public void process(LifecycleEventsProto.WorkerListChangedEvent changedEvent) {
        logger.info("Received worker list changed event {}", changedEvent);
    }

    /** Logs job status events at INFO. */
    @Override
    public void process(LifecycleEventsProto.JobStatusEvent jobStatusEvent) {
        logger.info("Received status event {}", jobStatusEvent);
    }

    /** Logs worker status events at DEBUG, unlike the INFO-level events above. */
    @Override
    public void process(WorkerStatusEvent event) {
        logger.debug("Received worker status event {}", event);
    }
}
| 8,178 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/StatusEventSubscriber.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
/**
 * Consumer of {@link LifecycleEventsProto.StatusEvent}s published by the master.
 */
public interface StatusEventSubscriber {
    /**
     * Processes a single status event.
     *
     * @param statusEvent the event to process
     */
    void process(final LifecycleEventsProto.StatusEvent statusEvent);
}
| 8,179 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/AuditEventSubscriber.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
/**
 * Consumer of {@link LifecycleEventsProto.AuditEvent}s published by the master.
 */
public interface AuditEventSubscriber {
    /**
     * Processes a single audit event.
     *
     * @param event the event to process
     */
    void process(final LifecycleEventsProto.AuditEvent event);
}
| 8,180 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/AuditEventSubscriberLoggingImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link AuditEventSubscriber} that writes every audit event to the log.
 */
public class AuditEventSubscriberLoggingImpl implements AuditEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(AuditEventSubscriberLoggingImpl.class);

    /** Logs the audit event at INFO with an [AUDIT] prefix. */
    @Override
    public void process(final LifecycleEventsProto.AuditEvent auditEvent) {
        logger.info("[AUDIT] {}", auditEvent);
    }
}
| 8,181 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/StatusEventSubscriberAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import akka.actor.ActorRef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link StatusEventSubscriber} that forwards each status event to a broker actor.
 */
public class StatusEventSubscriberAkkaImpl implements StatusEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(StatusEventSubscriberAkkaImpl.class);

    // Broker actor that receives every status event.
    private final ActorRef brokerActor;

    public StatusEventSubscriberAkkaImpl(final ActorRef statusEventBrokerActor) {
        this.brokerActor = statusEventBrokerActor;
    }

    /** Logs the event at DEBUG and hands it to the broker actor (fire-and-forget). */
    @Override
    public void process(final LifecycleEventsProto.StatusEvent statusEvent) {
        logger.debug("[STATUS] {}", statusEvent);
        brokerActor.tell(statusEvent, ActorRef.noSender());
    }
}
| 8,182 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/AuditEventBrokerActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import akka.actor.AbstractActor;
import akka.actor.Props;
import akka.dispatch.BoundedMessageQueueSemantics;
import akka.dispatch.RequiresMessageQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Actor that receives audit events and dispatches them to the configured subscriber.
 */
public class AuditEventBrokerActor extends AbstractActor
    implements RequiresMessageQueue<BoundedMessageQueueSemantics> {
    private static final Logger logger = LoggerFactory.getLogger(AuditEventBrokerActor.class);

    // Subscriber that actually handles each audit event.
    private final AuditEventSubscriber auditEventSubscriber;

    public static Props props(AuditEventSubscriber auditEventSubscriber) {
        return Props.create(AuditEventBrokerActor.class, auditEventSubscriber);
    }

    public AuditEventBrokerActor(AuditEventSubscriber auditEventSubscriber) {
        this.auditEventSubscriber = auditEventSubscriber;
    }

    private void onAuditEvent(final LifecycleEventsProto.AuditEvent event) {
        this.auditEventSubscriber.process(event);
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
            .match(LifecycleEventsProto.AuditEvent.class, this::onAuditEvent)
            .build();
    }
}
| 8,183 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/AuditEventSubscriberAkkaImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import akka.actor.ActorRef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link AuditEventSubscriber} that forwards each audit event to a broker actor.
 */
public class AuditEventSubscriberAkkaImpl implements AuditEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(AuditEventSubscriberAkkaImpl.class);

    // Broker actor that receives every audit event.
    private final ActorRef brokerActor;

    public AuditEventSubscriberAkkaImpl(final ActorRef auditEventBrokerActor) {
        this.brokerActor = auditEventBrokerActor;
    }

    /** Logs the event at DEBUG and hands it to the broker actor (fire-and-forget). */
    @Override
    public void process(final LifecycleEventsProto.AuditEvent auditEvent) {
        logger.debug("[AUDIT] {}", auditEvent);
        brokerActor.tell(auditEvent, ActorRef.noSender());
    }
}
| 8,184 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/JobRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobId;
import java.util.List;
/**
 * Registry of job clusters and jobs known to the master.
 *
 * <p>Redundant {@code public} modifiers were removed: interface methods are implicitly public,
 * and the original mixed both styles.
 */
public interface JobRegistry {
    /** Adds the given job clusters to the registry. */
    void addClusters(List<IJobClusterMetadata> jobClusters);

    /** Replaces the stored metadata for the given cluster. */
    void updateCluster(IJobClusterMetadata clusterMetadata);

    /** Removes the cluster with the given name from the registry. */
    void deleteJobCluster(String clusterName);

    /** Adds the given jobs and associates them with {@code clusterName}. */
    void addJobs(String clusterName, List<IMantisJobMetadata> jobList);

    /** Records the given completed jobs. */
    void addCompletedJobs(List<JobClusterDefinitionImpl.CompletedJob> completedJobList);

    /** Replaces the stored metadata for the given job. */
    void updateJob(IMantisJobMetadata jobMetadata);

    /** Removes the job with the given id from the registry. */
    void removeJob(JobId jobId);
}
| 8,185 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/JobStatusConnectedWSActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Terminated;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Proxy actor that receives the StatusEvent messages from the StatusEventBrokerActor and forwards
* it onto the Websocket connection from the client
*/
public class JobStatusConnectedWSActor extends AbstractActor {
    private static final Logger logger = LoggerFactory.getLogger(JobStatusConnectedWSActor.class);

    private final String jobId;
    private final ActorRef statusEventBrokerActor;

    public static Props props(final String jobId, final ActorRef statusEventBrokerActor) {
        return Props.create(JobStatusConnectedWSActor.class, jobId, statusEventBrokerActor);
    }

    public JobStatusConnectedWSActor(final String jobId, final ActorRef statusEventBrokerActor) {
        this.jobId = jobId;
        this.statusEventBrokerActor = statusEventBrokerActor;
    }

    // Proto
    /** Message announcing the websocket connection actor that should receive job status. */
    public static class Connected {
        private final ActorRef wsActor;

        public Connected(final ActorRef wsActor) {
            this.wsActor = wsActor;
        }

        public ActorRef getWsActor() {
            return wsActor;
        }

        @Override
        public String toString() {
            return "Connected{" +
                "wsActor=" + wsActor +
                '}';
        }
    }

    // Behavior
    @Override
    public Receive createReceive() {
        // Initially only a Connected message is accepted; everything else waits.
        return receiveBuilder()
            .match(Connected.class, this::onConnected)
            .build();
    }

    private void onConnected(final Connected msg) {
        logger.info("connected {}", msg);
        // Subscribe to the broker for this job's (buffered + live) status events.
        statusEventBrokerActor.tell(new StatusEventBrokerActor.JobStatusRequest(jobId), self());
        getContext().watch(msg.wsActor);
        getContext().become(connectedBehavior(msg.wsActor));
    }

    private Receive connectedBehavior(final ActorRef wsActor) {
        return receiveBuilder()
            .match(JobStatus.class, js -> {
                logger.debug("writing to WS {}", js);
                wsActor.tell(js, self());
            })
            .match(Terminated.class, this::onTerminated)
            .build();
    }

    private void onTerminated(final Terminated t) {
        logger.info("actor terminated {}", t);
        // The websocket connection went away, so this proxy stops itself as well.
        getSelf().tell(PoisonPill.getInstance(), ActorRef.noSender());
    }
}
| 8,186 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/events/StatusEventSubscriberLoggingImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link StatusEventSubscriber} that renders each status event as a one-line summary and logs it.
 */
public class StatusEventSubscriberLoggingImpl implements StatusEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(StatusEventSubscriberLoggingImpl.class);

    @Override
    public void process(final LifecycleEventsProto.StatusEvent statusEvent) {
        logger.info("[STATUS] {}", summarize(statusEvent));
    }

    /**
     * Builds the log text: the event type and message, surrounded by subtype-specific
     * identifier/state details when available.
     */
    private static String summarize(final LifecycleEventsProto.StatusEvent statusEvent) {
        final String core = " " + statusEvent.statusEventType + " " + statusEvent.message + " ";
        if (statusEvent instanceof LifecycleEventsProto.WorkerStatusEvent) {
            LifecycleEventsProto.WorkerStatusEvent wse = (LifecycleEventsProto.WorkerStatusEvent) statusEvent;
            return wse.getWorkerId().getId() + core + wse.getWorkerState();
        }
        if (statusEvent instanceof LifecycleEventsProto.JobStatusEvent) {
            LifecycleEventsProto.JobStatusEvent jse = (LifecycleEventsProto.JobStatusEvent) statusEvent;
            return jse.getJobId() + core + jse.getJobState();
        }
        if (statusEvent instanceof LifecycleEventsProto.JobClusterStatusEvent) {
            LifecycleEventsProto.JobClusterStatusEvent jcse = (LifecycleEventsProto.JobClusterStatusEvent) statusEvent;
            return jcse.getJobCluster() + core;
        }
        return core;
    }
}
| 8,187 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/test/java/io/mantisrx/server/master/client/MantisMasterClientApiTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
//import io.mantisrx.master.api.proto.CreateJobClusterRequest;
//import io.mantisrx.master.api.proto.SubmitJobRequest;
//import io.mantisrx.master.core.proto.JobDefinition;
//import io.mantisrx.master.core.proto.MachineDefinition;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.netty.channel.ChannelOption;
import io.netty.channel.WriteBufferWaterMark;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerRequest;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerResponse;
import mantis.io.reactivex.netty.protocol.http.server.RequestHandler;
import org.junit.AfterClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
public class MantisMasterClientApiTest {
private static final Logger logger = LoggerFactory.getLogger(MantisMasterClientApiTest.class);
private static AtomicInteger port = new AtomicInteger(8950);
private static List<HttpServer<String, String>> startedServers = new ArrayList<>();
@AfterClass
public static void cleanup() throws InterruptedException {
for (HttpServer<String, String> startedServer : startedServers) {
logger.info("shutting down server on port {}", startedServer.getServerPort());
startedServer.shutdown();
}
}
public HttpServer<String, String> createHttpServer(int port) {
final HttpServer<String, String> server = RxNetty.newHttpServerBuilder(
port,
new RequestHandler<String, String>() {
@Override
public Observable<Void> handle(HttpServerRequest<String> req, HttpServerResponse<String> resp) {
resp.writeAndFlush("200 OK");
return Observable.empty();
}
})
.pipelineConfigurator(PipelineConfigurators.httpServerConfigurator())
.channelOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WriteBufferWaterMark.DEFAULT)
.build();
return server;
}
    /**
     * Verifies that a scaleJobStage call retried via retryWhen eventually succeeds once the
     * master's HTTP endpoint becomes available: the server is only started on the second retry,
     * so at least two retries must be observed before the request completes.
     */
    @Test
    public void testScaleStageRequestRetries() throws InterruptedException {
        MasterMonitor mockMasterMonitor = mock(MasterMonitor.class);
        final BehaviorSubject<MasterDescription> mdSubject = BehaviorSubject.create();
        when(mockMasterMonitor.getMasterObservable()).thenReturn(mdSubject);
        MantisMasterClientApi mantisMasterClientApi = new MantisMasterClientApi(mockMasterMonitor);
        final int serverPort = port.incrementAndGet();
        final String jobId = "test-job-id";
        final int stageNum = 1;
        final int numWorkers = 2;
        final String reason = "test reason";
        // Announce a master whose API port has no server listening yet, forcing initial failures.
        mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1", serverPort, 7090, 7091, "status", 8900, System.currentTimeMillis()));
        final CountDownLatch retryLatch = new CountDownLatch(2);
        // Retry up to 5 times with a 250ms delay; start the HTTP server on the 2nd attempt.
        final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = new Func1<Observable<? extends Throwable>, Observable<?>>() {
            @Override
            public Observable<?> call(Observable<? extends Throwable> attempts) {
                return attempts
                    .zipWith(Observable.range(1, 5), new Func2<Throwable, Integer, Integer>() {
                        @Override
                        public Integer call(Throwable t1, Integer integer) {
                            return integer;
                        }
                    })
                    .flatMap(new Func1<Integer, Observable<?>>() {
                        @Override
                        public Observable<?> call(Integer retryCount) {
                            logger.info(retryCount + " retrying conx after sleeping for 250ms");
                            if (retryCount == 2) {
                                // Bring the server up asynchronously so a later retry succeeds.
                                Schedulers.newThread().createWorker().schedule(new Action0() {
                                    @Override
                                    public void call() {
                                        final HttpServer<String, String> httpServer = createHttpServer(serverPort);
                                        startedServers.add(httpServer);
                                        httpServer.start();
                                    }
                                });
                            }
                            retryLatch.countDown();
                            return Observable.timer(250, TimeUnit.MILLISECONDS);
                        }
                    });
            }
        };
        final Observable<Boolean> resultObs = mantisMasterClientApi.scaleJobStage(jobId, stageNum, numWorkers, reason)
            .retryWhen(retryLogic);
        final CountDownLatch completedLatch = new CountDownLatch(1);
        resultObs
            .doOnError(new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    fail("got unexpected error" + throwable.getMessage());
                }
            })
            .doOnCompleted(new Action0() {
                @Override
                public void call() {
                    completedLatch.countDown();
                }
            }).subscribe();
        // Both retries must have fired, and the request must complete once the server is up.
        assertTrue(retryLatch.await(5, TimeUnit.SECONDS));
        assertTrue(completedLatch.await(5, TimeUnit.SECONDS));
    }
@Test
public void testScaleStageRequestRetriesNewMaster() throws InterruptedException {
MasterMonitor mockMasterMonitor = mock(MasterMonitor.class);
final BehaviorSubject<MasterDescription> mdSubject = BehaviorSubject.create();
when(mockMasterMonitor.getMasterObservable()).thenReturn(mdSubject);
MantisMasterClientApi mantisMasterClientApi = new MantisMasterClientApi(mockMasterMonitor);
final int oldMasterPort = port.incrementAndGet();
final int newMasterPort = port.incrementAndGet();
final String jobId = "test-job-id";
final int stageNum = 1;
final int numWorkers = 2;
final String reason = "test reason";
mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1", oldMasterPort, 7090, 7091, "status", 8900, System.currentTimeMillis()));
final CountDownLatch retryLatch = new CountDownLatch(3);
final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = new Func1<Observable<? extends Throwable>, Observable<?>>() {
@Override
public Observable<?> call(Observable<? extends Throwable> attempts) {
return attempts
.zipWith(Observable.range(1, 5), new Func2<Throwable, Integer, Integer>() {
@Override
public Integer call(Throwable t1, Integer integer) {
return integer;
}
})
.flatMap(new Func1<Integer, Observable<?>>() {
@Override
public Observable<?> call(Integer retryCount) {
logger.info(retryCount + " retrying conx after sleeping for 250ms");
if (retryCount == 2) {
Schedulers.newThread().createWorker().schedule(new Action0() {
@Override
public void call() {
final HttpServer<String, String> httpServer = createHttpServer(newMasterPort);
startedServers.add(httpServer);
httpServer.start();
}
});
}
if (retryCount == 3) {
mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1", newMasterPort, 7090, 7091, "status", 8900, System.currentTimeMillis()));
}
retryLatch.countDown();
return Observable.timer(250, TimeUnit.MILLISECONDS);
}
});
}
};
final Observable<Boolean> resultObs = mantisMasterClientApi.scaleJobStage(jobId, stageNum, numWorkers, reason)
.retryWhen(retryLogic);
final CountDownLatch completedLatch = new CountDownLatch(1);
resultObs
.doOnError(new Action1<Throwable>() {
@Override
public void call(Throwable throwable) {
fail("got unexpected error" + throwable.getMessage());
}
})
.doOnCompleted(new Action0() {
@Override
public void call() {
completedLatch.countDown();
}
}).subscribe();
assertTrue(retryLatch.await(5, TimeUnit.SECONDS));
assertTrue(completedLatch.await(5, TimeUnit.SECONDS));
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.server.core.Configurations;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.reactivex.mantis.remote.observable.EndpointChange;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
 * Integration-style tests for {@link MasterClientWrapper}. Most test
 * annotations are commented out because they require a live Mantis master
 * reachable through the hard-coded Zookeeper ensemble configured below.
 */
public class MasterClientWrapperTest {

    // Stage number of the sink stage exercised by getSchedulingInfoTest.
    private static final int sinkStageNumber = 3;

    // Connection properties for the (environment-specific) test Zookeeper ensemble.
    static Properties zkProps = new Properties();

    static {
        // NOTE(review): hard-coded cluster addresses — these tests only work
        // against that specific environment.
        zkProps.put("mantis.zookeeper.connectString", "100.67.80.172:2181,100.67.71.221:2181,100.67.89.26:2181,100.67.71.34:2181,100.67.80.18:2181");
        zkProps.put("mantis.zookeeper.leader.announcement.path", "/leader");
        zkProps.put("mantis.zookeeper.root", "/mantis/master");
        zkProps.put("mantis.localmode", "false");
    }

    // Wrapper under test; populated by init(). Since the @Before is commented
    // out, tests using this field rely on init() being invoked manually.
    MasterClientWrapper clientWrapper = null;

    //@Before
    public void init() {
        // Build HA services from the Zookeeper properties and wrap the
        // resulting master client API.
        HighAvailabilityServices haServices = HighAvailabilityServicesUtil.createHAServices(
            Configurations.frmProperties(zkProps, CoreConfiguration.class));
        clientWrapper = new MasterClientWrapper(haServices.getMasterClientApi());
    }

    // @Test
    public void getNamedJobIdsTest() {
        // Every job id resolved for a named job is expected to be prefixed
        // with the job name.
        String jobname = "APIRequestSource";
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper
            .getNamedJobsIds(jobname)
            .subscribe((jId) -> {
                cdLatch.countDown();
                System.out.println("job id " + jId);
                assertTrue(jId.startsWith(jobname));
            });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    // @Test
    public void getSinkLocationsTest() {
        // Resolve the job ids for a named job, then request sink endpoint
        // changes; passes once a single endpoint change is observed.
        String jobname = "APIRequestSource";
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper
            .getNamedJobsIds(jobname)
            .flatMap((jName) -> {
                return clientWrapper.getSinkLocations(jName, 1, 0, 0);
            })
            .subscribe((ep) -> {
                System.out.println("Got EP " + ep.getEndpoint() + " type " + ep.getType());
                cdLatch.countDown();
            });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    //@Test
    public void getSchedulingInfoTest() {
        // End-to-end check: watch sink-stage endpoint changes while
        // resubmitting one sink worker, expecting the endpoint stream to
        // observe the resulting churn (3 latch counts total).
        String jobname = "GroupByIP";
        CountDownLatch cdLatch = new CountDownLatch(3);
        Observable<String> jobidO = clientWrapper.getNamedJobsIds(jobname).take(1).cache().subscribeOn(Schedulers.io());
        Observable<MantisMasterGateway> mmciO = clientWrapper.getMasterClientApi().take(1).cache().subscribeOn(Schedulers.io());
        // First three endpoint changes for the sink stage count the latch down.
        Observable<EndpointChange> epO = jobidO.map((jId) -> clientWrapper.getSinkLocations(jId, sinkStageNumber, 0, 0))
            .flatMap(e -> e)
            .take(3)
            .doOnNext((ep) -> System.out.println("Ep change: " + ep))
            .doOnNext((ep) -> cdLatch.countDown());
        // Pick one worker of the sink stage from the scheduling info and ask
        // the master to resubmit it; assert the resubmit succeeded.
        Observable<Boolean> deleteWorkerO = jobidO.zipWith(mmciO, (String jId, MantisMasterGateway mmci) -> {
            System.out.println("Job id is " + jId);
            return mmci.schedulingChanges(jId)
                .map(jsi -> {
                    Map<Integer, WorkerAssignments> workerAssignments = jsi.getWorkerAssignments();
                    System.out.println("WorkerAssignments -> " + workerAssignments);
                    WorkerAssignments workerAssignmentsForSink = workerAssignments.get(sinkStageNumber);
                    System.out.println("WorkerAssignmentsForSink -> " + workerAssignmentsForSink);
                    Map<Integer, WorkerHost> hostsForSink = workerAssignmentsForSink.getHosts();
                    System.out.println("Host map -> " + hostsForSink);
                    assertTrue(!hostsForSink.isEmpty());
                    // Return the worker number of the first host found.
                    Iterator<Entry<Integer, WorkerHost>> it = hostsForSink.entrySet().iterator();
                    while (it.hasNext()) {
                        Entry<Integer, WorkerHost> e = it.next();
                        return e.getValue().getWorkerNumber();
                    }
                    return -1;
                })
                .take(1)
                .map((Integer workerNo) -> {
                    System.out.println("Worker no is -> " + workerNo);
                    return mmci.resubmitJobWorker(jId, "tester", workerNo, "testing");
                }).flatMap(b -> b);
        })
            .flatMap(b -> b)
            .doOnNext((result) -> {
                assertTrue(result);
                cdLatch.countDown();
            });
        // Subscribe the endpoint stream first, then block on the resubmit;
        // ordering matters so the endpoint watcher is active before the churn.
        epO.subscribeOn(Schedulers.io()).subscribe((ep) -> System.out.println(ep), (t) -> t.printStackTrace(), () -> System.out.println("ep change completed"));
        deleteWorkerO.toBlocking().subscribe((n) -> System.out.println(n), (t) -> t.printStackTrace(),
            () -> System.out.println("worker deletion completed"));
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    // @Test
    public void testJobStatusEndpoint() {
        // Stream and print two job-status updates for a fixed job id.
        HighAvailabilityServices haServices = HighAvailabilityServicesUtil.createHAServices(
            Configurations.frmProperties(zkProps, CoreConfiguration.class));
        MasterClientWrapper clientWrapper = new MasterClientWrapper(haServices.getMasterClientApi());
        String jobId = "PriamRequestSource-45";
        clientWrapper.getMasterClientApi()
            .flatMap(new Func1<MantisMasterGateway, Observable<String>>() {
                @Override
                public Observable<String> call(MantisMasterGateway mantisMasterClientApi) {
                    Integer sinkStage = null;
                    return mantisMasterClientApi.getJobStatusObservable(jobId)
                        .map((status) -> {
                            return status;
                        })
                        ;
                }
            }).take(2).toBlocking().subscribe((ep) -> {
                System.out.println("Endpoint Change -> " + ep);
            });
    }

    @Test
    public void testNamedJobExists() {
        // Verifies the named job is registered on the cluster.
        HighAvailabilityServices haServices = HighAvailabilityServicesUtil.createHAServices(
            Configurations.frmProperties(zkProps, CoreConfiguration.class));
        MasterClientWrapper clientWrapper = new MasterClientWrapper(haServices.getMasterClientApi());
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper.namedJobExists("APIRequestSource")
            .subscribe((exists) -> {
                assertTrue(exists);
                cdLatch.countDown();
            });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }
}
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.resourcecluster;
import static org.asynchttpclient.Dsl.asyncHttpClient;
import static org.asynchttpclient.Dsl.post;
import com.spotify.futures.CompletableFutures;
import io.mantisrx.common.Ack;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.asynchttpclient.AsyncHttpClient;
import org.asynchttpclient.DefaultAsyncHttpClientConfig.Builder;
import org.asynchttpclient.Request;
@ToString(of = {"masterDescription", "clusterID"})
@Slf4j
public class ResourceClusterGatewayClient implements ResourceClusterGateway, Closeable {
private final ClusterID clusterID;
@Getter
private final MasterDescription masterDescription;
private AsyncHttpClient client;
private final ObjectMapper mapper;
public ResourceClusterGatewayClient(
ClusterID clusterID,
MasterDescription masterDescription,
CoreConfiguration configuration) {
this.clusterID = clusterID;
this.masterDescription = masterDescription;
this.mapper = new ObjectMapper();
this.client = buildCloseableHttpClient(configuration);
}
@Override
public void close() throws IOException {
client.close();
}
@Override
public CompletableFuture<Ack> registerTaskExecutor(TaskExecutorRegistration registration) {
return performAction("registerTaskExecutor", registration);
}
@Override
public CompletableFuture<Ack> heartBeatFromTaskExecutor(TaskExecutorHeartbeat heartbeat) {
return performAction("heartBeatFromTaskExecutor", heartbeat);
}
@Override
public CompletableFuture<Ack> notifyTaskExecutorStatusChange(
TaskExecutorStatusChange taskExecutorStatusChange) {
return performAction("notifyTaskExecutorStatusChange", taskExecutorStatusChange);
}
@Override
public CompletableFuture<Ack> disconnectTaskExecutor(
TaskExecutorDisconnection taskExecutorDisconnection) {
return performAction("disconnectTaskExecutor", taskExecutorDisconnection);
}
private CompletableFuture<Ack> performAction(String action, Object body) {
try {
final String bodyStr = mapper.writeValueAsString(body);
final Request request = post(
getActionUri(action)).setBody(bodyStr).addHeader("Content-Type", "application/json").build();
log.debug("request={}", request);
return client.executeRequest(request).toCompletableFuture().thenCompose(response -> {
if (response.getStatusCode() == 200) {
return CompletableFuture.completedFuture(Ack.getInstance());
}
else if (response.getStatusCode() == 429) {
log.warn("request was throttled on control plane side: {}", request);
return CompletableFutures.exceptionallyCompletedFuture(
new RequestThrottledException("request was throttled on control plane side: " + request));
}
else {
try {
log.error("failed request {} with response {}", request, response.getResponseBody());
return CompletableFutures.exceptionallyCompletedFuture(
mapper.readValue(response.getResponseBody(), Throwable.class));
} catch (Exception e) {
return CompletableFutures.exceptionallyCompletedFuture(
new Exception(String.format("response=%s", response), e));
}
}
});
} catch (Exception e) {
return CompletableFutures.exceptionallyCompletedFuture(e);
}
}
private String getActionUri(String action) {
String uri = String.format("http://%s:%d/api/v1/resourceClusters/%s/actions/%s",
masterDescription.getHostname(), masterDescription.getApiPort(), clusterID.getResourceID(),
action);
log.debug("uri={}", uri);
return uri;
}
private AsyncHttpClient buildCloseableHttpClient(CoreConfiguration configuration) {
return asyncHttpClient(
new Builder()
.setMaxConnections(configuration.getAsyncHttpClientMaxConnectionsPerHost())
.setConnectTimeout(configuration.getAsyncHttpClientConnectionTimeoutMs())
.setRequestTimeout(configuration.getAsyncHttpClientRequestTimeoutMs())
.setReadTimeout(configuration.getAsyncHttpClientReadTimeoutMs())
.build());
}
}
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.common.Ack;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.JobAssignmentResult;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.core.Status;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import rx.Observable;
/**
 * Client-side gateway to the Mantis master: job submission, scaling, worker
 * resubmission, named-job lookups, and streams of scheduling/status changes.
 * Methods returning {@link Observable} are lazy — work starts on subscribe.
 */
public interface MantisMasterGateway {

    /** Streams worker-assignment (scheduling) changes for the given job. */
    Observable<JobSchedulingInfo> schedulingChanges(final String jobId);

    /**
     * Requests that the given stage be scaled to {@code numWorkers} workers.
     *
     * @param reason free-form human-readable justification for the scale action
     */
    Observable<Boolean> scaleJobStage(
        final String jobId,
        final int stageNum,
        final int numWorkers,
        final String reason);

    /** Requests resubmission (replacement) of a single worker of a job. */
    Observable<Boolean> resubmitJobWorker(final String jobId, final String user, final int workerNum,
                                          final String reason);

    /** Streams info (e.g. latest job id) for the given named job. */
    Observable<NamedJobInfo> namedJobInfo(final String jobName);

    /** Emits whether a named job with the given name exists. */
    Observable<Boolean> namedJobExists(final String jobName);

    /** Emits the stage number of the job's sink stage. */
    Observable<Integer> getSinkStageNum(final String jobId);

    /** Submits a job; overloads add subscription timeout, job-master readiness,
     *  migration config, and labels on top of this base form. */
    Observable<JobSubmitResponse> submitJob(final String name, final String version,
                                            final List<Parameter> parameters,
                                            final JobSla jobSla,
                                            final SchedulingInfo schedulingInfo);

    Observable<JobSubmitResponse> submitJob(final String name, final String version,
                                            final List<Parameter> parameters,
                                            final JobSla jobSla,
                                            final long subscriptionTimeoutSecs,
                                            final SchedulingInfo schedulingInfo);

    Observable<JobSubmitResponse> submitJob(final String name, final String version,
                                            final List<Parameter> parameters,
                                            final JobSla jobSla,
                                            final long subscriptionTimeoutSecs,
                                            final SchedulingInfo schedulingInfo,
                                            final boolean readyForJobMaster);

    Observable<JobSubmitResponse> submitJob(final String name, final String version,
                                            final List<Parameter> parameters,
                                            final JobSla jobSla,
                                            final long subscriptionTimeoutSecs,
                                            final SchedulingInfo schedulingInfo,
                                            final boolean readyForJobMaster,
                                            final WorkerMigrationConfig migrationConfig);

    Observable<JobSubmitResponse> submitJob(final String name, final String version,
                                            final List<Parameter> parameters,
                                            final JobSla jobSla,
                                            final long subscriptionTimeoutSecs,
                                            final SchedulingInfo schedulingInfo,
                                            final boolean readyForJobMaster,
                                            final WorkerMigrationConfig migrationConfig,
                                            final List<Label> labels);

    /** Kills the given job. */
    Observable<Void> killJob(final String jobId);

    /** Kills the given job, recording the requesting user and reason. */
    Observable<Void> killJob(final String jobId, final String user, final String reason);

    /** Emits ids of the named job's jobs filtered by the given meta state. */
    Observable<String> getJobsOfNamedJob(final String jobName, final MantisJobState.MetaState state);

    /** Streams raw status updates for the given job. */
    Observable<String> getJobStatusObservable(final String jobId);

    /** Streams scheduling/assignment results for the given job. */
    Observable<JobAssignmentResult> assignmentResults(String jobId);

    /**
     * Update the status of the worker to the mantis-master.
     *
     * @param status status that contains all the information about the worker such as the WorkerId,
     *               State of the worker, etc...
     * @return Acknowledgement if the update was received by the mantis-master.
     */
    CompletableFuture<Ack> updateStatus(Status status);
}
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.core.zookeeper.CuratorService;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ResourceClusterGateway;
import io.mantisrx.server.master.resourcecluster.ResourceClusterGatewayClient;
import io.mantisrx.shaded.com.google.common.util.concurrent.AbstractIdleService;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import lombok.extern.slf4j.Slf4j;
import rx.Scheduler;
import rx.Subscription;
import rx.schedulers.Schedulers;
/**
 * Factory for {@link HighAvailabilityServices} instances built from a
 * {@link CoreConfiguration}: a fixed in-process implementation for local mode,
 * or a Zookeeper-backed implementation that tracks the current leader.
 * A single instance is shared process-wide.
 */
@Slf4j
public class HighAvailabilityServicesUtil {

    // Process-wide singleton; created lazily on the first createHAServices call.
    private final static AtomicReference<HighAvailabilityServices> HAServiceInstanceRef = new AtomicReference<>();

    /**
     * Returns the shared HighAvailabilityServices instance, creating it on
     * first use. In local mode the "zk connection string" is reinterpreted as
     * "host:apiPort" of a fixed master rather than a Zookeeper address.
     */
    public static HighAvailabilityServices createHAServices(CoreConfiguration configuration) {
        if (configuration.isLocalMode()) {
            log.warn("HA service running in local mode. This is only valid in local test.");
            if (HAServiceInstanceRef.get() == null) {
                String[] parts = configuration.getZkConnectionString().split(":");
                if (parts.length != 2) {
                    throw new RuntimeException(
                        "invalid local mode connection string: " + configuration.getZkConnectionString());
                }
                int apiPort = Integer.parseInt(parts[1]);
                // compareAndSet makes concurrent first calls safe: only one
                // instance ever wins the race.
                HAServiceInstanceRef.compareAndSet(null, new LocalHighAvailabilityServices(
                    new MasterDescription(
                        parts[0],
                        "127.0.0.1",
                        apiPort,
                        apiPort,
                        apiPort,
                        "api/postjobstatus",
                        apiPort + 6,
                        System.currentTimeMillis()),
                    configuration));
            }
        }
        else {
            if (HAServiceInstanceRef.get() == null) {
                HAServiceInstanceRef.compareAndSet(null, new ZkHighAvailabilityServices(configuration));
            }
        }
        return HAServiceInstanceRef.get();
    }

    /**
     * HighAvailabilityServices for local mode: the master never changes, so
     * everything is served from a single fixed MasterDescription.
     */
    private static class LocalHighAvailabilityServices extends AbstractIdleService implements HighAvailabilityServices {

        private final MasterMonitor masterMonitor;
        private final CoreConfiguration configuration;

        public LocalHighAvailabilityServices(MasterDescription masterDescription, CoreConfiguration configuration) {
            this.masterMonitor = new LocalMasterMonitor(masterDescription);
            this.configuration = configuration;
        }

        @Override
        public MantisMasterGateway getMasterClientApi() {
            return new MantisMasterClientApi(this.masterMonitor);
        }

        @Override
        public MasterMonitor getMasterMonitor() {
            return this.masterMonitor;
        }

        @Override
        public ResourceLeaderConnection<ResourceClusterGateway> connectWithResourceManager(ClusterID clusterID) {
            return new ResourceLeaderConnection<ResourceClusterGateway>() {
                final MasterMonitor masterMonitor = LocalHighAvailabilityServices.this.masterMonitor;

                @Override
                public ResourceClusterGateway getCurrent() {
                    return new ResourceClusterGatewayClient(clusterID, masterMonitor.getLatestMaster(), configuration);
                }

                @Override
                public void register(ResourceLeaderChangeListener<ResourceClusterGateway> changeListener) {
                    // Intentionally a no-op: the local master never changes,
                    // so there is nothing to notify listeners about.
                }
            };
        }

        @Override
        protected void startUp() throws Exception {
            // Nothing to start in local mode.
        }

        @Override
        protected void shutDown() throws Exception {
            // Nothing to stop in local mode.
        }
    }

    /**
     * Zookeeper based implementation of HighAvailabilityServices that finds the various leader instances
     * through metadata stored on zookeeper.
     */
    private static class ZkHighAvailabilityServices extends AbstractIdleService implements
        HighAvailabilityServices {

        private final CuratorService curatorService;
        // Counts actual leader changes propagated to listeners.
        private final Counter resourceLeaderChangeCounter;
        // Counts leader notifications that matched the already-known master.
        private final Counter resourceLeaderAlreadyRegisteredCounter;
        // Sequence number used to name each connection's scheduler thread.
        private final AtomicInteger rmConnections = new AtomicInteger(0);
        private final CoreConfiguration configuration;

        public ZkHighAvailabilityServices(CoreConfiguration configuration) {
            curatorService = new CuratorService(configuration);
            final Metrics metrics = MetricsRegistry.getInstance().registerAndGet(new Metrics.Builder()
                .name("ZkHighAvailabilityServices")
                .addCounter("resourceLeaderChangeCounter")
                .addCounter("resourceLeaderAlreadyRegisteredCounter")
                .build());
            resourceLeaderChangeCounter = metrics.getCounter("resourceLeaderChangeCounter");
            resourceLeaderAlreadyRegisteredCounter = metrics.getCounter("resourceLeaderAlreadyRegisteredCounter");
            this.configuration = configuration;
        }

        @Override
        protected void startUp() throws Exception {
            curatorService.start();
        }

        @Override
        protected void shutDown() throws Exception {
            curatorService.shutdown();
        }

        @Override
        public MantisMasterGateway getMasterClientApi() {
            return new MantisMasterClientApi(curatorService.getMasterMonitor());
        }

        @Override
        public MasterMonitor getMasterMonitor() {
            return curatorService.getMasterMonitor();
        }

        @Override
        public ResourceLeaderConnection<ResourceClusterGateway> connectWithResourceManager(
            ClusterID clusterID) {
            return new ResourceLeaderConnection<ResourceClusterGateway>() {
                final MasterMonitor masterMonitor = curatorService.getMasterMonitor();

                // Gateway for the currently-known leader; swapped on the
                // single-threaded scheduler below when leadership moves.
                ResourceClusterGateway currentResourceClusterGateway =
                    new ResourceClusterGatewayClient(clusterID, masterMonitor.getLatestMaster(), configuration);

                final String nameFormat =
                    "ResourceClusterGatewayCxn (" + rmConnections.getAndIncrement() + ")-%d";
                // Single thread so leader-change handling is serialized.
                final Scheduler scheduler =
                    Schedulers
                        .from(
                            Executors
                                .newSingleThreadExecutor(
                                    new ThreadFactoryBuilder().setNameFormat(nameFormat).build()));

                // NOTE(review): subscriptions are collected but never
                // unsubscribed — each connection lives for the process
                // lifetime; confirm this is intentional.
                final List<Subscription> subscriptions = new ArrayList<>();

                @Override
                public ResourceClusterGateway getCurrent() {
                    return currentResourceClusterGateway;
                }

                @Override
                public void register(ResourceLeaderChangeListener<ResourceClusterGateway> changeListener) {
                    Subscription subscription = masterMonitor
                        .getMasterObservable()
                        .observeOn(scheduler)
                        .subscribe(nextDescription -> {
                            log.info("nextDescription={}", nextDescription);
                            // Ignore announcements that do not actually change the leader.
                            if (nextDescription.equals(((ResourceClusterGatewayClient)currentResourceClusterGateway).getMasterDescription())) {
                                resourceLeaderAlreadyRegisteredCounter.increment();
                                return;
                            }
                            ResourceClusterGateway previous = currentResourceClusterGateway;
                            currentResourceClusterGateway = new ResourceClusterGatewayClient(clusterID, nextDescription, configuration);
                            resourceLeaderChangeCounter.increment();
                            changeListener.onResourceLeaderChanged(previous, currentResourceClusterGateway);
                        });
                    subscriptions.add(subscription);
                }
            };
        }
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import com.mantisrx.common.utils.Services;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.network.Endpoint;
import io.mantisrx.common.network.WorkerEndpoint;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Configurations;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.reactivex.mantis.remote.observable.EndpointChange;
import io.reactivex.mantis.remote.observable.ToDeltaEndpointInjector;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.subjects.PublishSubject;
public class MasterClientWrapper {
public static final String InvalidNamedJob = "No_such_named_job";
private static final Logger logger = LoggerFactory.getLogger(MasterClientWrapper.class);
private final Counter masterConnectRetryCounter;
private final MantisMasterGateway masterClientApi;
private final PublishSubject<JobSinkNumWorkers> numSinkWorkersSubject = PublishSubject.create();
private final PublishSubject<JobNumWorkers> numWorkersSubject = PublishSubject.create();
// blocks until getting master info from zookeeper
public MasterClientWrapper(MantisMasterGateway gateway) {
    this.masterClientApi = gateway;
    // Register the retry counter with the shared registry; registerAndGet
    // returns the canonical Metrics instance to read the counter from.
    final Metrics registered = MetricsRegistry.getInstance().registerAndGet(
        new Metrics.Builder()
            .name(MasterClientWrapper.class.getCanonicalName())
            .addCounter("MasterConnectRetryCount")
            .build());
    masterConnectRetryCounter = registered.getCounter("MasterConnectRetryCount");
}
/**
 * Encodes the worker number into the host name as "<host>-<workerNumber>";
 * the inverse of {@link #getUnwrappedHost(String)}.
 */
public static String getWrappedHost(String host, int workerNumber) {
    return new StringBuilder(host).append('-').append(workerNumber).toString();
}
/**
 * Strips the "-<workerNumber>" suffix added by {@link #getWrappedHost}. Only
 * the LAST dash is treated as the separator, so dashes inside the host name
 * survive; input without any dash is returned unchanged.
 */
public static String getUnwrappedHost(String wrappedHost) {
    final int sep = wrappedHost.lastIndexOf('-');
    return (sep < 0) ? wrappedHost : wrappedHost.substring(0, sep);
}
/**
 * Ad-hoc manual driver: connects to a fixed Zookeeper ensemble, resolves the
 * sink stage of a fixed job, and prints sink endpoint changes for ~50s.
 */
public static void main(String[] args) throws InterruptedException {
    Properties zkProps = new Properties();
    zkProps.put("mantis.zookeeper.connectString", "ec2-50-19-255-1.compute-1.amazonaws.com:2181,ec2-54-235-159-245.compute-1.amazonaws.com:2181,ec2-50-19-255-97.compute-1.amazonaws.com:2181,ec2-184-73-152-248.compute-1.amazonaws.com:2181,ec2-50-17-247-179.compute-1.amazonaws.com:2181");
    zkProps.put("mantis.zookeeper.leader.announcement.path", "/leader");
    zkProps.put("mantis.zookeeper.root", "/mantis/master");
    String jobId = "GroupByIPNJ-12";
    HighAvailabilityServices haServices =
        HighAvailabilityServicesUtil.createHAServices(Configurations.frmProperties(zkProps, CoreConfiguration.class));
    Services.startAndWait(haServices);
    MasterClientWrapper clientWrapper = new MasterClientWrapper(haServices.getMasterClientApi());
    clientWrapper.getMasterClientApi()
        .flatMap((MantisMasterGateway mantisMasterClientApi) -> mantisMasterClientApi.getSinkStageNum(jobId)
            .take(1) // only need to figure out sink stage number once
            .flatMap((Integer sinkStage) -> {
                logger.info("Getting sink locations for " + jobId);
                return clientWrapper.getSinkLocations(jobId, sinkStage, 0, 0);
            }))
        .toBlocking().subscribe((ep) -> {
            System.out.println("Endpoint Change -> " + ep);
        });
    Thread.sleep(50000);
}
/** Subscribes the observer to per-job sink-stage worker-count updates. */
public void addNumSinkWorkersObserver(Observer<JobSinkNumWorkers> numSinkWorkersObserver) {
    numSinkWorkersSubject.subscribe(numSinkWorkersObserver);
}
/** Subscribes the observer to per-job total worker-count updates. */
public void addNumWorkersObserver(Observer<JobNumWorkers> numWorkersObserver) {
    numWorkersSubject.subscribe(numWorkersObserver);
}
/**
 * Returns an Observable that emits the (already constructed) master client
 * API exactly once per subscription and then completes.
 */
public Observable<MantisMasterGateway> getMasterClientApi() {
    return Observable.just(masterClientApi);
}
/**
 * Builds connectable Endpoints for every Started worker of every stage except
 * stage 0 (skipped — see inline comment), and, as a side effect, publishes the
 * job's total worker count (summed across non-zero stages) to
 * numWorkersSubject.
 *
 * @param workerAssignments stage number -> worker assignments for that stage
 * @return endpoints for all Started workers outside stage 0
 */
private List<Endpoint> getAllNonJobMasterEndpoints(final String jobId, final Map<Integer, WorkerAssignments> workerAssignments) {
    List<Endpoint> endpoints = new ArrayList<>();
    int totalWorkers = 0;
    for (Map.Entry<Integer, WorkerAssignments> workerAssignment : workerAssignments.entrySet()) {
        final Integer stageNum = workerAssignment.getKey();
        // skip workers for stage 0
        if (stageNum == 0) {
            continue;
        }
        final WorkerAssignments assignments = workerAssignment.getValue();
        logger.info("job {} Creating endpoints conx from {} worker assignments for stage {}",
            jobId, assignments.getHosts().size(), stageNum);
        if (logger.isDebugEnabled()) {
            logger.debug("stage {} hosts: {}", stageNum, assignments.getHosts());
        }
        // Total counts configured workers, not just Started ones.
        totalWorkers += assignments.getNumWorkers();
        for (WorkerHost host : assignments.getHosts().values()) {
            final int workerIndex = host.getWorkerIndex();
            // Only Started workers get endpoints; others are not connectable yet.
            if (host.getState() == MantisJobState.Started) {
                logger.info("job " + jobId + ": creating new endpoint for worker number=" + host.getWorkerNumber()
                    + ", index=" + host.getWorkerIndex() + ", host:port=" + host.getHost() + ":" +
                    host.getPort().get(0));
                // Host name is wrapped with the worker number so endpoint
                // identity is unique per worker incarnation.
                Endpoint ep = new WorkerEndpoint(getWrappedHost(host.getHost(), host.getWorkerNumber()), host.getPort().get(0),
                    stageNum, host.getMetricsPort(), host.getWorkerIndex(), host.getWorkerNumber(),
                    // completed callback
                    new Action0() {
                        @Override
                        public void call() {
                            logger.info("job " + jobId + " WorkerIndex " + workerIndex + " completed");
                        }
                    },
                    // error callback
                    new Action1<Throwable>() {
                        @Override
                        public void call(Throwable t1) {
                            logger.info("job " + jobId + " WorkerIndex " + workerIndex + " failed");
                        }
                    }
                );
                endpoints.add(ep);
            }
        }
    }
    numWorkersSubject.onNext(new JobNumWorkers(jobId, totalWorkers));
    return endpoints;
}
/**
 * Streams endpoint deltas (added/removed) for the metrics ports of all non-job-master
 * workers of the given job, driven by the master's scheduling-change stream.
 * Retries the scheduling stream up to 10 times via {@link ConditionalRetry}.
 * <p>
 * Anonymous {@code Func1}/{@code Action1} classes were replaced with lambdas for
 * consistency with {@code getSinkLocations(...)}; the operator chain is unchanged.
 *
 * @param jobId job to watch for scheduling changes
 * @return stream of endpoint change events derived from successive scheduling updates
 */
public Observable<EndpointChange> getAllWorkerMetricLocations(final String jobId) {
    final ConditionalRetry schedInfoRetry = new ConditionalRetry(masterConnectRetryCounter, "AllSchedInfoRetry", 10);
    Observable<List<Endpoint>> schedulingUpdates =
        masterClientApi
            .schedulingChanges(jobId)
            .doOnError((Throwable throwable) ->
                logger.warn("Error on scheduling changes observable: " + throwable))
            .retryWhen(schedInfoRetry.getRetryLogic())
            .map((JobSchedulingInfo jobSchedulingInfo) -> {
                logger.info("Got scheduling info for " + jobId);
                return jobSchedulingInfo.getWorkerAssignments();
            })
            // Drop updates that carry no assignment map at all.
            .filter((Map<Integer, WorkerAssignments> workerAssignments) -> workerAssignments != null)
            .map((Map<Integer, WorkerAssignments> workerAssignments) ->
                getAllNonJobMasterEndpoints(jobId, workerAssignments))
            .doOnError((Throwable throwable) -> logger.error(throwable.getMessage(), throwable));
    // Convert successive full endpoint lists into add/remove deltas.
    return (new ToDeltaEndpointInjector(schedulingUpdates)).deltas();
}
/**
 * Streams endpoint deltas for the sink-stage workers of a job, optionally restricted to a
 * partition of the sink workers via {@code usePartition(...)}.
 * <p>
 * Fix: the per-update worker counts ({@code totalFromPartitions}, {@code runningWorkers})
 * and the {@code numSinkWorkersSubject.onNext(...)} emission were previously inside the
 * per-host loop, recounting all hosts for every host (O(n^2)) and publishing one identical
 * {@link JobSinkNumWorkers} event per host. They are now computed and published once per
 * scheduling update. (Side effect of the fix: an update whose host map is non-null but
 * empty now publishes a single zero-running event instead of none.)
 *
 * @param jobId           job whose sink endpoints are wanted
 * @param sinkStage       stage number of the sink stage
 * @param forPartition    partition index to serve, or negative for "no partitioning"
 * @param totalPartitions total partition count, or 0 for "no partitioning"
 * @return stream of endpoint change events for the selected sink workers
 */
public Observable<EndpointChange> getSinkLocations(final String jobId, final int sinkStage,
                                                   final int forPartition, final int totalPartitions) {
    final ConditionalRetry schedInfoRetry = new ConditionalRetry(masterConnectRetryCounter, "SchedInfoRetry", 10);
    Observable<List<Endpoint>> schedulingUpdates =
        masterClientApi
            .schedulingChanges(jobId)
            .doOnError((Throwable throwable) -> {
                logger.warn(throwable.getMessage());
            })
            .retryWhen(schedInfoRetry.getRetryLogic())
            .map((JobSchedulingInfo jobSchedulingInfo) -> {
                logger.info("Got scheduling info for {}", jobId);
                if (logger.isDebugEnabled()) {
                    logger.debug("Worker Assignments {}", jobSchedulingInfo.getWorkerAssignments().get(sinkStage));
                }
                return jobSchedulingInfo.getWorkerAssignments().get(sinkStage);
            })
            // Worker assignments can be empty if the job has completed so do not filter these events out
            .map((WorkerAssignments workerAssignments) -> {
                List<Endpoint> endpoints = new ArrayList<>();
                if (workerAssignments != null) {
                    logger.info("job " + jobId + " Creating endpoints conx from " + workerAssignments.getHosts().size() + " worker assignments");
                    final int totalFromPartitions = workerAssignments.getNumWorkers();
                    // Count started workers once per update (previously recounted per host).
                    final int runningWorkers = (int) workerAssignments
                        .getHosts()
                        .values()
                        .stream()
                        .filter(e -> MantisJobState.isOnStartedState(e.getState()))
                        .count();
                    // Publish the counts once per scheduling update (previously once per host).
                    numSinkWorkersSubject.onNext(new JobSinkNumWorkers(jobId, totalFromPartitions, runningWorkers));
                    for (WorkerHost host : workerAssignments.getHosts().values()) {
                        final int workerIndex = host.getWorkerIndex();
                        if (usePartition(workerIndex, totalFromPartitions, forPartition, totalPartitions)) {
                            // Only started workers expose a connectable sink port.
                            if (host.getState() == MantisJobState.Started) {
                                Endpoint ep = new Endpoint(getWrappedHost(host.getHost(), host.getWorkerNumber()), host.getPort().get(0),
                                    // completed callback
                                    () -> logger.info("job " + jobId + " WorkerIndex " + workerIndex + " completed"),
                                    // error callback
                                    t1 -> logger.info("job " + jobId + " WorkerIndex " + workerIndex + " failed")
                                );
                                endpoints.add(ep);
                            }
                        }
                    }
                } else {
                    logger.info("job " + jobId + " Has no active workers!");
                }
                return endpoints;
            })
            .doOnError((Throwable throwable) -> {
                logger.error(throwable.getMessage(), throwable);
            });
    return (new ToDeltaEndpointInjector(schedulingUpdates)).deltas();
}
/**
 * Decides whether the source worker at {@code fromPartition} belongs to consumer
 * partition {@code toPartition}. Source partitions are divided into contiguous chunks of
 * rounded size {@code fromTotalPartitions / toTotalPartitions}; the last consumer
 * partition absorbs any remainder.
 */
private boolean usePartition(int fromPartition, int fromTotalPartitions, int toPartition, int toTotalPartitions) {
    // A negative target partition or zero total target partitions means "not partitioning":
    // every source worker is used.
    final boolean partitioningDisabled = toPartition < 0 || toTotalPartitions == 0;
    if (partitioningDisabled) {
        return true;
    }
    // Nominal chunk size: source partitions per consumer partition, rounded to nearest.
    final long chunk = Math.round((double) fromTotalPartitions / (double) toTotalPartitions);
    final long lo = toPartition * chunk;
    final boolean lastConsumer = toPartition == toTotalPartitions - 1;
    final long hi = lastConsumer ? fromTotalPartitions : (toPartition + 1) * chunk;
    return lo < fromTotalPartitions && fromPartition >= lo && fromPartition < hi;
}
/**
 * Asks the master whether a job cluster with the given name exists, retrying on errors
 * effectively indefinitely (Integer.MAX_VALUE attempts) via {@link ConditionalRetry}.
 *
 * @param jobName job cluster name to check
 * @return Observable emitting the master's existence answer
 */
public Observable<Boolean> namedJobExists(final String jobName) {
    final ConditionalRetry namedJobRetry = new ConditionalRetry(masterConnectRetryCounter, "NamedJobExists", Integer.MAX_VALUE);
    logger.info("verifying if job name exists: " + jobName);
    return masterClientApi.namedJobExists(jobName).retryWhen(namedJobRetry.getRetryLogic());
}
/**
 * Resolves a job cluster name to the job id(s) reported by the master.
 * First checks existence; if the cluster is missing, flags the error on the retry policy
 * and emits a sentinel {@code NamedJobInfo(jobName, InvalidNamedJob)}, otherwise streams
 * the cluster's job info and maps each entry to its job id.
 *
 * @param jobName job cluster name to resolve
 * @return Observable of job id strings for the named cluster
 */
public Observable<String> getNamedJobsIds(final String jobName) {
    final ConditionalRetry namedJobsIdsRetry = new ConditionalRetry(masterConnectRetryCounter, "NamedJobsIds", Integer.MAX_VALUE);
    // NOTE(review): message appears copy-pasted from namedJobExists(); a message
    // mentioning id resolution would be clearer.
    logger.info("verifying if job name exists: " + jobName);
    return masterClientApi.namedJobExists(jobName)
        // On error from the existence check: log and complete empty (no retry here).
        .onErrorResumeNext((Throwable throwable) -> {
            logger.error(throwable.getMessage());
            return Observable.empty();
        })
        .take(1)
        // Map the boolean answer to an inner Observable<NamedJobInfo>; flattened below.
        .map((exists) -> {
            if (!exists) {
                // Flag the failure so the retry policy aborts with this error on any
                // subsequent retry — TODO(review): confirm intended interplay with the
                // InvalidNamedJob sentinel emitted just below.
                final Exception exception = new Exception("No such Job Cluster " + jobName);
                namedJobsIdsRetry.setErrorRef(exception);
                return Observable.just(new NamedJobInfo(jobName, InvalidNamedJob));
            }
            logger.info("Getting Job cluster info for " + jobName);
            return masterClientApi.namedJobInfo(jobName);
        })
        .doOnError((Throwable throwable) -> {
            logger.error(throwable.getMessage(), throwable);
        })
        .retryWhen(namedJobsIdsRetry.getRetryLogic())
        // Flatten the inner NamedJobInfo stream and extract job ids.
        .flatMap((Observable<NamedJobInfo> namedJobInfo) -> {
            return namedJobInfo.map((NamedJobInfo nji) -> {
                return nji.getJobId();
            });
        });
}
/**
 * Immutable event describing sink-stage worker counts for a job, published to
 * {@code numSinkWorkersSubject} while processing sink scheduling updates.
 */
public static class JobSinkNumWorkers {
    // Total workers assigned to the sink stage (WorkerAssignments.getNumWorkers()).
    protected final int numSinkWorkers;
    // Subset of those workers currently in a started state.
    protected final int numSinkRunningWorkers;
    // Id of the job these counts refer to.
    private final String jobId;

    public JobSinkNumWorkers(String jobId, int numSinkWorkers, int numSinkRunningWorkers) {
        this.jobId = jobId;
        this.numSinkWorkers = numSinkWorkers;
        this.numSinkRunningWorkers = numSinkRunningWorkers;
    }

    public String getJobId() {
        return jobId;
    }

    public int getNumSinkWorkers() {
        return numSinkWorkers;
    }

    public int getNumSinkRunningWorkers() {
        return numSinkRunningWorkers;
    }
}
/**
 * Immutable event describing the total worker count (across non-job-master stages) for a
 * job, published to {@code numWorkersSubject} by getAllNonJobMasterEndpoints(...).
 */
public static class JobNumWorkers {
    // Sum of assigned workers across all stages except stage 0.
    protected final int numWorkers;
    // Id of the job the count refers to.
    private final String jobId;

    public JobNumWorkers(String jobId, int numWorkers) {
        this.jobId = jobId;
        this.numWorkers = numWorkers;
    }

    public String getJobId() {
        return jobId;
    }

    public int getNumWorkers() {
        return numWorkers;
    }
}
}
| 8,193 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/MantisProtoAdapter.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//package io.mantisrx.server.master.client;
//
//import io.mantisrx.common.Label;
//import io.mantisrx.master.api.proto.CreateJobClusterRequest;
//import io.mantisrx.master.api.proto.UpdateJobClusterRequest;
//import io.mantisrx.master.core.proto.JobDefinition;
//import io.mantisrx.runtime.JobConstraints;
//import io.mantisrx.runtime.JobOwner;
//import io.mantisrx.runtime.JobSla;
//import io.mantisrx.runtime.MachineDefinition;
//import io.mantisrx.runtime.MantisJobDefinition;
//import io.mantisrx.runtime.MantisJobDurationType;
//import io.mantisrx.runtime.NamedJobDefinition;
//import io.mantisrx.runtime.WorkerMigrationConfig;
//import io.mantisrx.runtime.descriptor.SchedulingInfo;
//import io.mantisrx.runtime.descriptor.StageScalingPolicy;
//import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
//import io.mantisrx.runtime.parameter.Parameter;
//
//import java.net.MalformedURLException;
//import java.net.URL;
//import java.util.stream.Collectors;
//
//public class MantisProtoAdapter {
// public static final StageScalingPolicy.Strategy toStageScalingStrategy(final io.mantisrx.master.core.proto.StageScalingPolicy.Strategy s) {
// return new StageScalingPolicy.Strategy(
// StageScalingPolicy.ScalingReason.valueOf(s.getReason().name()),
// s.getScaleDownBelowPct(),
// s.getScaleUpAbovePct(),
// s.hasRollingCount() ?
// new StageScalingPolicy.RollingCount(
// s.getRollingCount().getCount(),
// s.getRollingCount().getOf()) :
// null
// );
// }
// public static final StageScalingPolicy toStageScalingPolicy(final io.mantisrx.master.core.proto.StageScalingPolicy p) {
// return new StageScalingPolicy(
// p.getStage(),
// p.getMin(),
// p.getMax(),
// p.getIncrement(),
// p.getDecrement(),
// p.getCoolDownSecs(),
// p.getStrategiesMap().entrySet().stream().collect(
// Collectors.toMap(
// e -> StageScalingPolicy.ScalingReason.valueOf(e.getKey()),
// e -> toStageScalingStrategy(e.getValue())
// )
// )
// );
// }
//
// public static final MachineDefinition toMachineDefinition(final io.mantisrx.master.core.proto.MachineDefinition md) {
// return new MachineDefinition(md.getCpuCores(),
// md.getMemoryMB(), md.getNetworkMbps(), md.getDiskMB(), md.getNumPorts());
// }
//
// private static final StageSchedulingInfo toStageSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo.StageSchedulingInfo s) {
// return new StageSchedulingInfo(
// s.getNumberOfInstances(),
// toMachineDefinition(s.getMachineDefinition()),
// s.getHardConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()),
// s.getSoftConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()),
// s.hasScalingPolicy() ? toStageScalingPolicy(s.getScalingPolicy()) : null,
// s.getScalable()
// );
// }
// private static final SchedulingInfo toSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo s) {
//
// return new SchedulingInfo(
// s.getStagesMap().entrySet().stream()
// .collect(Collectors.toMap(e -> e.getKey(),
// e -> toStageSchedulingInfo(e.getValue())))
// );
// }
//
// public static final JobSla toJobSla(final io.mantisrx.master.core.proto.JobSla protoSla) {
// return new JobSla(protoSla.getRuntimeLimitSecs(),
// protoSla.getMinRuntimeSecs(),
// JobSla.StreamSLAType.valueOf(protoSla.getSlaType().name()),
// MantisJobDurationType.valueOf(protoSla.getDurationType().name()),
// protoSla.getUserProvidedType());
// }
//
// private static final WorkerMigrationConfig toMigrationConfig(final io.mantisrx.master.core.proto.WorkerMigrationConfig cfg) {
// return new WorkerMigrationConfig(
// WorkerMigrationConfig.MigrationStrategyEnum.valueOf(cfg.getStrategy().name()),
// cfg.getConfigString()
// );
// }
//
// private static final JobOwner toJobOwner(final io.mantisrx.master.core.proto.JobOwner owner) {
// return new JobOwner(
// owner.getName(),
// owner.getTeamName(),
// owner.getDescription(),
// owner.getContactEmail(),
// owner.getRepo()
// );
// }
//
// public static NamedJobDefinition toNamedJobDefinition(final CreateJobClusterRequest request) throws MalformedURLException {
// JobDefinition jd = request.getJobDefinition();
// io.mantisrx.master.core.proto.JobOwner owner = request.getOwner();
// MantisJobDefinition jobDefinition = new MantisJobDefinition(
// jd.getName(),
// jd.getUser(),
// jd.getUrl() == null ? null : new URL(jd.getUrl()),
// jd.getVersion(),
// jd.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()),
// jd.hasJobSla() ? toJobSla(jd.getJobSla()) : null,
// jd.getSubscriptionTimeoutSecs(),
// jd.hasSchedulingInfo() ? toSchedulingInfo(jd.getSchedulingInfo()) : null,
// jd.getSlaMin(),
// jd.getSlaMax(),
// jd.getCronSpec(),
// NamedJobDefinition.CronPolicy.valueOf(jd.getCronPolicy().name()),
// jd.getIsReadyForJobMaster(),
// jd.hasMigrationConfig() ? toMigrationConfig(jd.getMigrationConfig()) : WorkerMigrationConfig.DEFAULT,
// jd.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList()));
// return new NamedJobDefinition(
// jobDefinition,
// request.hasOwner() ? toJobOwner(owner) : null
// );
// }
//
// public static NamedJobDefinition toNamedJobDefinition(final UpdateJobClusterRequest request) throws MalformedURLException {
// JobDefinition jd = request.getJobDefinition();
// io.mantisrx.master.core.proto.JobOwner owner = request.getOwner();
// MantisJobDefinition jobDefinition = new MantisJobDefinition(
// jd.getName(),
// jd.getUser(),
// jd.getUrl() == null ? null : new URL(jd.getUrl()),
// jd.getVersion(),
// jd.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()),
// jd.hasJobSla() ? toJobSla(jd.getJobSla()) : null,
// jd.getSubscriptionTimeoutSecs(),
// jd.hasSchedulingInfo() ? toSchedulingInfo(jd.getSchedulingInfo()) : null,
// jd.getSlaMin(),
// jd.getSlaMax(),
// jd.getCronSpec(),
// NamedJobDefinition.CronPolicy.valueOf(jd.getCronPolicy().name()),
// jd.getIsReadyForJobMaster(),
// jd.hasMigrationConfig() ? toMigrationConfig(jd.getMigrationConfig()) : WorkerMigrationConfig.DEFAULT,
// jd.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList()));
// return new NamedJobDefinition(
// jobDefinition,
// request.hasOwner() ? toJobOwner(owner) : null
// );
// }
//
//}
| 8,194 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/TaskStatusUpdateHandlerImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.core.Status;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.util.ExceptionUtils;
@Slf4j
public class TaskStatusUpdateHandlerImpl implements TaskStatusUpdateHandler {
private final Counter failureCounter;
private final Counter workerSentHeartbeats;
private final MantisMasterGateway masterMonitor;
TaskStatusUpdateHandlerImpl(MantisMasterGateway masterGateway) {
final Metrics metrics = MetricsRegistry.getInstance().registerAndGet(new Metrics.Builder()
.name("ReportStatusServiceHttpImpl")
.addCounter("failureCounter")
.addCounter("workerSentHeartbeats")
.build());
this.failureCounter = metrics.getCounter("failureCounter");
this.workerSentHeartbeats = metrics.getCounter("workerSentHeartbeats");
this.masterMonitor = masterGateway;
}
@Override
public void onStatusUpdate(Status status) {
log.info("onStatusUpdate for status: {}", status);
masterMonitor
.updateStatus(status)
.whenComplete((ack, throwable) -> {
if (ack != null) {
workerSentHeartbeats.increment();
} else {
Throwable cleaned = ExceptionUtils.stripExecutionException(throwable);
failureCounter.increment();
log.error("Failed to send status update", cleaned);
}
});
}
}
| 8,195 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/ConditionalRetry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.common.metrics.Counter;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
public class ConditionalRetry {
private static final Logger logger = LoggerFactory.getLogger(ConditionalRetry.class);
private final Counter counter;
private final String name;
private final AtomicReference<Throwable> errorRef = new AtomicReference<>(null);
private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic;
public ConditionalRetry(Counter counter, String name) {
this(counter, name, Integer.MAX_VALUE);
}
public ConditionalRetry(Counter counter, String name, final int max) {
this.counter = counter;
this.name = name;
this.retryLogic =
new Func1<Observable<? extends Throwable>, Observable<?>>() {
@Override
public Observable<?> call(Observable<? extends Throwable> attempts) {
return attempts
.zipWith(Observable.range(1, max), new Func2<Throwable, Integer, Integer>() {
@Override
public Integer call(Throwable t1, Integer integer) {
return integer;
}
})
.flatMap(new Func1<Integer, Observable<?>>() {
@Override
public Observable<?> call(Integer integer) {
if (errorRef.get() != null)
return Observable.error(errorRef.get());
if (ConditionalRetry.this.counter != null)
ConditionalRetry.this.counter.increment();
long delay = 2 * (integer > 10 ? 10 : integer);
logger.info(": retrying " + ConditionalRetry.this.name +
" after sleeping for " + delay + " secs");
return Observable.timer(delay, TimeUnit.SECONDS);
}
});
}
};
}
public void setErrorRef(Throwable error) {
errorRef.set(error);
}
public Counter getCounter() {
return counter;
}
public Func1<Observable<? extends Throwable>, Observable<?>> getRetryLogic() {
return retryLogic;
}
}
| 8,196 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/MasterClientException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
/**
 * Checked exception raised by the Mantis master client layer for failures while talking to
 * the master.
 */
public class MasterClientException extends Exception {

    /** Creates an exception carrying only a detail message. */
    public MasterClientException(String s) {
        super(s);
    }

    /** Creates an exception with a detail message and an underlying cause. */
    public MasterClientException(String s, Throwable t) {
        super(s, t);
    }

    /** Wraps another exception, preserving it as the cause. */
    public MasterClientException(Exception e) {
        super(e);
    }
}
| 8,197 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/HighAvailabilityServices.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ResourceClusterGateway;
import io.mantisrx.shaded.com.google.common.util.concurrent.Service;
/**
 * HighAvailabilityServices is a container for a group of services which are considered to be highly available because
 * of multiple standbys capable of handling the service in case the leader goes down for instance.
 * <p>
 * In Mantis, the following services are considered highly-available:
 * 1. Mantis master which handles all the job-cluster/job/stage/worker interactions.
 * 2. Resource Manager which handles all the resource specific interactions such as resource status updates,
 * registrations and heartbeats.
 * <p>
 * These services can be obtained from the HighAvailabilityServices implementation.
 */
public interface HighAvailabilityServices extends Service {

    /** @return gateway for job-cluster/job/stage/worker interactions with the mantis master. */
    MantisMasterGateway getMasterClientApi();

    /** @return monitor for the mantis master — presumably tracks leader changes; confirm with implementations. */
    MasterMonitor getMasterMonitor();

    /**
     * Obtains a connection to the resource manager responsible for the given cluster.
     *
     * @param clusterID cluster whose resource manager should be connected to
     * @return a leader-aware connection wrapping a {@link ResourceClusterGateway}
     */
    ResourceLeaderConnection<ResourceClusterGateway> connectWithResourceManager(ClusterID clusterID);
}
| 8,198 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-client/src/main/java/io/mantisrx/server/master/client/StageScaleRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.client;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Immutable request payload asking the master to scale a job stage to a target number of
 * workers. (De)serialized with Jackson; JSON property names use the "UpperCamel" form
 * ("JobId", "StageNumber", ...).
 * <p>
 * Fixes: {@code @JsonIgnoreProperties(ignoreUnknown = true)} was previously placed on the
 * constructor, where it does not configure class-level unknown-property handling — moved to
 * the class. The {@code @JsonCreator} constructor parameters also lacked
 * {@code @JsonProperty} bindings, which Jackson requires to match JSON fields to creator
 * arguments (unless the parameter-names module is registered) — added.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class StageScaleRequest {
    @JsonProperty("JobId")
    private final String jobId;
    @JsonProperty("StageNumber")
    private final int stageNumber;
    @JsonProperty("NumWorkers")
    private final int numWorkers;
    @JsonProperty("Reason")
    private final String reason;

    /**
     * @param jobId       id of the job whose stage is being scaled
     * @param stageNumber stage to scale
     * @param numWorkers  desired worker count for the stage
     * @param reason      free-form reason recorded with the scale request
     */
    @JsonCreator
    public StageScaleRequest(@JsonProperty("JobId") final String jobId,
                             @JsonProperty("StageNumber") final int stageNumber,
                             @JsonProperty("NumWorkers") final int numWorkers,
                             @JsonProperty("Reason") final String reason) {
        this.jobId = jobId;
        this.stageNumber = stageNumber;
        this.numWorkers = numWorkers;
        this.reason = reason;
    }

    public String getJobId() {
        return jobId;
    }

    public int getStageNumber() {
        return stageNumber;
    }

    public int getNumWorkers() {
        return numWorkers;
    }

    public String getReason() {
        return reason;
    }

    @Override
    public String toString() {
        return "StageScaleRequest{" +
            "jobId='" + jobId + '\'' +
            ", stageNumber=" + stageNumber +
            ", numWorkers=" + numWorkers +
            ", reason='" + reason + '\'' +
            '}';
    }
}
| 8,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.