text
stringlengths 7
1.01M
|
|---|
package net.ocnl.rollout.storage;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/**
 * Contract test for {@link IRolloutStorage} implementations.
 *
 * <p>Concrete storage backends subclass this and supply a factory method; every
 * implementation must satisfy the same read/write contract exercised here.
 */
public abstract class StorageContractTest {

    /** Returns a fresh, empty storage instance to run the contract against. */
    public abstract IRolloutStorage createNewStorage();

    @Test
    public void it_can_get_stored_features_by_key() {
        final IRolloutStorage subject = createNewStorage();
        subject.write("key", "value");
        assertThat(subject.read("key"), is("value"));
    }

    @Test
    public void the_value_of_an_unknown_key_is_null() {
        final IRolloutStorage subject = createNewStorage();
        assertThat(subject.read("key"), is(nullValue()));
    }
}
|
package kelvin.mite.mixin.client;
import net.minecraft.client.render.VertexConsumerProvider;
import net.minecraft.client.render.entity.CreeperEntityRenderer;
import net.minecraft.client.render.entity.EntityRendererFactory;
import net.minecraft.client.render.entity.MobEntityRenderer;
import net.minecraft.client.render.entity.model.CreeperEntityModel;
import net.minecraft.client.util.math.MatrixStack;
import net.minecraft.entity.Entity;
import net.minecraft.entity.mob.CreeperEntity;
import net.minecraft.util.math.MathHelper;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.Overwrite;
@Mixin(CreeperEntityRenderer.class)
public abstract class CreeperEntityRendererMixin {

    /**
     * Replaces the vanilla creeper render scaling so that baby creepers are drawn
     * at half size, while keeping the vanilla fuse "swell" animation intact.
     *
     * <p>NOTE(review): {@code @Overwrite} has no {@code author}/{@code reason} —
     * consider an {@code @Inject}/{@code @ModifyArgs} instead to stay compatible
     * with other mixins targeting this method.
     */
    @Overwrite
    public void scale(CreeperEntity creeperEntity, MatrixStack matrixStack, float f) {
        float fuse = creeperEntity.getClientFuseTime(f);
        // High-frequency sine wobble grows with the (unclamped) fuse progress.
        float wobble = 1.0F + MathHelper.sin(fuse * 100.0F) * fuse * 0.01F;
        fuse = MathHelper.clamp(fuse, 0.0F, 1.0F);
        // Quartic ease-in: swell accelerates toward detonation.
        fuse *= fuse;
        fuse *= fuse;
        float horizontal = (1.0F + fuse * 0.4F) * wobble;
        float vertical = (1.0F + fuse * 0.1F) / wobble;
        // The one behavioral addition over vanilla: babies render at half scale.
        float sizeScale = creeperEntity.isBaby() ? 0.5f : 1.0f;
        matrixStack.scale(horizontal * sizeScale, vertical * sizeScale, horizontal * sizeScale);
    }
}
|
package org.hadatac.console.controllers.triplestore;
import java.util.List;
import java.io.File;
import org.hadatac.Constants;
import org.hadatac.console.controllers.Application;
import org.pac4j.play.java.Secure;
import play.mvc.*;
import org.hadatac.console.controllers.AuthApplication;
import org.hadatac.console.controllers.annotator.AutoAnnotator;
import org.hadatac.console.views.html.triplestore.*;
import org.hadatac.data.loader.DataContext;
import org.hadatac.metadata.loader.MetadataContext;
import org.hadatac.metadata.loader.PermissionsContext;
import org.hadatac.metadata.loader.URIUtils;
import org.hadatac.utils.Feedback;
import org.hadatac.utils.NameSpace;
import org.hadatac.utils.NameSpaces;
import com.typesafe.config.ConfigFactory;
import org.hadatac.entity.pojo.DataFile;
import org.hadatac.entity.pojo.Measurement;
import be.objectify.deadbolt.java.actions.Group;
import be.objectify.deadbolt.java.actions.Restrict;
import javax.inject.Inject;
/**
 * Play controller that wipes metadata, user graphs, data collections or
 * ingested data files from the triple store / Solr, selected by an operation
 * keyword.
 */
public class Clean extends Controller {

    @Inject
    Application application;

    /** Renders the confirmation page for the requested clean operation. */
    @Secure(authorizers = Constants.DATA_MANAGER_ROLE)
    public Result clean(String oper, Http.Request request) {
        return ok(clean.render(oper, application.getUserEmail(request)));
    }

    /** POST variant; renders the same confirmation page. */
    @Secure(authorizers = Constants.DATA_MANAGER_ROLE)
    public Result postClean(String oper, Http.Request request) {
        return ok(clean.render(oper, application.getUserEmail(request)));
    }

    /**
     * Executes the clean operation named by {@code oper} and returns a
     * human-readable result message. Unknown operations return "".
     *
     * <p>NOTE(review): in the "usergraph" branch {@code result} is overwritten
     * three times, so only the message of the last clean step is returned —
     * confirm this is intended.
     */
    public static String playClean(String oper) {
        String result = "";
        if (oper.equals("metadata")) {
            MetadataContext metadata = new MetadataContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.triplestore"),
                    false);
            result = metadata.clean(Feedback.WEB);
            NameSpaces.getInstance().reload();
        } else if (oper.equals("usergraph")) {
            PermissionsContext permission = new PermissionsContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.permissions"),
                    false);
            result = permission.clean(Feedback.WEB);
            DataContext userCollection = new DataContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.users"),
                    false);
            result = userCollection.cleanDataUsers(Feedback.WEB);
            DataContext linkedCollection = new DataContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.data"),
                    false);
            result = linkedCollection.cleanDataAccounts(Feedback.WEB);
        } else if (oper.equals("collections")) {
            DataContext collection = new DataContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.data"),
                    false);
            result = collection.cleanDataAcquisitions(Feedback.WEB);
        } else if (oper.equals("acquisitions")) {
            DataContext acquisition = new DataContext(
                    "user",
                    "password",
                    ConfigFactory.load().getString("hadatac.solr.data"),
                    false);
            result = acquisition.cleanAcquisitionData(Feedback.WEB);
        } else if (oper.equals("unprocessed")) {
            result = deleteDataFilesWithLabel(DataFile.findByStatus(DataFile.UNPROCESSED), "unprocessed");
        } else if (oper.equals("processed")) {
            result = deleteDataFilesWithLabel(DataFile.findByStatus(DataFile.PROCESSED), "processed");
        } else if (oper.equals("working")) {
            result = deleteDataFilesWithLabel(DataFile.findByStatus(DataFile.WORKING), "working");
        }
        return result;
    }

    /**
     * Deletes every data file in {@code selected} and returns a
     * "&lt;count&gt; &lt;label&gt; datafiles deleted" summary message.
     * Extracted from three previously copy-pasted branches of playClean.
     *
     * @param selected files to delete; may be null or empty
     * @param label    human-readable status label used in the message
     */
    private static String deleteDataFilesWithLabel(List<DataFile> selected, String label) {
        String message = "0 " + label + " datafiles deleted";
        if (selected != null && selected.size() > 0) {
            message = selected.size() + " " + label + " datafiles deleted";
            for (DataFile df : selected) {
                deleteDataFile(df);
            }
        }
        return message;
    }

    /**
     * Removes a data file's ingested content and then the file itself.
     * "DA-" files have their measurements and named-graph triples removed
     * directly; other files go through the annotator's triple rollback.
     */
    private static void deleteDataFile(DataFile dataFile) {
        File file = new File(dataFile.getAbsolutePath());
        if (dataFile.getPureFileName().startsWith("DA-")) {
            Measurement.deleteFromSolr(dataFile.getDatasetUri());
            NameSpace.deleteTriplesByNamedGraph(URIUtils.replacePrefixEx(dataFile.getDataAcquisitionUri()));
        } else {
            try {
                AutoAnnotator.deleteAddedTriples(file, dataFile);
            } catch (Exception e) {
                // Best-effort: deletion of the file proceeds even if triple rollback fails.
                System.out.print("Can not delete triples ingested by " + dataFile.getFileName() + " ..");
            }
        }
        // NOTE(review): return values of delete() are ignored; failures are silent.
        file.delete();
        dataFile.delete();
    }
}
|
/*
* CPAchecker is a tool for configurable software verification.
* This file is part of CPAchecker.
*
* Copyright (C) 2007-2015 Dirk Beyer
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* CPAchecker web page:
* http://cpachecker.sosy-lab.org
*/
package org.sosy_lab.cpachecker.util.cwriter;
import static com.google.common.collect.FluentIterable.from;
import static org.sosy_lab.cpachecker.util.AbstractStates.extractLocation;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.sosy_lab.common.configuration.Configuration;
import org.sosy_lab.common.configuration.InvalidConfigurationException;
import org.sosy_lab.common.configuration.Option;
import org.sosy_lab.common.configuration.Options;
import org.sosy_lab.cpachecker.cfa.model.CFAEdge;
import org.sosy_lab.cpachecker.cfa.model.CFANode;
import org.sosy_lab.cpachecker.cfa.model.c.CFunctionReturnEdge;
import org.sosy_lab.cpachecker.cpa.arg.ARGState;
import org.sosy_lab.cpachecker.util.LoopStructure;
import org.sosy_lab.cpachecker.util.LoopStructure.Loop;
import org.sosy_lab.cpachecker.util.Pair;
@Options(prefix="cwriter.withLoops")
public class LoopCollectingEdgeVisitor implements EdgeVisitor {

    /** Which loops along the ARG path should be collected. */
    enum LoopDetectionStrategy {
        ALL_LOOPS, ONLY_LAST_LOOP
    }

    @Option(toUppercase=true,
        description="Option to change the behaviour of the loop detection for"
            + " generating the Counterexample-C-Code that will probably be used to generate"
            + " invariants. Note that last loop means the first loop encountered when"
            + " backwards traversing the given ARGPath, thus, the last loop may contain"
            + " other loops, which are in turn also counted to the last loop.", secure=true)
    private LoopDetectionStrategy loopDetectionStrategy = LoopDetectionStrategy.ALL_LOOPS;

    private final LoopStructure loopStructure;
    // (edge, state) pairs in path order, filled by visit() and consumed backwards.
    private final List<Pair<CFAEdge, ARGState>> cfaPath = new ArrayList<>();
    // Loops we are currently "inside" while traversing; innermost on top.
    private final Deque<Loop> loopStack = new ArrayDeque<>();
    // Result map: loop -> ARG states belonging to it (insertion-ordered).
    private final Map<Loop, Set<ARGState>> relevantLoops = new LinkedHashMap<>();
    private final List<Loop> finishedLoops = new ArrayList<>();
    private boolean lastLoopFound = false;

    public LoopCollectingEdgeVisitor(LoopStructure pLoopStructure, Configuration config) throws InvalidConfigurationException {
        config.inject(this);
        loopStructure = pLoopStructure;
    }

    /**
     * Resets the state of this visitor, such that it works as if it was created newly.
     */
    public void reset() {
        cfaPath.clear();
        loopStack.clear();
        relevantLoops.clear();
        finishedLoops.clear();
        lastLoopFound = false;
    }

    /** Records one (edge, successor state) step of the path; functionStack is unused here. */
    @Override
    public void visit(ARGState childElement, CFAEdge edge, Deque<FunctionBody> functionStack) {
        cfaPath.add(Pair.of(edge, childElement));
    }

    /**
     * Backwards-traverses the recorded path and returns, per loop, the ARG states
     * that belong to it, honoring the configured detection strategy.
     *
     * NOTE(review): the two get() calls below index size()-1 and size()-2 and will
     * throw IndexOutOfBoundsException for paths shorter than 2 entries; also the
     * initial state is taken from the second-to-last pair while the edge comes
     * from the last — confirm this pairing is intended.
     */
    public Map<Loop, Set<ARGState>> getRelevantLoops() {
        ListIterator<Pair<CFAEdge, ARGState>> cfaIterator = cfaPath.listIterator(cfaPath.size());
        CFAEdge edge = cfaPath.get(cfaPath.size()-1).getFirst();
        ARGState state = cfaPath.get(cfaPath.size()-2).getSecond();
        // Creates the initial loopStack, as seen from the error state's location
        handleLoopStack(edge, state);
        // now backwards traverse the list
        while (cfaIterator.hasPrevious()) {
            // fetch current arg path element (only cfa necessary, therefore only this
            // one is here)
            Pair<CFAEdge, ARGState> tmp = cfaIterator.previous();
            edge = tmp.getFirst();
            // Peek at the state one step further back without consuming it.
            if(cfaIterator.hasPrevious()) {
                state = cfaIterator.previous().getSecond();
                cfaIterator.next();
            } else {
                break;
            }
            // check if the cfaNode has more than one outgoing edges, and if yes
            // if one of these is a functionsummary edge, we want to skip
            // all states until the next state in the current function if it is so
            if (edge instanceof CFunctionReturnEdge) {
                // Skip the whole function body: rewind until we are back at the
                // node just before the corresponding call.
                CFANode beforeFunctionCall = ((CFunctionReturnEdge) edge).getSummaryEdge().getPredecessor();
                while (cfaIterator.hasPrevious()) {
                    tmp = cfaIterator.previous();
                    if (Objects.equals(tmp.getFirst().getPredecessor(), beforeFunctionCall)) {
                        edge = tmp.getFirst();
                        if(cfaIterator.hasPrevious()) {
                            state = cfaIterator.previous().getSecond();
                            cfaIterator.next();
                        }
                        break;
                    }
                }
            }
            handleLoopStack(edge, state);
            // if we have found the last loop and all belonging states,
            // so we can skip further computation here
            if (loopDetectionStrategy == LoopDetectionStrategy.ONLY_LAST_LOOP
                && lastLoopFound && loopStack.isEmpty()) {
                break;
            }
        }
        return relevantLoops;
    }

    /**
     * Updates the loop information in loopStack of a given edge
     */
    private void handleLoopStack(CFAEdge edge, ARGState state) {
        // if the edge is null we can just add the ARGState to the current topmost
        // loop if there is one, the loopstack won't change in such situations
        if (edge == null) {
            if (!loopStack.isEmpty()) {
                List<Loop> loops = getLoopsOfNode(loopStructure, extractLocation(state));
                // ArrayDeque and List can unfortunately not be compared completely with
                // equals, therefore we check at least the size
                assert loops.size() == loopStack.size();
                // NOTE(review): relevantLoops.get(loop) is assumed non-null here;
                // a loop on the stack should always have been registered below.
                for (Loop loop : loops) {
                    relevantLoops.get(loop).add(state);
                }
            }
            return;
        }
        CFANode predecessor = edge.getPredecessor();
        // remove all loops which we are not in currently
        while (!loopStack.isEmpty() && !loopStack.peek().getLoopNodes().contains(predecessor)) {
            finishedLoops.add(loopStack.pop());
        }
        boolean isInLoop = isInAnyLoop(loopStructure, predecessor);
        List<Loop> loops = getLoopsOfNode(loopStructure, predecessor);
        if (!loopStack.isEmpty()) {
            relevantLoops.get(loopStack.peek()).add(state);
            // Push any loops that are nested inside the current stack top
            // (loops is ordered outermost -> innermost).
            int startPushingIndex = loops.size();
            while (loops.get(startPushingIndex-1) != loopStack.peek()) {
                startPushingIndex--;
            }
            for (int i = startPushingIndex; i < loops.size(); i++) {
                Loop actLoop = loops.get(i);
                loopStack.push(actLoop);
                Set<ARGState> states = new HashSet<>();
                states.add(state);
                relevantLoops.put(actLoop, states);
            }
            // loopstack is empty, so we only need to push something
            // on the stack if we need it
        } else if (lastLoopFound && loopDetectionStrategy == LoopDetectionStrategy.ONLY_LAST_LOOP) {
            return;
            // we either need all loops because there were no before or because of
            // the heuristic
        } else {
            for (Loop actLoop : loops) {
                loopStack.push(actLoop);
                Set<ARGState> states = new HashSet<>();
                states.add(state);
                relevantLoops.put(actLoop, states);
            }
        }
        // Remember that the path touched a loop at all (traversal is backwards,
        // so the first loop seen is the "last" loop of the path).
        if (!lastLoopFound && isInLoop) {
            lastLoopFound = true;
        }
    }

    /**
     * Checks if a given CFANode is part of any loop.
     */
    static boolean isInAnyLoop(LoopStructure loopStructure, final CFANode node) {
        return from(loopStructure.getAllLoops())
            .anyMatch(pInput -> pInput.getLoopNodes().contains(node));
    }

    /**
     * Returns the Loops in which the given node is located from the outermost
     * to the innermost loop.
     */
    static List<Loop> getLoopsOfNode(LoopStructure loopStructure, final CFANode node) {
        return from(loopStructure.getAllLoops())
            .filter(pInput -> pInput.getLoopNodes().contains(node))
            .toSortedList(
                (loop1, loop2) -> {
                    // NOTE(review): this comparator never returns 0 and is not
                    // antisymmetric for unrelated loops; it relies on all loops
                    // of one node forming a nesting chain — confirm.
                    return isOuterLoopOf(loop1, loop2) ? -1 : 1;
                });
    }

    /** True iff outer's node set is a superset of inner's (outer encloses inner). */
    static boolean isOuterLoopOf(Loop outer, Loop inner) {
        return outer.getLoopNodes().containsAll(inner.getLoopNodes());
    }
}
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.android.exoplayer;
/**
* Maintains codec event counts, for debugging purposes only.
* <p>
* Counters should be written from the playback thread only. Counters may be read from any thread.
* To ensure that the counter values are correctly reflected between threads, users of this class
* should invoke {@link #ensureUpdated()} prior to reading and after writing.
*/
public final class CodecCounters {

    public int codecInitCount;
    public int codecReleaseCount;
    public int outputFormatChangedCount;
    public int outputBuffersChangedCount;
    public int renderedOutputBufferCount;
    public int skippedOutputBufferCount;
    public int droppedOutputBufferCount;

    /**
     * Should be invoked from the playback thread after the counters have been updated. Should also
     * be invoked from any other thread that wishes to read the counters, before reading. These calls
     * ensure that counter updates are made visible to the reading threads.
     */
    public synchronized void ensureUpdated() {
        // Intentionally empty: entering and leaving the monitor establishes the
        // memory barrier needed for cross-thread visibility of the counters.
    }

    /**
     * Returns a single-line, space-separated summary of all counters
     * ("cic:&lt;n&gt; crc:&lt;n&gt; ..."), refreshing visibility first.
     */
    public String getDebugString() {
        ensureUpdated();
        return "cic:" + codecInitCount
            + " crc:" + codecReleaseCount
            + " ofc:" + outputFormatChangedCount
            + " obc:" + outputBuffersChangedCount
            + " ren:" + renderedOutputBufferCount
            + " sob:" + skippedOutputBufferCount
            + " dob:" + droppedOutputBufferCount;
    }
}
|
/*
* This file is part of Burningwave Core.
*
* Author: Roberto Gentili
*
* Hosted at: https://github.com/burningwave/core
*
* --
*
* The MIT License (MIT)
*
* Copyright (c) 2019-2021 Roberto Gentili
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without
* limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
* LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
* EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
* OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.burningwave.core.function;
import java.util.Objects;
@FunctionalInterface
/**
 * An operation over five input arguments that returns no result; the
 * five-argument analogue of {@link java.util.function.Consumer}.
 */
@FunctionalInterface
public interface PentaConsumer<P0, P1, P2, P3, P4> {

    /** Performs this operation on the given five arguments. */
    void accept(P0 p0, P1 p1, P2 p2, P3 p3, P4 p4);

    /**
     * Returns a composed consumer that performs this operation first and then
     * {@code after}, passing the same five arguments to both.
     *
     * @param after the operation to run after this one; must not be null
     * @throws NullPointerException if {@code after} is null
     */
    default PentaConsumer<P0, P1, P2, P3, P4> andThen(PentaConsumer<? super P0, ? super P1, ? super P2, ? super P3, ? super P4> after) {
        Objects.requireNonNull(after);
        return (a, b, c, d, e) -> {
            this.accept(a, b, c, d, e);
            after.accept(a, b, c, d, e);
        };
    }
}
|
package org.apache.cayenne.testdo.toone.auto;
import org.apache.cayenne.PersistentObject;
import org.apache.cayenne.ValueHolder;
import org.apache.cayenne.testdo.toone.ClientTooneMaster;
/**
* A generated persistent class mapped as "TooneDep" Cayenne entity. It is a good idea to
* avoid changing this class manually, since it will be overwritten next time code is
* regenerated. If you need to make any customizations, put them in a subclass.
*/
public abstract class _ClientTooneDep extends PersistentObject {

    /** Property name of the to-one "toMaster" relationship. */
    public static final String TO_MASTER_PROPERTY = "toMaster";

    // Lazily-resolved holder for the to-one relationship; Cayenne injects it.
    protected ValueHolder toMaster;

    /**
     * Returns the related ClientTooneMaster, faulting it in through the
     * object context when attached.
     *
     * NOTE(review): dereferences toMaster unconditionally — NPEs if the holder
     * was never initialized (generated code relies on Cayenne setting it up).
     */
    public ClientTooneMaster getToMaster() {
        if(objectContext != null) {
            objectContext.prepareForAccess(this, "toMaster", true);
        }
        return (ClientTooneMaster) toMaster.getValue();
    }

    /** Sets the related ClientTooneMaster, notifying the context first when attached. */
    public void setToMaster(ClientTooneMaster toMaster) {
        if(objectContext != null) {
            objectContext.prepareForAccess(this, "toMaster", true);
        }
        this.toMaster.setValue(toMaster);
    }
}
|
/*
* Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.sagemaker;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import javax.annotation.Generated;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.services.sagemaker.AmazonSageMakerClientBuilder;
import com.amazonaws.services.sagemaker.waiters.AmazonSageMakerWaiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.sagemaker.model.*;
import com.amazonaws.services.sagemaker.model.transform.*;
/**
* Client for accessing SageMaker. All service calls made using this client are blocking, and will not return until the
* service call completes.
* <p>
* Definition of the public APIs exposed by SageMaker
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonSageMakerClient extends AmazonWebServiceClient implements AmazonSageMaker {
    /** Provider for AWS credentials. */
    private final AWSCredentialsProvider awsCredentialsProvider;

    private static final Log log = LogFactory.getLog(AmazonSageMaker.class);

    /** Default signing name for the service. */
    private static final String DEFAULT_SIGNING_NAME = "sagemaker";

    // Lazily created by the waiters() accessor; volatile for safe publication.
    private volatile AmazonSageMakerWaiters waiters;

    /** Client configuration factory providing ClientConfigurations tailored to this client */
    protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();

    // JSON protocol factory with the modeled service exceptions registered by
    // their wire error codes (generated; shared by all operations).
    private static final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory = new com.amazonaws.protocol.json.SdkJsonProtocolFactory(
            new JsonClientMetadata()
                    .withProtocolVersion("1.1")
                    .withSupportsCbor(false)
                    .withSupportsIon(false)
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ResourceInUse").withModeledClass(
                                    com.amazonaws.services.sagemaker.model.ResourceInUseException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ResourceNotFound").withModeledClass(
                                    com.amazonaws.services.sagemaker.model.ResourceNotFoundException.class))
                    .addErrorMetadata(
                            new JsonErrorShapeMetadata().withErrorCode("ResourceLimitExceeded").withModeledClass(
                                    com.amazonaws.services.sagemaker.model.ResourceLimitExceededException.class))
                    .withBaseServiceExceptionClass(com.amazonaws.services.sagemaker.model.AmazonSageMakerException.class));
    /** Returns a fluent builder for constructing configured instances of this client. */
    public static AmazonSageMakerClientBuilder builder() {
        return AmazonSageMakerClientBuilder.standard();
    }
/**
* Constructs a new client to invoke service methods on SageMaker using the specified parameters.
*
* <p>
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
    AmazonSageMakerClient(AwsSyncClientParams clientParams) {
        super(clientParams);
        // Credentials provider comes from the builder-supplied params.
        this.awsCredentialsProvider = clientParams.getCredentialsProvider();
        init();
    }
/**
* Constructs a new client to invoke service methods on SageMaker using the specified parameters.
*
* <p>
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
    // NOTE(review): endpointDiscoveryEnabled is accepted but never used — this
    // constructor behaves identically to the single-argument one (generated
    // code; presumably endpoint discovery is unsupported for this service).
    AmazonSageMakerClient(AwsSyncClientParams clientParams, boolean endpointDiscoveryEnabled) {
        super(clientParams);
        this.awsCredentialsProvider = clientParams.getCredentialsProvider();
        init();
    }
    /**
     * One-time client setup: signing name, endpoint, and the request-handler
     * chains loaded from the classpath. Order matters: the endpoint must be set
     * after the service name so the signer is configured correctly.
     */
    private void init() {
        setServiceNameIntern(DEFAULT_SIGNING_NAME);
        setEndpointPrefix(ENDPOINT_PREFIX);
        // calling this.setEndPoint(...) will also modify the signer accordingly
        setEndpoint("sagemaker.us-east-1.amazonaws.com");
        HandlerChainFactory chainFactory = new HandlerChainFactory();
        requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/sagemaker/request.handlers"));
        requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/sagemaker/request.handler2s"));
        requestHandler2s.addAll(chainFactory.getGlobalHandlers());
    }
/**
* <p>
* Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook
* instances, training jobs, models, endpoint configurations, and endpoints.
* </p>
* <p>
* Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information
* about tags, see <a
* href="http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what">Using
* Cost Allocation Tags</a> in the <i>AWS Billing and Cost Management User Guide</i>.
* </p>
*
* @param addTagsRequest
* @return Result of the AddTags operation returned by the service.
* @sample AmazonSageMaker.AddTags
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTags" target="_top">AWS API
* Documentation</a>
*/
@Override
public AddTagsResult addTags(AddTagsRequest request) {
request = beforeClientExecution(request);
return executeAddTags(request);
}
@SdkInternalApi
final AddTagsResult executeAddTags(AddTagsRequest addTagsRequest) {
ExecutionContext executionContext = createExecutionContext(addTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<AddTagsRequest> request = null;
Response<AddTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AddTagsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(addTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AddTags");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<AddTagsResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new AddTagsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint
* to provision resources and deploy models. You create the endpoint configuration with the <a
* href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html">CreateEndpointConfig</a> API.
* </p>
* <note>
* <p>
* Use this API only for hosting models using Amazon SageMaker hosting services.
* </p>
* </note>
* <p>
* The endpoint name must be unique within an AWS Region in your AWS account.
* </p>
* <p>
* When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute
* instances), and deploys the model(s) on them.
* </p>
* <p>
* When Amazon SageMaker receives the request, it sets the endpoint status to <code>Creating</code>. After it
* creates the endpoint, it sets the status to <code>InService</code>. Amazon SageMaker can then process incoming
* requests for inferences. To check the status of an endpoint, use the <a
* href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html">DescribeEndpoint</a> API.
* </p>
* <p>
* For an example, see <a href="http://docs.aws.amazon.com/sagemaker/latest/dg/ex1.html">Exercise 1: Using the
* K-Means Algorithm Provided by Amazon SageMaker</a>.
* </p>
* <p>
* If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS
* Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your
* IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS
* for that region. For more information, see <a
* href="http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html">Activating and
* Deactivating AWS STS i an AWS Region</a> in the <i>AWS Identity and Access Management User Guide</i>.
* </p>
*
* @param createEndpointRequest
* @return Result of the CreateEndpoint operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEndpoint
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpoint" target="_top">AWS API
* Documentation</a>
*/
@Override
public CreateEndpointResult createEndpoint(CreateEndpointRequest request) {
request = beforeClientExecution(request);
return executeCreateEndpoint(request);
}
@SdkInternalApi
final CreateEndpointResult executeCreateEndpoint(CreateEndpointRequest createEndpointRequest) {
ExecutionContext executionContext = createExecutionContext(createEndpointRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateEndpointRequest> request = null;
Response<CreateEndpointResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createEndpointRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEndpoint");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<CreateEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateEndpointResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the
* configuration, you identify one or more models, created using the <code>CreateModel</code> API, to deploy and the
* resources that you want Amazon SageMaker to provision. Then you call the <a
* href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.
* </p>
* <note>
* <p>
* Use this API only if you want to use Amazon SageMaker hosting services to deploy models into production.
* </p>
* </note>
* <p>
* In the request, you define one or more <code>ProductionVariant</code>s, each of which identifies a model. Each
* <code>ProductionVariant</code> parameter also describes the resources that you want Amazon SageMaker to
* provision. This includes the number and type of ML compute instances to deploy.
* </p>
* <p>
* If you are hosting multiple models, you also assign a <code>VariantWeight</code> to specify how much traffic you
* want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign
* traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model
* A, and one-third to model B.
* </p>
*
* @param createEndpointConfigRequest
* @return Result of the CreateEndpointConfig operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEndpointConfig
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfig" target="_top">AWS
* API Documentation</a>
*/
@Override
public CreateEndpointConfigResult createEndpointConfig(CreateEndpointConfigRequest request) {
request = beforeClientExecution(request);
return executeCreateEndpointConfig(request);
}
/**
 * Marshalls and invokes the CreateEndpointConfig operation, timing the marshalling step and the
 * overall client execution through the request-metrics collector. Metrics collection is always
 * finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateEndpointConfigResult executeCreateEndpointConfig(CreateEndpointConfigRequest createEndpointConfigRequest) {

    ExecutionContext executionContext = createExecutionContext(createEndpointConfigRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateEndpointConfigRequest> request = null;
    Response<CreateEndpointConfigResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateEndpointConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createEndpointConfigRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEndpointConfig");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateEndpointConfigResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateEndpointConfigResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Starts a hyperparameter tuning job.
 * </p>
 *
 * @param createHyperParameterTuningJobRequest
 * @return Result of the CreateHyperParameterTuningJob operation returned by the service.
 * @throws ResourceInUseException
 *         Resource being accessed is in use.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateHyperParameterTuningJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateHyperParameterTuningJob"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public CreateHyperParameterTuningJobResult createHyperParameterTuningJob(CreateHyperParameterTuningJobRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateHyperParameterTuningJob(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateHyperParameterTuningJob operation, timing the marshalling step
 * and the overall client execution through the request-metrics collector. Metrics collection is
 * always finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateHyperParameterTuningJobResult executeCreateHyperParameterTuningJob(CreateHyperParameterTuningJobRequest createHyperParameterTuningJobRequest) {

    ExecutionContext executionContext = createExecutionContext(createHyperParameterTuningJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateHyperParameterTuningJobRequest> request = null;
    Response<CreateHyperParameterTuningJobResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateHyperParameterTuningJobRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(createHyperParameterTuningJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateHyperParameterTuningJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateHyperParameterTuningJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new CreateHyperParameterTuningJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Creates a model in Amazon SageMaker. The request names the model and describes a primary container: the docker
 * image containing inference code, artifacts from prior training, and a custom environment map the inference code
 * uses when you deploy the model for predictions.
 * </p>
 * <p>
 * Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform
 * job.
 * </p>
 * <p>
 * To host your model, create an endpoint configuration with the <code>CreateEndpointConfig</code> API and then an
 * endpoint with the <code>CreateEndpoint</code> API; Amazon SageMaker then deploys all of the containers defined
 * for the model in the hosting environment. To run a batch transform using your model, start a job with the
 * <code>CreateTransformJob</code> API; Amazon SageMaker uses your model and dataset to get inferences, which are
 * saved to a specified S3 location.
 * </p>
 * <p>
 * The <code>CreateModel</code> request must define a container with the <code>PrimaryContainer</code> parameter.
 * It also provides an IAM role that Amazon SageMaker can assume to access model artifacts and the docker image for
 * deployment on ML compute hosting instances or for batch transform jobs, and that manages any permissions the
 * inference code needs — for example, to access other AWS resources.
 * </p>
 *
 * @param createModelRequest
 * @return Result of the CreateModel operation returned by the service.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateModel
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModel" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public CreateModelResult createModel(CreateModelRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateModel(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateModel operation, timing the marshalling step and the overall
 * client execution through the request-metrics collector. Metrics collection is always finalized
 * in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateModelResult executeCreateModel(CreateModelRequest createModelRequest) {

    ExecutionContext executionContext = createExecutionContext(createModelRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateModelRequest> request = null;
    Response<CreateModelResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateModelRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createModelRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModel");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateModelResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateModelResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Creates an Amazon SageMaker notebook instance — a machine learning (ML) compute instance running on a Jupyter
 * notebook. The request specifies the type of ML compute instance you want to run; Amazon SageMaker launches the
 * instance, installs common libraries for exploring datasets for model training, and attaches an ML storage
 * volume. Amazon SageMaker also provides a set of example notebooks, each demonstrating how to use Amazon
 * SageMaker with a specific algorithm or machine learning framework.
 * </p>
 * <p>
 * After receiving the request, Amazon SageMaker does the following:
 * </p>
 * <ol>
 * <li>
 * <p>
 * Creates a network interface in the Amazon SageMaker VPC.
 * </p>
 * </li>
 * <li>
 * <p>
 * (Option) If you specified <code>SubnetId</code>, creates a second network interface in your own VPC, inferred
 * from the subnet ID in the input, with the security group you specified in the request attached to it.
 * </p>
 * </li>
 * <li>
 * <p>
 * Launches an EC2 instance of the requested type in the Amazon SageMaker VPC, specifying both network interfaces
 * when a <code>SubnetId</code> was given — enabling inbound traffic from your own VPC to the notebook instance,
 * assuming the security groups allow it.
 * </p>
 * </li>
 * </ol>
 * <p>
 * After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can then
 * connect to the Jupyter server and work in Jupyter notebooks — for example, exploring a dataset for model
 * training, training a model, hosting models via Amazon SageMaker endpoints, and validating hosted models.
 * </p>
 * <p>
 * For more information, see <a href="http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html">How It
 * Works</a>.
 * </p>
 *
 * @param createNotebookInstanceRequest
 * @return Result of the CreateNotebookInstance operation returned by the service.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateNotebookInstance
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstance"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public CreateNotebookInstanceResult createNotebookInstance(CreateNotebookInstanceRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateNotebookInstance(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateNotebookInstance operation, timing the marshalling step and the
 * overall client execution through the request-metrics collector. Metrics collection is always
 * finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateNotebookInstanceResult executeCreateNotebookInstance(CreateNotebookInstanceRequest createNotebookInstanceRequest) {

    ExecutionContext executionContext = createExecutionContext(createNotebookInstanceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateNotebookInstanceRequest> request = null;
    Response<CreateNotebookInstanceResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createNotebookInstanceRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateNotebookInstance");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new CreateNotebookInstanceResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Creates a lifecycle configuration that you can associate with a notebook instance. A <i>lifecycle
 * configuration</i> is a collection of shell scripts that run when you create or start a notebook instance.
 * </p>
 * <p>
 * Each lifecycle configuration script has a limit of 16384 characters, and the <code>$PATH</code> environment
 * variable available to both scripts is <code>/sbin:bin:/usr/sbin:/usr/bin</code>.
 * </p>
 * <p>
 * CloudWatch Logs for notebook instance lifecycle configurations appear in log group
 * <code>/aws/sagemaker/NotebookInstances</code>, log stream
 * <code>[notebook-instance-name]/[LifecycleConfigHook]</code>.
 * </p>
 * <p>
 * Lifecycle configuration scripts cannot run for longer than 5 minutes; a script that exceeds 5 minutes fails, and
 * the notebook instance is not created or started.
 * </p>
 * <p>
 * For information about notebook instance lifestyle configurations, see <a>notebook-lifecycle-config</a>.
 * </p>
 *
 * @param createNotebookInstanceLifecycleConfigRequest
 * @return Result of the CreateNotebookInstanceLifecycleConfig operation returned by the service.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateNotebookInstanceLifecycleConfig
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstanceLifecycleConfig"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public CreateNotebookInstanceLifecycleConfigResult createNotebookInstanceLifecycleConfig(CreateNotebookInstanceLifecycleConfigRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateNotebookInstanceLifecycleConfig(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateNotebookInstanceLifecycleConfig operation, timing the marshalling
 * step and the overall client execution through the request-metrics collector. Metrics collection
 * is always finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateNotebookInstanceLifecycleConfigResult executeCreateNotebookInstanceLifecycleConfig(
        CreateNotebookInstanceLifecycleConfigRequest createNotebookInstanceLifecycleConfigRequest) {

    ExecutionContext executionContext = createExecutionContext(createNotebookInstanceLifecycleConfigRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateNotebookInstanceLifecycleConfigRequest> request = null;
    Response<CreateNotebookInstanceLifecycleConfigResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateNotebookInstanceLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(createNotebookInstanceLifecycleConfigRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateNotebookInstanceLifecycleConfig");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateNotebookInstanceLifecycleConfigResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new CreateNotebookInstanceLifecycleConfigResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon
 * SageMaker console, choosing <code>Open</code> next to a notebook instance opens a new tab showing the Jupyter
 * server home page from the notebook instance — the console uses this API to get the URL and show the page.
 * </p>
 * <p>
 * Access to this API, and to the URL it returns, can be restricted to a list of IP addresses you specify: attach
 * an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to
 * every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the
 * <code>NotIpAddress</code> condition operator and the <code>aws:SourceIP</code> condition context key to specify
 * the list of IP addresses that should have access. For more information, see <a>nbi-ip-filter</a>.
 * </p>
 *
 * @param createPresignedNotebookInstanceUrlRequest
 * @return Result of the CreatePresignedNotebookInstanceUrl operation returned by the service.
 * @sample AmazonSageMaker.CreatePresignedNotebookInstanceUrl
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrl"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public CreatePresignedNotebookInstanceUrlResult createPresignedNotebookInstanceUrl(CreatePresignedNotebookInstanceUrlRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreatePresignedNotebookInstanceUrl(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreatePresignedNotebookInstanceUrl operation, timing the marshalling
 * step and the overall client execution through the request-metrics collector. Metrics collection
 * is always finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreatePresignedNotebookInstanceUrlResult executeCreatePresignedNotebookInstanceUrl(
        CreatePresignedNotebookInstanceUrlRequest createPresignedNotebookInstanceUrlRequest) {

    ExecutionContext executionContext = createExecutionContext(createPresignedNotebookInstanceUrlRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreatePresignedNotebookInstanceUrlRequest> request = null;
    Response<CreatePresignedNotebookInstanceUrlResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreatePresignedNotebookInstanceUrlRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(createPresignedNotebookInstanceUrlRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreatePresignedNotebookInstanceUrl");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreatePresignedNotebookInstanceUrlResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new CreatePresignedNotebookInstanceUrlResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to
 * an Amazon S3 location that you specify.
 * </p>
 * <p>
 * If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model
 * artifacts as part of the model. You can also use the artifacts in a deep learning service other than Amazon
 * SageMaker, provided that you know how to use them for inferences.
 * </p>
 * <p>
 * In the request body, you provide the following:
 * </p>
 * <ul>
 * <li>
 * <p>
 * <code>AlgorithmSpecification</code> - Identifies the training algorithm to use.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>HyperParameters</code> - Algorithm-specific parameters that influence the quality of the final model. For
 * the hyperparameters of each training algorithm provided by Amazon SageMaker, see <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html">Algorithms</a>.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>InputDataConfig</code> - Describes the training dataset and the Amazon S3 location where it is stored.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>OutputDataConfig</code> - Identifies the Amazon S3 location where Amazon SageMaker saves the results of
 * model training.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>ResourceConfig</code> - Identifies the resources — ML compute instances and ML storage volumes — to deploy
 * for model training. In distributed training, you specify more than one instance.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>RoleARN</code> - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to perform tasks on your
 * behalf during model training; grant this role the permissions Amazon SageMaker needs to complete training.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>StoppingCondition</code> - Sets a duration for training, useful for capping model training costs.
 * </p>
 * </li>
 * </ul>
 * <p>
 * For more information about Amazon SageMaker, see <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html">How It Works</a>.
 * </p>
 *
 * @param createTrainingJobRequest
 * @return Result of the CreateTrainingJob operation returned by the service.
 * @throws ResourceInUseException
 *         Resource being accessed is in use.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateTrainingJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJob" target="_top">AWS
 *      API Documentation</a>
 */
@Override
public CreateTrainingJobResult createTrainingJob(CreateTrainingJobRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateTrainingJob(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateTrainingJob operation, timing the marshalling step and the
 * overall client execution through the request-metrics collector. Metrics collection is always
 * finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateTrainingJobResult executeCreateTrainingJob(CreateTrainingJobRequest createTrainingJobRequest) {

    ExecutionContext executionContext = createExecutionContext(createTrainingJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateTrainingJobRequest> request = null;
    Response<CreateTrainingJobResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateTrainingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTrainingJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTrainingJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateTrainingJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTrainingJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these
 * results to an Amazon S3 location that you specify. To perform batch transformations, you create a transform job
 * and use the data that you have readily available.
 * </p>
 * <p>
 * In the request body, you provide the following:
 * </p>
 * <ul>
 * <li>
 * <p>
 * <code>TransformJobName</code> - Identifies the transform job; the name must be unique within an AWS Region in an
 * AWS account.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>ModelName</code> - Identifies the model to use; it must be the name of an existing Amazon SageMaker model
 * in the same AWS Region and AWS account. For information on creating a model, see <a>CreateModel</a>.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>TransformInput</code> - Describes the dataset to be transformed and the Amazon S3 location where it is
 * stored.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>TransformOutput</code> - Identifies the Amazon S3 location where Amazon SageMaker saves the results from
 * the transform job.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>TransformResources</code> - Identifies the ML compute instances for the transform job.
 * </p>
 * </li>
 * </ul>
 * <p>
 * For more information about how batch transformation works Amazon SageMaker, see <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html">How It Works</a>.
 * </p>
 *
 * @param createTransformJobRequest
 * @return Result of the CreateTransformJob operation returned by the service.
 * @throws ResourceInUseException
 *         Resource being accessed is in use.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.CreateTransformJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTransformJob" target="_top">AWS
 *      API Documentation</a>
 */
@Override
public CreateTransformJobResult createTransformJob(CreateTransformJobRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeCreateTransformJob(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the CreateTransformJob operation, timing the marshalling step and the
 * overall client execution through the request-metrics collector. Metrics collection is always
 * finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final CreateTransformJobResult executeCreateTransformJob(CreateTransformJobRequest createTransformJobRequest) {

    ExecutionContext executionContext = createExecutionContext(createTransformJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateTransformJobRequest> request = null;
    Response<CreateTransformJobResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new CreateTransformJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTransformJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTransformJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<CreateTransformJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTransformJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was
 * created, and retires any custom KMS key grants associated with the endpoint — meaning you don't need to use the
 * <a href="http://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html">RevokeGrant</a> API call.
 * </p>
 *
 * @param deleteEndpointRequest
 * @return Result of the DeleteEndpoint operation returned by the service.
 * @sample AmazonSageMaker.DeleteEndpoint
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpoint" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public DeleteEndpointResult deleteEndpoint(DeleteEndpointRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeDeleteEndpoint(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the DeleteEndpoint operation, timing the marshalling step and the overall
 * client execution through the request-metrics collector. Metrics collection is always finalized
 * in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final DeleteEndpointResult executeDeleteEndpoint(DeleteEndpointRequest deleteEndpointRequest) {

    ExecutionContext executionContext = createExecutionContext(deleteEndpointRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DeleteEndpointRequest> request = null;
    Response<DeleteEndpointResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new DeleteEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteEndpointRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEndpoint");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<DeleteEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteEndpointResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Deletes an endpoint configuration. The <code>DeleteEndpointConfig</code> API deletes only the specified
 * configuration; endpoints created using the configuration are not deleted.
 * </p>
 *
 * @param deleteEndpointConfigRequest
 * @return Result of the DeleteEndpointConfig operation returned by the service.
 * @sample AmazonSageMaker.DeleteEndpointConfig
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfig" target="_top">AWS
 *      API Documentation</a>
 */
@Override
public DeleteEndpointConfigResult deleteEndpointConfig(DeleteEndpointConfigRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeDeleteEndpointConfig(beforeClientExecution(request));
}
/**
 * Marshalls and invokes the DeleteEndpointConfig operation, timing the marshalling step and the
 * overall client execution through the request-metrics collector. Metrics collection is always
 * finalized in the {@code finally} blocks, even when marshalling or invocation throws.
 */
@SdkInternalApi
final DeleteEndpointConfigResult executeDeleteEndpointConfig(DeleteEndpointConfigRequest deleteEndpointConfigRequest) {

    ExecutionContext executionContext = createExecutionContext(deleteEndpointConfigRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DeleteEndpointConfigRequest> request = null;
    Response<DeleteEndpointConfigResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new DeleteEndpointConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteEndpointConfigRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEndpointConfig");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local "URI cachedEndpoint = null;" — it was assigned and never read.
        HttpResponseHandler<AmazonWebServiceResponse<DeleteEndpointConfigResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteEndpointConfigResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Deletes a model. The <code>DeleteModel</code> API deletes only the model entry that was created in Amazon
 * SageMaker when you called the <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html">CreateModel</a> API; model artifacts,
 * inference code, and the IAM role specified when creating the model are not deleted.
 * </p>
 *
 * @param deleteModelRequest
 * @return Result of the DeleteModel operation returned by the service.
 * @sample AmazonSageMaker.DeleteModel
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModel" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public DeleteModelResult deleteModel(DeleteModelRequest request) {
    // Run the pre-execution handler chain, then delegate to the internal executor.
    return executeDeleteModel(beforeClientExecution(request));
}
@SdkInternalApi
final DeleteModelResult executeDeleteModel(DeleteModelRequest deleteModelRequest) {
ExecutionContext executionContext = createExecutionContext(deleteModelRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteModelRequest> request = null;
Response<DeleteModelResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteModelRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteModelRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteModel");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DeleteModelResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteModelResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the
* <code>StopNotebookInstance</code> API.
* </p>
* <important>
* <p>
* When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance,
* and deletes the ML storage volume and the network interface associated with the notebook instance.
* </p>
* </important>
*
* @param deleteNotebookInstanceRequest
* @return Result of the DeleteNotebookInstance operation returned by the service.
* @sample AmazonSageMaker.DeleteNotebookInstance
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstance"
* target="_top">AWS API Documentation</a>
*/
@Override
public DeleteNotebookInstanceResult deleteNotebookInstance(DeleteNotebookInstanceRequest request) {
request = beforeClientExecution(request);
return executeDeleteNotebookInstance(request);
}
@SdkInternalApi
final DeleteNotebookInstanceResult executeDeleteNotebookInstance(DeleteNotebookInstanceRequest deleteNotebookInstanceRequest) {
ExecutionContext executionContext = createExecutionContext(deleteNotebookInstanceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteNotebookInstanceRequest> request = null;
Response<DeleteNotebookInstanceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteNotebookInstanceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteNotebookInstance");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DeleteNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DeleteNotebookInstanceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Deletes a notebook instance lifecycle configuration.
* </p>
*
* @param deleteNotebookInstanceLifecycleConfigRequest
* @return Result of the DeleteNotebookInstanceLifecycleConfig operation returned by the service.
* @sample AmazonSageMaker.DeleteNotebookInstanceLifecycleConfig
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstanceLifecycleConfig"
* target="_top">AWS API Documentation</a>
*/
@Override
public DeleteNotebookInstanceLifecycleConfigResult deleteNotebookInstanceLifecycleConfig(DeleteNotebookInstanceLifecycleConfigRequest request) {
request = beforeClientExecution(request);
return executeDeleteNotebookInstanceLifecycleConfig(request);
}
@SdkInternalApi
final DeleteNotebookInstanceLifecycleConfigResult executeDeleteNotebookInstanceLifecycleConfig(
DeleteNotebookInstanceLifecycleConfigRequest deleteNotebookInstanceLifecycleConfigRequest) {
ExecutionContext executionContext = createExecutionContext(deleteNotebookInstanceLifecycleConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteNotebookInstanceLifecycleConfigRequest> request = null;
Response<DeleteNotebookInstanceLifecycleConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteNotebookInstanceLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(deleteNotebookInstanceLifecycleConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteNotebookInstanceLifecycleConfig");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DeleteNotebookInstanceLifecycleConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DeleteNotebookInstanceLifecycleConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Deletes the specified tags from an Amazon SageMaker resource.
* </p>
* <p>
* To list a resource's tags, use the <code>ListTags</code> API.
* </p>
*
* @param deleteTagsRequest
* @return Result of the DeleteTags operation returned by the service.
* @sample AmazonSageMaker.DeleteTags
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTags" target="_top">AWS API
* Documentation</a>
*/
@Override
public DeleteTagsResult deleteTags(DeleteTagsRequest request) {
request = beforeClientExecution(request);
return executeDeleteTags(request);
}
@SdkInternalApi
final DeleteTagsResult executeDeleteTags(DeleteTagsRequest deleteTagsRequest) {
ExecutionContext executionContext = createExecutionContext(deleteTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteTagsRequest> request = null;
Response<DeleteTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteTagsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteTags");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DeleteTagsResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteTagsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns the description of an endpoint.
* </p>
*
* @param describeEndpointRequest
* @return Result of the DescribeEndpoint operation returned by the service.
* @sample AmazonSageMaker.DescribeEndpoint
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpoint" target="_top">AWS API
* Documentation</a>
*/
@Override
public DescribeEndpointResult describeEndpoint(DescribeEndpointRequest request) {
request = beforeClientExecution(request);
return executeDescribeEndpoint(request);
}
@SdkInternalApi
final DescribeEndpointResult executeDescribeEndpoint(DescribeEndpointRequest describeEndpointRequest) {
ExecutionContext executionContext = createExecutionContext(describeEndpointRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeEndpointRequest> request = null;
Response<DescribeEndpointResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeEndpointRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeEndpoint");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeEndpointResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns the description of an endpoint configuration created using the <code>CreateEndpointConfig</code> API.
* </p>
*
* @param describeEndpointConfigRequest
* @return Result of the DescribeEndpointConfig operation returned by the service.
* @sample AmazonSageMaker.DescribeEndpointConfig
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfig"
* target="_top">AWS API Documentation</a>
*/
@Override
public DescribeEndpointConfigResult describeEndpointConfig(DescribeEndpointConfigRequest request) {
request = beforeClientExecution(request);
return executeDescribeEndpointConfig(request);
}
@SdkInternalApi
final DescribeEndpointConfigResult executeDescribeEndpointConfig(DescribeEndpointConfigRequest describeEndpointConfigRequest) {
ExecutionContext executionContext = createExecutionContext(describeEndpointConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeEndpointConfigRequest> request = null;
Response<DescribeEndpointConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeEndpointConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeEndpointConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeEndpointConfig");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeEndpointConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeEndpointConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Gets a description of a hyperparameter tuning job.
* </p>
*
* @param describeHyperParameterTuningJobRequest
* @return Result of the DescribeHyperParameterTuningJob operation returned by the service.
* @throws ResourceNotFoundException
* Resource being access is not found.
* @sample AmazonSageMaker.DescribeHyperParameterTuningJob
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeHyperParameterTuningJob"
* target="_top">AWS API Documentation</a>
*/
@Override
public DescribeHyperParameterTuningJobResult describeHyperParameterTuningJob(DescribeHyperParameterTuningJobRequest request) {
request = beforeClientExecution(request);
return executeDescribeHyperParameterTuningJob(request);
}
@SdkInternalApi
final DescribeHyperParameterTuningJobResult executeDescribeHyperParameterTuningJob(
DescribeHyperParameterTuningJobRequest describeHyperParameterTuningJobRequest) {
ExecutionContext executionContext = createExecutionContext(describeHyperParameterTuningJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeHyperParameterTuningJobRequest> request = null;
Response<DescribeHyperParameterTuningJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeHyperParameterTuningJobRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeHyperParameterTuningJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeHyperParameterTuningJob");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeHyperParameterTuningJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeHyperParameterTuningJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Describes a model that you created using the <code>CreateModel</code> API.
* </p>
*
* @param describeModelRequest
* @return Result of the DescribeModel operation returned by the service.
* @sample AmazonSageMaker.DescribeModel
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModel" target="_top">AWS API
* Documentation</a>
*/
@Override
public DescribeModelResult describeModel(DescribeModelRequest request) {
request = beforeClientExecution(request);
return executeDescribeModel(request);
}
@SdkInternalApi
final DescribeModelResult executeDescribeModel(DescribeModelRequest describeModelRequest) {
ExecutionContext executionContext = createExecutionContext(describeModelRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeModelRequest> request = null;
Response<DescribeModelResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeModelRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeModelRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeModel");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeModelResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeModelResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns information about a notebook instance.
* </p>
*
* @param describeNotebookInstanceRequest
* @return Result of the DescribeNotebookInstance operation returned by the service.
* @sample AmazonSageMaker.DescribeNotebookInstance
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstance"
* target="_top">AWS API Documentation</a>
*/
@Override
public DescribeNotebookInstanceResult describeNotebookInstance(DescribeNotebookInstanceRequest request) {
request = beforeClientExecution(request);
return executeDescribeNotebookInstance(request);
}
@SdkInternalApi
final DescribeNotebookInstanceResult executeDescribeNotebookInstance(DescribeNotebookInstanceRequest describeNotebookInstanceRequest) {
ExecutionContext executionContext = createExecutionContext(describeNotebookInstanceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeNotebookInstanceRequest> request = null;
Response<DescribeNotebookInstanceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeNotebookInstanceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeNotebookInstance");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeNotebookInstanceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns a description of a notebook instance lifecycle configuration.
* </p>
* <p>
* For information about notebook instance lifestyle configurations, see <a>notebook-lifecycle-config</a>.
* </p>
*
* @param describeNotebookInstanceLifecycleConfigRequest
* @return Result of the DescribeNotebookInstanceLifecycleConfig operation returned by the service.
* @sample AmazonSageMaker.DescribeNotebookInstanceLifecycleConfig
* @see <a
* href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceLifecycleConfig"
* target="_top">AWS API Documentation</a>
*/
@Override
public DescribeNotebookInstanceLifecycleConfigResult describeNotebookInstanceLifecycleConfig(DescribeNotebookInstanceLifecycleConfigRequest request) {
request = beforeClientExecution(request);
return executeDescribeNotebookInstanceLifecycleConfig(request);
}
@SdkInternalApi
final DescribeNotebookInstanceLifecycleConfigResult executeDescribeNotebookInstanceLifecycleConfig(
DescribeNotebookInstanceLifecycleConfigRequest describeNotebookInstanceLifecycleConfigRequest) {
ExecutionContext executionContext = createExecutionContext(describeNotebookInstanceLifecycleConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeNotebookInstanceLifecycleConfigRequest> request = null;
Response<DescribeNotebookInstanceLifecycleConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeNotebookInstanceLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(describeNotebookInstanceLifecycleConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeNotebookInstanceLifecycleConfig");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeNotebookInstanceLifecycleConfigResult>> responseHandler = protocolFactory
.createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DescribeNotebookInstanceLifecycleConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns information about a training job.
* </p>
*
* @param describeTrainingJobRequest
* @return Result of the DescribeTrainingJob operation returned by the service.
* @throws ResourceNotFoundException
* Resource being access is not found.
* @sample AmazonSageMaker.DescribeTrainingJob
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJob" target="_top">AWS
* API Documentation</a>
*/
@Override
public DescribeTrainingJobResult describeTrainingJob(DescribeTrainingJobRequest request) {
request = beforeClientExecution(request);
return executeDescribeTrainingJob(request);
}
@SdkInternalApi
final DescribeTrainingJobResult executeDescribeTrainingJob(DescribeTrainingJobRequest describeTrainingJobRequest) {
ExecutionContext executionContext = createExecutionContext(describeTrainingJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTrainingJobRequest> request = null;
Response<DescribeTrainingJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTrainingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeTrainingJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTrainingJob");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeTrainingJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeTrainingJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Returns information about a transform job.
* </p>
*
* @param describeTransformJobRequest
* @return Result of the DescribeTransformJob operation returned by the service.
* @throws ResourceNotFoundException
* Resource being access is not found.
* @sample AmazonSageMaker.DescribeTransformJob
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTransformJob" target="_top">AWS
* API Documentation</a>
*/
@Override
public DescribeTransformJobResult describeTransformJob(DescribeTransformJobRequest request) {
request = beforeClientExecution(request);
return executeDescribeTransformJob(request);
}
@SdkInternalApi
final DescribeTransformJobResult executeDescribeTransformJob(DescribeTransformJobRequest describeTransformJobRequest) {
ExecutionContext executionContext = createExecutionContext(describeTransformJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DescribeTransformJobRequest> request = null;
Response<DescribeTransformJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DescribeTransformJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(describeTransformJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DescribeTransformJob");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<DescribeTransformJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DescribeTransformJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Lists endpoint configurations.
* </p>
*
* @param listEndpointConfigsRequest
* @return Result of the ListEndpointConfigs operation returned by the service.
* @sample AmazonSageMaker.ListEndpointConfigs
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigs" target="_top">AWS
* API Documentation</a>
*/
@Override
public ListEndpointConfigsResult listEndpointConfigs(ListEndpointConfigsRequest request) {
request = beforeClientExecution(request);
return executeListEndpointConfigs(request);
}
@SdkInternalApi
final ListEndpointConfigsResult executeListEndpointConfigs(ListEndpointConfigsRequest listEndpointConfigsRequest) {
ExecutionContext executionContext = createExecutionContext(listEndpointConfigsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListEndpointConfigsRequest> request = null;
Response<ListEndpointConfigsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListEndpointConfigsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listEndpointConfigsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListEndpointConfigs");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<ListEndpointConfigsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListEndpointConfigsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Lists endpoints.
* </p>
*
* @param listEndpointsRequest
* @return Result of the ListEndpoints operation returned by the service.
* @sample AmazonSageMaker.ListEndpoints
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpoints" target="_top">AWS API
* Documentation</a>
*/
@Override
public ListEndpointsResult listEndpoints(ListEndpointsRequest request) {
request = beforeClientExecution(request);
return executeListEndpoints(request);
}
@SdkInternalApi
final ListEndpointsResult executeListEndpoints(ListEndpointsRequest listEndpointsRequest) {
ExecutionContext executionContext = createExecutionContext(listEndpointsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<ListEndpointsRequest> request = null;
Response<ListEndpointsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new ListEndpointsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listEndpointsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListEndpoints");
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
URI cachedEndpoint = null;
HttpResponseHandler<AmazonWebServiceResponse<ListEndpointsResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListEndpointsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
* <p>
* Gets a list of <a>HyperParameterTuningJobSummary</a> objects that describe the hyperparameter tuning jobs
* launched in your account.
* </p>
*
* @param listHyperParameterTuningJobsRequest
* @return Result of the ListHyperParameterTuningJobs operation returned by the service.
* @sample AmazonSageMaker.ListHyperParameterTuningJobs
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListHyperParameterTuningJobs"
* target="_top">AWS API Documentation</a>
*/
@Override
public ListHyperParameterTuningJobsResult listHyperParameterTuningJobs(ListHyperParameterTuningJobsRequest request) {
request = beforeClientExecution(request);
return executeListHyperParameterTuningJobs(request);
}
@SdkInternalApi
final ListHyperParameterTuningJobsResult executeListHyperParameterTuningJobs(ListHyperParameterTuningJobsRequest listHyperParameterTuningJobsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listHyperParameterTuningJobsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListHyperParameterTuningJobsRequest> request = null;
    Response<ListHyperParameterTuningJobsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListHyperParameterTuningJobsRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(listHyperParameterTuningJobsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListHyperParameterTuningJobs");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListHyperParameterTuningJobsResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new ListHyperParameterTuningJobsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Lists models created with the <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html">CreateModel</a> API.
 * </p>
 *
 * @param request
 *        the ListModels request.
 * @return Result of the ListModels operation returned by the service.
 * @sample AmazonSageMaker.ListModels
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModels" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public ListModelsResult listModels(ListModelsRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListModels(beforeClientExecution(request));
}
@SdkInternalApi
final ListModelsResult executeListModels(ListModelsRequest listModelsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listModelsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListModelsRequest> request = null;
    Response<ListModelsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListModelsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listModelsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListModels");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListModelsResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
                .withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListModelsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Lists notebook instance lifecycle configurations created with the <a>CreateNotebookInstanceLifecycleConfig</a>
 * API.
 * </p>
 *
 * @param request
 *        the ListNotebookInstanceLifecycleConfigs request.
 * @return Result of the ListNotebookInstanceLifecycleConfigs operation returned by the service.
 * @sample AmazonSageMaker.ListNotebookInstanceLifecycleConfigs
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstanceLifecycleConfigs"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public ListNotebookInstanceLifecycleConfigsResult listNotebookInstanceLifecycleConfigs(ListNotebookInstanceLifecycleConfigsRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListNotebookInstanceLifecycleConfigs(beforeClientExecution(request));
}
@SdkInternalApi
final ListNotebookInstanceLifecycleConfigsResult executeListNotebookInstanceLifecycleConfigs(
        ListNotebookInstanceLifecycleConfigsRequest listNotebookInstanceLifecycleConfigsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listNotebookInstanceLifecycleConfigsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListNotebookInstanceLifecycleConfigsRequest> request = null;
    Response<ListNotebookInstanceLifecycleConfigsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListNotebookInstanceLifecycleConfigsRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(listNotebookInstanceLifecycleConfigsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListNotebookInstanceLifecycleConfigs");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListNotebookInstanceLifecycleConfigsResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new ListNotebookInstanceLifecycleConfigsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.
 * </p>
 *
 * @param request
 *        the ListNotebookInstances request.
 * @return Result of the ListNotebookInstances operation returned by the service.
 * @sample AmazonSageMaker.ListNotebookInstances
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstances"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public ListNotebookInstancesResult listNotebookInstances(ListNotebookInstancesRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListNotebookInstances(beforeClientExecution(request));
}
@SdkInternalApi
final ListNotebookInstancesResult executeListNotebookInstances(ListNotebookInstancesRequest listNotebookInstancesRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listNotebookInstancesRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListNotebookInstancesRequest> request = null;
    Response<ListNotebookInstancesResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListNotebookInstancesRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listNotebookInstancesRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListNotebookInstances");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListNotebookInstancesResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                        new ListNotebookInstancesResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Returns the tags for the specified Amazon SageMaker resource.
 * </p>
 *
 * @param request
 *        the ListTags request.
 * @return Result of the ListTags operation returned by the service.
 * @sample AmazonSageMaker.ListTags
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTags" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public ListTagsResult listTags(ListTagsRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListTags(beforeClientExecution(request));
}
@SdkInternalApi
final ListTagsResult executeListTags(ListTagsRequest listTagsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listTagsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListTagsRequest> request = null;
    Response<ListTagsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListTagsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listTagsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTags");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListTagsResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
                .withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListTagsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Lists training jobs.
 * </p>
 *
 * @param request
 *        the ListTrainingJobs request.
 * @return Result of the ListTrainingJobs operation returned by the service.
 * @sample AmazonSageMaker.ListTrainingJobs
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobs" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public ListTrainingJobsResult listTrainingJobs(ListTrainingJobsRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListTrainingJobs(beforeClientExecution(request));
}
@SdkInternalApi
final ListTrainingJobsResult executeListTrainingJobs(ListTrainingJobsRequest listTrainingJobsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listTrainingJobsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListTrainingJobsRequest> request = null;
    Response<ListTrainingJobsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListTrainingJobsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listTrainingJobsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTrainingJobs");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListTrainingJobsResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListTrainingJobsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Gets a list of <a>TrainingJobSummary</a> objects that describe the training jobs that a hyperparameter tuning job
 * launched.
 * </p>
 *
 * @param request
 *        the ListTrainingJobsForHyperParameterTuningJob request.
 * @return Result of the ListTrainingJobsForHyperParameterTuningJob operation returned by the service.
 * @throws ResourceNotFoundException
 *         Resource being access is not found.
 * @sample AmazonSageMaker.ListTrainingJobsForHyperParameterTuningJob
 * @see <a
 *      href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobsForHyperParameterTuningJob"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public ListTrainingJobsForHyperParameterTuningJobResult listTrainingJobsForHyperParameterTuningJob(ListTrainingJobsForHyperParameterTuningJobRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListTrainingJobsForHyperParameterTuningJob(beforeClientExecution(request));
}
@SdkInternalApi
final ListTrainingJobsForHyperParameterTuningJobResult executeListTrainingJobsForHyperParameterTuningJob(
        ListTrainingJobsForHyperParameterTuningJobRequest listTrainingJobsForHyperParameterTuningJobRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listTrainingJobsForHyperParameterTuningJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListTrainingJobsForHyperParameterTuningJobRequest> request = null;
    Response<ListTrainingJobsForHyperParameterTuningJobResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListTrainingJobsForHyperParameterTuningJobRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(listTrainingJobsForHyperParameterTuningJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTrainingJobsForHyperParameterTuningJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListTrainingJobsForHyperParameterTuningJobResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                        new ListTrainingJobsForHyperParameterTuningJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Lists transform jobs.
 * </p>
 *
 * @param request
 *        the ListTransformJobs request.
 * @return Result of the ListTransformJobs operation returned by the service.
 * @sample AmazonSageMaker.ListTransformJobs
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTransformJobs" target="_top">AWS
 *      API Documentation</a>
 */
@Override
public ListTransformJobsResult listTransformJobs(ListTransformJobsRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeListTransformJobs(beforeClientExecution(request));
}
@SdkInternalApi
final ListTransformJobsResult executeListTransformJobs(ListTransformJobsRequest listTransformJobsRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(listTransformJobsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListTransformJobsRequest> request = null;
    Response<ListTransformJobsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListTransformJobsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(listTransformJobsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "ListTransformJobs");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<ListTransformJobsResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new ListTransformJobsResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume.
 * After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to
 * <code>InService</code>. A notebook instance's status must be <code>InService</code> before you can connect to
 * your Jupyter notebook.
 * </p>
 *
 * @param request
 *        the StartNotebookInstance request.
 * @return Result of the StartNotebookInstance operation returned by the service.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.StartNotebookInstance
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstance"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public StartNotebookInstanceResult startNotebookInstance(StartNotebookInstanceRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeStartNotebookInstance(beforeClientExecution(request));
}
@SdkInternalApi
final StartNotebookInstanceResult executeStartNotebookInstance(StartNotebookInstanceRequest startNotebookInstanceRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(startNotebookInstanceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<StartNotebookInstanceRequest> request = null;
    Response<StartNotebookInstanceResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new StartNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(startNotebookInstanceRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StartNotebookInstance");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<StartNotebookInstanceResult>> responseHandler = protocolFactory
                .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                        new StartNotebookInstanceResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.
 * </p>
 * <p>
 * All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All
 * data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning
 * job moves to the <code>Stopped</code> state, it releases all reserved resources for the tuning job.
 * </p>
 *
 * @param request
 *        the StopHyperParameterTuningJob request.
 * @return Result of the StopHyperParameterTuningJob operation returned by the service.
 * @throws ResourceNotFoundException
 *         Resource being access is not found.
 * @sample AmazonSageMaker.StopHyperParameterTuningJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopHyperParameterTuningJob"
 *      target="_top">AWS API Documentation</a>
 */
@Override
public StopHyperParameterTuningJobResult stopHyperParameterTuningJob(StopHyperParameterTuningJobRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeStopHyperParameterTuningJob(beforeClientExecution(request));
}
@SdkInternalApi
final StopHyperParameterTuningJobResult executeStopHyperParameterTuningJob(StopHyperParameterTuningJobRequest stopHyperParameterTuningJobRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(stopHyperParameterTuningJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<StopHyperParameterTuningJobRequest> request = null;
    Response<StopHyperParameterTuningJobResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new StopHyperParameterTuningJobRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(stopHyperParameterTuningJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StopHyperParameterTuningJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<StopHyperParameterTuningJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new StopHyperParameterTuningJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage
 * volume from it. Amazon SageMaker preserves the ML storage volume.
 * </p>
 * <p>
 * To access data on the ML storage volume for a notebook instance that has been terminated, call the
 * <code>StartNotebookInstance</code> API. <code>StartNotebookInstance</code> launches another ML compute instance,
 * configures it, and attaches the preserved ML storage volume so you can continue your work.
 * </p>
 *
 * @param request
 *        the StopNotebookInstance request.
 * @return Result of the StopNotebookInstance operation returned by the service.
 * @sample AmazonSageMaker.StopNotebookInstance
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstance" target="_top">AWS
 *      API Documentation</a>
 */
@Override
public StopNotebookInstanceResult stopNotebookInstance(StopNotebookInstanceRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeStopNotebookInstance(beforeClientExecution(request));
}
@SdkInternalApi
final StopNotebookInstanceResult executeStopNotebookInstance(StopNotebookInstanceRequest stopNotebookInstanceRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(stopNotebookInstanceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<StopNotebookInstanceRequest> request = null;
    Response<StopNotebookInstanceResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new StopNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(stopNotebookInstanceRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StopNotebookInstance");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<StopNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new StopNotebookInstanceResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the <code>SIGTERM</code> signal, which
 * delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts,
 * so the results of the training is not lost.
 * </p>
 * <p>
 * Training algorithms provided by Amazon SageMaker save the intermediate results of a model training job. This
 * intermediate data is a valid model artifact. You can use the model artifacts that are saved when Amazon SageMaker
 * stops a training job to create a model.
 * </p>
 * <p>
 * When it receives a <code>StopTrainingJob</code> request, Amazon SageMaker changes the status of the job to
 * <code>Stopping</code>. After Amazon SageMaker stops the job, it sets the status to <code>Stopped</code>.
 * </p>
 *
 * @param request
 *        the StopTrainingJob request.
 * @return Result of the StopTrainingJob operation returned by the service.
 * @throws ResourceNotFoundException
 *         Resource being access is not found.
 * @sample AmazonSageMaker.StopTrainingJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJob" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public StopTrainingJobResult stopTrainingJob(StopTrainingJobRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeStopTrainingJob(beforeClientExecution(request));
}
@SdkInternalApi
final StopTrainingJobResult executeStopTrainingJob(StopTrainingJobRequest stopTrainingJobRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(stopTrainingJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<StopTrainingJobRequest> request = null;
    Response<StopTrainingJobResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new StopTrainingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(stopTrainingJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StopTrainingJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<StopTrainingJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new StopTrainingJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Stops a transform job.
 * </p>
 * <p>
 * When Amazon SageMaker receives a <code>StopTransformJob</code> request, the status of the job changes to
 * <code>Stopping</code>. After Amazon SageMaker stops the job, the status is set to <code>Stopped</code>. When you
 * stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
 * </p>
 *
 * @param request
 *        the StopTransformJob request.
 * @return Result of the StopTransformJob operation returned by the service.
 * @throws ResourceNotFoundException
 *         Resource being access is not found.
 * @sample AmazonSageMaker.StopTransformJob
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTransformJob" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public StopTransformJobResult stopTransformJob(StopTransformJobRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeStopTransformJob(beforeClientExecution(request));
}
@SdkInternalApi
final StopTransformJobResult executeStopTransformJob(StopTransformJobRequest stopTransformJobRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(stopTransformJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<StopTransformJobRequest> request = null;
    Response<StopTransformJobResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new StopTransformJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(stopTransformJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "StopTransformJob");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<StopTransformJobResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new StopTransformJobResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
 * <p>
 * Deploys the new <code>EndpointConfig</code> specified in the request, switches to using newly created endpoint,
 * and then deletes resources provisioned for the endpoint using the previous <code>EndpointConfig</code> (there is
 * no availability loss).
 * </p>
 * <p>
 * When Amazon SageMaker receives the request, it sets the endpoint status to <code>Updating</code>. After updating
 * the endpoint, it sets the status to <code>InService</code>. To check the status of an endpoint, use the <a
 * href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html">DescribeEndpoint</a> API.
 * </p>
 * <note>
 * <p>
 * You cannot update an endpoint with the current <code>EndpointConfig</code>. To update an endpoint, you must
 * create a new <code>EndpointConfig</code>.
 * </p>
 * </note>
 *
 * @param request
 *        the UpdateEndpoint request.
 * @return Result of the UpdateEndpoint operation returned by the service.
 * @throws ResourceLimitExceededException
 *         You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
 *         created.
 * @sample AmazonSageMaker.UpdateEndpoint
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpoint" target="_top">AWS API
 *      Documentation</a>
 */
@Override
public UpdateEndpointResult updateEndpoint(UpdateEndpointRequest request) {
    // Run the pre-execution request-handler chain, then delegate to the internal implementation.
    return executeUpdateEndpoint(beforeClientExecution(request));
}
@SdkInternalApi
final UpdateEndpointResult executeUpdateEndpoint(UpdateEndpointRequest updateEndpointRequest) {
    // Marshals the request, signs and sends it, and unmarshals the JSON response,
    // recording client-side latency metrics throughout.
    ExecutionContext executionContext = createExecutionContext(updateEndpointRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateEndpointRequest> request = null;
    Response<UpdateEndpointResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateEndpointRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateEndpoint");
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        // Non-streaming JSON response; unmarshalled into the typed result object.
        HttpResponseHandler<AmazonWebServiceResponse<UpdateEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new UpdateEndpointResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Always close out metrics collection, even when marshalling or the call fails.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
* <p>
* Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant
* associated with an existing endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to
* <code>Updating</code>. After updating the endpoint, it sets the status to <code>InService</code>. To check the
* status of an endpoint, use the <a
* href="http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html">DescribeEndpoint</a> API.
* </p>
*
* @param updateEndpointWeightsAndCapacitiesRequest
* @return Result of the UpdateEndpointWeightsAndCapacities operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.UpdateEndpointWeightsAndCapacities
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacities"
* target="_top">AWS API Documentation</a>
*/
@Override
public UpdateEndpointWeightsAndCapacitiesResult updateEndpointWeightsAndCapacities(UpdateEndpointWeightsAndCapacitiesRequest request) {
    // Run the pre-execution request handlers, then hand off to the internal executor.
    return executeUpdateEndpointWeightsAndCapacities(beforeClientExecution(request));
}
@SdkInternalApi
final UpdateEndpointWeightsAndCapacitiesResult executeUpdateEndpointWeightsAndCapacities(
        UpdateEndpointWeightsAndCapacitiesRequest updateEndpointWeightsAndCapacitiesRequest) {
    // Internal execution path for UpdateEndpointWeightsAndCapacities: marshal,
    // dispatch, unmarshal, with client-side metrics recorded around each phase.
    ExecutionContext executionContext = createExecutionContext(updateEndpointWeightsAndCapacitiesRequest);

    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateEndpointWeightsAndCapacitiesRequest> request = null;
    Response<UpdateEndpointWeightsAndCapacitiesResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateEndpointWeightsAndCapacitiesRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(updateEndpointWeightsAndCapacitiesRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateEndpointWeightsAndCapacities");
        } finally {
            // Marshall-time metric is closed even if marshalling throws.
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local `URI cachedEndpoint = null;` — this operation does not
        // use endpoint discovery, and the three-arg invoke() already passes null.
        HttpResponseHandler<AmazonWebServiceResponse<UpdateEndpointWeightsAndCapacitiesResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new UpdateEndpointWeightsAndCapacitiesResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        // Ends the ClientExecuteTime event and reports the collected metrics.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
* <p>
* Updates a notebook instance. NotebookInstance updates include upgrading or downgrading the ML compute instance
* used for your notebook instance to accommodate changes in your workload requirements. You can also update the VPC
* security groups.
* </p>
*
* @param updateNotebookInstanceRequest
* @return Result of the UpdateNotebookInstance operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.UpdateNotebookInstance
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstance"
* target="_top">AWS API Documentation</a>
*/
@Override
public UpdateNotebookInstanceResult updateNotebookInstance(UpdateNotebookInstanceRequest request) {
    // Run the pre-execution request handlers, then hand off to the internal executor.
    return executeUpdateNotebookInstance(beforeClientExecution(request));
}
@SdkInternalApi
final UpdateNotebookInstanceResult executeUpdateNotebookInstance(UpdateNotebookInstanceRequest updateNotebookInstanceRequest) {
    // Internal execution path for UpdateNotebookInstance: marshal the request,
    // dispatch it, unmarshal the JSON response, and record client-side metrics.
    ExecutionContext executionContext = createExecutionContext(updateNotebookInstanceRequest);

    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateNotebookInstanceRequest> request = null;
    Response<UpdateNotebookInstanceResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(updateNotebookInstanceRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateNotebookInstance");
        } finally {
            // Marshall-time metric is closed even if marshalling throws.
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local `URI cachedEndpoint = null;` — this operation does not
        // use endpoint discovery, and the three-arg invoke() already passes null.
        HttpResponseHandler<AmazonWebServiceResponse<UpdateNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new UpdateNotebookInstanceResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        // Ends the ClientExecuteTime event and reports the collected metrics.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
* <p>
* Updates a notebook instance lifecycle configuration created with the <a>CreateNotebookInstanceLifecycleConfig</a>
* API.
* </p>
*
* @param updateNotebookInstanceLifecycleConfigRequest
* @return Result of the UpdateNotebookInstanceLifecycleConfig operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.UpdateNotebookInstanceLifecycleConfig
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstanceLifecycleConfig"
* target="_top">AWS API Documentation</a>
*/
@Override
public UpdateNotebookInstanceLifecycleConfigResult updateNotebookInstanceLifecycleConfig(UpdateNotebookInstanceLifecycleConfigRequest request) {
    // Run the pre-execution request handlers, then hand off to the internal executor.
    return executeUpdateNotebookInstanceLifecycleConfig(beforeClientExecution(request));
}
@SdkInternalApi
final UpdateNotebookInstanceLifecycleConfigResult executeUpdateNotebookInstanceLifecycleConfig(
        UpdateNotebookInstanceLifecycleConfigRequest updateNotebookInstanceLifecycleConfigRequest) {
    // Internal execution path for UpdateNotebookInstanceLifecycleConfig: marshal,
    // dispatch, unmarshal, with client-side metrics recorded around each phase.
    ExecutionContext executionContext = createExecutionContext(updateNotebookInstanceLifecycleConfigRequest);

    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateNotebookInstanceLifecycleConfigRequest> request = null;
    Response<UpdateNotebookInstanceLifecycleConfigResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateNotebookInstanceLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
                    .beforeMarshalling(updateNotebookInstanceLifecycleConfigRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
            request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
            request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
            request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UpdateNotebookInstanceLifecycleConfig");
        } finally {
            // Marshall-time metric is closed even if marshalling throws.
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        // Removed unused local `URI cachedEndpoint = null;` — this operation does not
        // use endpoint discovery, and the three-arg invoke() already passes null.
        HttpResponseHandler<AmazonWebServiceResponse<UpdateNotebookInstanceLifecycleConfigResult>> responseHandler = protocolFactory.createResponseHandler(
                new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                new UpdateNotebookInstanceLifecycleConfigResultJsonUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();

    } finally {
        // Ends the ClientExecuteTime event and reports the collected metrics.
        endClientExecution(awsRequestMetrics, request, response);
    }
}
/**
* Returns additional metadata for a previously executed successful, request, typically used for debugging issues
* where a service isn't acting as expected. This data isn't considered part of the result data returned by an
* operation, so it's available through this separate, diagnostic interface.
* <p>
* Response metadata is only cached for a limited period of time, so if you need to access this extra diagnostic
* information for an executed request, you should use this method to retrieve it as soon as possible after
* executing the request.
*
* @param request
* The originally executed request
*
* @return The response metadata for the specified request, or null if none is available.
*/
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
    // Delegates to the underlying HTTP client's response-metadata cache; returns
    // null if the metadata for this request has already expired from the cache.
    return client.getResponseMetadataForRequest(request);
}
/**
* Normal invoke with authentication. Credentials are required and may be overriden at the request level.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
        ExecutionContext executionContext) {
    // Convenience overload: no discovered endpoint, so the four-arg variant falls
    // back to the endpoint configured on this client.
    return invoke(request, responseHandler, executionContext, null);
}
/**
* Normal invoke with authentication. Credentials are required and may be overriden at the request level.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
        ExecutionContext executionContext, URI cachedEndpoint) {
    // Resolve credentials (honoring any request-level override) BEFORE dispatching,
    // so doInvoke() runs with the execution context fully populated.
    executionContext.setCredentialsProvider(CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider));

    return doInvoke(request, responseHandler, executionContext, cachedEndpoint);
}
/**
* Invoke with no authentication. Credentials are not required and any credentials set on the client or request will
* be ignored for this operation.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke(Request<Y> request,
        HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) {
    // Unlike invoke(), this skips setting a credentials provider on the execution
    // context, so the call goes out unsigned/anonymous.
    return doInvoke(request, responseHandler, executionContext, null);
}
/**
* Invoke the request using the http client. Assumes credentials (or lack thereof) have been configured in the
* ExecutionContext beforehand.
**/
private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
        ExecutionContext executionContext, URI discoveredEndpoint) {
    // Choose the target endpoint: a discovered endpoint (tagged in the user agent)
    // takes precedence over the endpoint configured on this client.
    if (discoveredEndpoint == null) {
        request.setEndpoint(endpoint);
    } else {
        request.setEndpoint(discoveredEndpoint);
        request.getOriginalRequest().getRequestClientOptions().appendUserAgent("endpoint-discovery");
    }

    request.setTimeOffset(timeOffset);

    final HttpResponseHandler<AmazonServiceException> errorHandler =
            protocolFactory.createErrorResponseHandler(new JsonErrorResponseMetadata());
    return client.execute(request, responseHandler, errorHandler, executionContext);
}
@com.amazonaws.annotation.SdkInternalApi
static com.amazonaws.protocol.json.SdkJsonProtocolFactory getProtocolFactory() {
    // Internal-only accessor (per @SdkInternalApi) exposing the shared JSON
    // protocol factory used to build marshallers and response handlers.
    return protocolFactory;
}
@Override
public AmazonSageMakerWaiters waiters() {
    // Lazily creates the waiters holder using double-checked locking.
    // NOTE(review): this idiom is only safe if the `waiters` field is declared
    // volatile — the declaration is not visible in this chunk; confirm it.
    if (waiters == null) {
        synchronized (this) {
            if (waiters == null) {
                waiters = new AmazonSageMakerWaiters(this);
            }
        }
    }
    return waiters;
}
@Override
public void shutdown() {
    // Release the base client's resources first, then stop the waiters'
    // executor if it was ever lazily created by waiters().
    super.shutdown();
    if (waiters != null) {
        waiters.shutdown();
    }
}
}
|
package com.badgeview;
import android.graphics.PointF;
import java.util.List;
/**
* Created by chqiu on 2017/3/20.
*/
/**
 * Geometry helpers for angle/quadrant math and circle tangent points.
 */
public class MathUtil {

    /** Radians in one full circle: 2&#960;. */
    public static final double CIRCLE_RADIAN = 2 * Math.PI;

    /**
     * Maps the result of {@link Math#atan} plus a quadrant number into a radian
     * value measured over the full circle.
     *
     * @param atan     the raw arctangent value
     * @param quadrant the quadrant (1-4) the point lies in
     * @return the absolute radian value
     */
    public static double getTanRadian(double atan, int quadrant) {
        double radian = atan;
        if (radian < 0) {
            radian += CIRCLE_RADIAN / 4;
        }
        return radian + CIRCLE_RADIAN / 4 * (quadrant - 1);
    }

    /** Converts a radian value to degrees (full circle = 360). */
    public static double radianToAngle(double radian) {
        final double turns = radian / CIRCLE_RADIAN;
        return 360 * turns;
    }

    /**
     * Returns the quadrant (1-4) of {@code p} relative to {@code center},
     * or -1 when the point lies exactly on an axis or at the center.
     */
    public static int getQuadrant(PointF p, PointF center) {
        if (p.x > center.x && p.y < center.y) {
            return 1;
        }
        if (p.x < center.x && p.y < center.y) {
            return 2;
        }
        if (p.x < center.x && p.y > center.y) {
            return 3;
        }
        if (p.x > center.x && p.y > center.y) {
            return 4;
        }
        return -1;
    }

    /** Euclidean distance between two points. */
    public static float getPointDistance(PointF p1, PointF p2) {
        final double squaredSum = Math.pow(p1.x - p2.x, 2) + Math.pow(p1.y - p2.y, 2);
        return (float) Math.sqrt(squaredSum);
    }

    /**
     * Adds to {@code points} the two points where the line of slope
     * {@code slopeLine} through the circle center intersects the circle.
     * A {@code null} slope is treated as a horizontal offset of {@code radius}.
     * <p>
     * Formula credit: mabeijianxi,
     * http://blog.csdn.net/mabeijianxi/article/details/50560361
     *
     * @param circleCenter the circle center point
     * @param radius       the circle radius
     * @param slopeLine    the slope of the line crossing the center, or null
     * @param points       output list the two intersection points are appended to
     */
    public static void getInnertangentPoints(PointF circleCenter, float radius, Double slopeLine, List<PointF> points) {
        final float xOffset;
        final float yOffset;
        if (slopeLine == null) {
            xOffset = radius;
            yOffset = 0;
        } else {
            final float radian = (float) Math.atan(slopeLine);
            xOffset = (float) (Math.cos(radian) * radius);
            yOffset = (float) (Math.sin(radian) * radius);
        }
        points.add(new PointF(circleCenter.x + xOffset, circleCenter.y + yOffset));
        points.add(new PointF(circleCenter.x - xOffset, circleCenter.y - yOffset));
    }
}
|
/*
* Copyright (c) 2011-2018, Meituan Dianping. All Rights Reserved.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dianping.cat.servlet;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;

import javax.servlet.DispatcherType;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import junit.framework.Assert;

import org.eclipse.jetty.webapp.WebAppContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import org.unidal.helper.Files;
import org.unidal.helper.Joiners;
import org.unidal.helper.Urls;

import org.unidal.test.jetty.JettyServer;

import com.dianping.cat.Cat;
import com.dianping.cat.message.Message;
import com.dianping.cat.message.Transaction;
public class CatFilterTest extends JettyServer {
@After
public void after() throws Exception {
super.stopServer();
}
@Before
public void before() throws Exception {
System.setProperty("devMode", "true");
super.startServer();
}
@Override
protected String getContextPath() {
return "/mock";
}
@Override
protected int getServerPort() {
return 2282;
}
@Override
protected boolean isWebXmlDefined() {
return false;
}
@Override
protected void postConfigure(WebAppContext context) {
context.addServlet(MockServlet.class, "/*");
context.addFilter(CatFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
}
@Test
public void testMode0() throws Exception {
String url = "http://localhost:2282/mock/mode0";
InputStream in = Urls.forIO().openStream(url);
String content = Files.forIO().readFrom(in, "utf-8");
Assert.assertEquals("mock content here!", content);
TimeUnit.MILLISECONDS.sleep(100);
}
@Test
public void testMode1() throws Exception {
String url = "http://localhost:2282/mock/mode1";
Transaction t = Cat.newTransaction("Mock", "testMode1");
try {
String childId = Cat.createMessageId();
String id = Cat.getManager().getThreadLocalMessageTree().getMessageId();
Cat.logEvent("RemoteCall", url, Message.SUCCESS, childId);
InputStream in = Urls.forIO().connectTimeout(100) //
.header("X-Cat-Id", childId) //
.header("X-Cat-Parent-Id", id) //
.header("X-Cat-Root-Id", id) //
.openStream(url);
String content = Files.forIO().readFrom(in, "utf-8");
Assert.assertEquals("mock content here!", content);
t.setStatus(Message.SUCCESS);
} finally {
t.complete();
}
TimeUnit.MILLISECONDS.sleep(100);
}
@Test
public void testMode2() throws Exception {
String url = "http://localhost:2282/mock/mode2";
Map<String, List<String>> headers = new HashMap<String, List<String>>();
InputStream in = Urls.forIO().connectTimeout(100) //
.header("X-Cat-Source", "container") //
.header("X-CAT-TRACE-MODE", "true") //
.openStream(url, headers);
String content = Files.forIO().readFrom(in, "utf-8");
Assert.assertEquals("mock content here!", content);
String id = getHeader(headers, "X-CAT-ID");
String parentId = getHeader(headers, "X-CAT-PARENT-ID");
String rootId = getHeader(headers, "X-CAT-ROOT-ID");
Assert.assertNotNull(id);
Assert.assertNotNull(parentId);
Assert.assertNotNull(rootId);
Assert.assertFalse(id.equals(rootId));
TimeUnit.MILLISECONDS.sleep(100);
}
private String getHeader(Map<String, List<String>> headers, String name) {
List<String> values = headers.get(name);
if (values != null) {
int len = values.size();
if (len == 0) {
return null;
} else if (len == 1) {
return values.get(0);
} else {
return Joiners.by(',').join(values);
}
} else {
return null;
}
}
public static class MockServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@Override
protected void service(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {
PrintWriter writer = res.getWriter();
Transaction t = Cat.newTransaction("Mock", req.getRequestURI());
try {
writer.write("mock content here!");
// no status set by purpose
} finally {
t.complete();
}
}
}
}
|
/*
* Copyright 2017 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.common;
import static com.google.common.collect.ImmutableList.toImmutableList;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.channels.ClosedChannelException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.function.IntPredicate;
import java.util.function.LongPredicate;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.CaffeineSpec;
import com.google.common.base.Ascii;
import com.google.common.base.CharMatcher;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.linecorp.armeria.client.ClientFactoryBuilder;
import com.linecorp.armeria.client.retry.Backoff;
import com.linecorp.armeria.client.retry.RetryingClient;
import com.linecorp.armeria.client.retry.RetryingRpcClient;
import com.linecorp.armeria.common.util.Exceptions;
import com.linecorp.armeria.common.util.InetAddressPredicates;
import com.linecorp.armeria.common.util.Sampler;
import com.linecorp.armeria.common.util.SystemInfo;
import com.linecorp.armeria.internal.common.util.SslContextUtil;
import com.linecorp.armeria.server.RoutingContext;
import com.linecorp.armeria.server.ServerBuilder;
import com.linecorp.armeria.server.Service;
import com.linecorp.armeria.server.ServiceConfig;
import com.linecorp.armeria.server.ServiceRequestContext;
import com.linecorp.armeria.server.annotation.ExceptionHandler;
import com.linecorp.armeria.server.annotation.ExceptionVerbosity;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.config.NamingConvention;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.epoll.Epoll;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.ssl.OpenSsl;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.resolver.DefaultAddressResolverGroup;
import io.netty.resolver.dns.DnsNameResolverTimeoutException;
import io.netty.util.ReferenceCountUtil;
/**
* The system properties that affect Armeria's runtime behavior.
*/
public final class Flags {
private static final Logger logger = LoggerFactory.getLogger(Flags.class);

// Splits comma-separated flag values, trimming whitespace and dropping empties.
private static final Splitter CSV_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings();

// All Armeria system properties are read under this prefix.
private static final String PREFIX = "com.linecorp.armeria.";

private static final int NUM_CPU_CORES = Runtime.getRuntime().availableProcessors();

// Verbose-exception sampling: spec string and the sampler built from it in the
// static initializer below.
private static final String DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC = "rate-limit=10";
private static final String VERBOSE_EXCEPTION_SAMPLER_SPEC;
private static final Sampler<Class<? extends Throwable>> VERBOSE_EXCEPTION_SAMPLER;

// Combined predicate over the "preferredIpV4Addresses" CIDR list; null when the
// property is unset or yields no valid CIDRs (set in the static initializer).
@Nullable
private static final Predicate<InetAddress> PREFERRED_IP_V4_ADDRESSES;
static {
    // "verboseExceptions" accepts the legacy values "true"/"false" as well as any
    // valid Sampler spec (e.g. "rate-limit=10", "always", "never").
    final String spec = getNormalized("verboseExceptions", DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC, val -> {
        if ("true".equals(val) || "false".equals(val)) {
            return true;
        }

        try {
            Sampler.of(val);
            return true;
        } catch (Exception e) {
            // Invalid sampler specification
            return false;
        }
    });

    // Normalize the legacy boolean forms onto the "always"/"never" sampler specs.
    switch (spec) {
        case "true":
        case "always":
            VERBOSE_EXCEPTION_SAMPLER_SPEC = "always";
            VERBOSE_EXCEPTION_SAMPLER = Sampler.always();
            break;
        case "false":
        case "never":
            VERBOSE_EXCEPTION_SAMPLER_SPEC = "never";
            VERBOSE_EXCEPTION_SAMPLER = Sampler.never();
            break;
        default:
            VERBOSE_EXCEPTION_SAMPLER_SPEC = spec;
            VERBOSE_EXCEPTION_SAMPLER = new ExceptionSampler(VERBOSE_EXCEPTION_SAMPLER_SPEC);
    }

    // Parse "preferredIpV4Addresses" as a CSV of CIDR blocks; invalid entries are
    // logged and skipped rather than failing startup.
    final List<Predicate<InetAddress>> preferredIpV4Addresses =
            CSV_SPLITTER.splitToList(getNormalized("preferredIpV4Addresses", "", unused -> true))
                        .stream()
                        .map(cidr -> {
                            try {
                                return InetAddressPredicates.ofCidr(cidr);
                            } catch (Exception e) {
                                logger.warn("Failed to parse a preferred IPv4: {}", cidr);
                            }
                            return null;
                        })
                        .filter(Objects::nonNull)
                        .collect(toImmutableList());
    // 0 CIDRs -> no preference (null); 1 -> use it directly; many -> match any.
    switch (preferredIpV4Addresses.size()) {
        case 0:
            PREFERRED_IP_V4_ADDRESSES = null;
            break;
        case 1:
            PREFERRED_IP_V4_ADDRESSES = preferredIpV4Addresses.get(0);
            break;
        default:
            PREFERRED_IP_V4_ADDRESSES = inetAddress -> {
                for (Predicate<InetAddress> preferredIpV4Addr : preferredIpV4Addresses) {
                    if (preferredIpV4Addr.test(inetAddress)) {
                        return true;
                    }
                }
                return false;
            };
    }
}
// ---- verbosity / context storage -------------------------------------------
private static final boolean VERBOSE_SOCKET_EXCEPTIONS = getBoolean("verboseSocketExceptions", false);

private static final boolean VERBOSE_RESPONSES = getBoolean("verboseResponses", false);

@Nullable
private static final String REQUEST_CONTEXT_STORAGE_PROVIDER =
        System.getProperty(PREFIX + "requestContextStorageProvider");

// ---- native transport ------------------------------------------------------
// WSLENV is only set under Windows Subsystem for Linux.
private static final boolean HAS_WSLENV = System.getenv("WSLENV") != null;

// The validator rejects useEpoll=true when epoll is unavailable.
private static final boolean USE_EPOLL = getBoolean("useEpoll", isEpollAvailable(),
                                                    value -> isEpollAvailable() || !value);

// Lazily resolved elsewhere; null means "not decided yet".
@Nullable
private static Boolean useOpenSsl;
@Nullable
private static Boolean dumpOpenSslInfo;

// ---- connection / thread pool limits ---------------------------------------
private static final int DEFAULT_MAX_NUM_CONNECTIONS = Integer.MAX_VALUE;
private static final int MAX_NUM_CONNECTIONS =
        getInt("maxNumConnections", DEFAULT_MAX_NUM_CONNECTIONS, value -> value > 0);

private static final int DEFAULT_NUM_COMMON_WORKERS = NUM_CPU_CORES * 2;
private static final int NUM_COMMON_WORKERS =
        getInt("numCommonWorkers", DEFAULT_NUM_COMMON_WORKERS, value -> value > 0);

private static final int DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS = 200; // from Tomcat default maxThreads
private static final int NUM_COMMON_BLOCKING_TASK_THREADS =
        getInt("numCommonBlockingTaskThreads",
               DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS,
               value -> value > 0);

// ---- request/response size and timeout defaults ----------------------------
private static final long DEFAULT_DEFAULT_MAX_REQUEST_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_REQUEST_LENGTH =
        getLong("defaultMaxRequestLength",
                DEFAULT_DEFAULT_MAX_REQUEST_LENGTH,
                value -> value >= 0);

private static final long DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_RESPONSE_LENGTH =
        getLong("defaultMaxResponseLength",
                DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH,
                value -> value >= 0);

private static final long DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS = 10 * 1000; // 10 seconds
private static final long DEFAULT_REQUEST_TIMEOUT_MILLIS =
        getLong("defaultRequestTimeoutMillis",
                DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS,
                value -> value >= 0);

// Use slightly greater value than the default request timeout so that clients have a higher chance of
// getting proper 503 Service Unavailable response when server-side timeout occurs.
private static final long DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS = 15 * 1000; // 15 seconds
private static final long DEFAULT_RESPONSE_TIMEOUT_MILLIS =
        getLong("defaultResponseTimeoutMillis",
                DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS,
                value -> value >= 0);

private static final long DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS = 3200; // 3.2 seconds
private static final long DEFAULT_CONNECT_TIMEOUT_MILLIS =
        getLong("defaultConnectTimeoutMillis",
                DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS,
                value -> value > 0);

private static final long DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS = 1000; // 1 second
private static final long DEFAULT_WRITE_TIMEOUT_MILLIS =
        getLong("defaultWriteTimeoutMillis",
                DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS,
                value -> value >= 0);

// Use slightly greater value than the client-side default so that clients close the connection more often.
private static final long DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS = 15000; // 15 seconds
private static final long DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS =
        getLong("defaultServerIdleTimeoutMillis",
                DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS,
                value -> value >= 0);

private static final long DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS = 10000; // 10 seconds
private static final long DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS =
        getLong("defaultClientIdleTimeoutMillis",
                DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS,
                value -> value >= 0);

private static final long DEFAULT_DEFAULT_PING_INTERVAL_MILLIS = 0; // Disabled
private static final long DEFAULT_PING_INTERVAL_MILLIS =
        getLong("defaultPingIntervalMillis",
                DEFAULT_DEFAULT_PING_INTERVAL_MILLIS,
                value -> value >= 0);

// ---- HTTP/2 settings --------------------------------------------------------
private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE =
        getInt("defaultHttp2InitialConnectionWindowSize",
               DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE,
               value -> value > 0);

private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE =
        getInt("defaultHttp2InitialStreamWindowSize",
               DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE,
               value -> value > 0);

private static final int DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE = 16384; // From HTTP/2 specification
private static final int DEFAULT_HTTP2_MAX_FRAME_SIZE =
        getInt("defaultHttp2MaxFrameSize",
               DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE,
               value -> value >= Http2CodecUtil.MAX_FRAME_SIZE_LOWER_BOUND &&
                        value <= Http2CodecUtil.MAX_FRAME_SIZE_UPPER_BOUND);

// Can't use 0xFFFFFFFFL because some implementations use a signed 32-bit integer to store HTTP/2 SETTINGS
// parameter values, thus anything greater than 0x7FFFFFFF will break them or make them unhappy.
private static final long DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION = Integer.MAX_VALUE;
private static final long DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION =
        getLong("defaultHttp2MaxStreamsPerConnection",
                DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION,
                value -> value > 0 && value <= 0xFFFFFFFFL);

// from Netty default maxHeaderSize
private static final long DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE = 8192;
private static final long DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE =
        getLong("defaultHttp2MaxHeaderListSize",
                DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE,
                value -> value > 0 && value <= 0xFFFFFFFFL);

// ---- HTTP/1 settings --------------------------------------------------------
private static final int DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH = 4096; // from Netty
private static final int DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH =
        getInt("defaultHttp1MaxInitialLineLength",
               DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH,
               value -> value >= 0);

private static final int DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE = 8192; // from Netty
private static final int DEFAULT_MAX_HTTP1_HEADER_SIZE =
        getInt("defaultHttp1MaxHeaderSize",
               DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE,
               value -> value >= 0);

private static final int DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE = 8192; // from Netty
private static final int DEFAULT_HTTP1_MAX_CHUNK_SIZE =
        getInt("defaultHttp1MaxChunkSize",
               DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE,
               value -> value >= 0);

private static final boolean DEFAULT_USE_HTTP2_PREFACE = getBoolean("defaultUseHttp2Preface", true);
private static final boolean DEFAULT_USE_HTTP1_PIPELINING = getBoolean("defaultUseHttp1Pipelining", false);

// ---- retry ------------------------------------------------------------------
// The spec must parse via Backoff.of(); otherwise the default is kept.
private static final String DEFAULT_DEFAULT_BACKOFF_SPEC =
        "exponential=200:10000,jitter=0.2";
private static final String DEFAULT_BACKOFF_SPEC =
        getNormalized("defaultBackoffSpec", DEFAULT_DEFAULT_BACKOFF_SPEC, value -> {
            try {
                Backoff.of(value);
                return true;
            } catch (Exception e) {
                // Invalid backoff specification
                return false;
            }
        });

private static final int DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS = 10;
private static final int DEFAULT_MAX_TOTAL_ATTEMPTS =
        getInt("defaultMaxTotalAttempts",
               DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS,
               value -> value > 0);

// ---- Caffeine cache specs (null disables the cache) -------------------------
private static final String DEFAULT_ROUTE_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String ROUTE_CACHE_SPEC =
        caffeineSpec("routeCache", DEFAULT_ROUTE_CACHE_SPEC);

private static final String DEFAULT_ROUTE_DECORATOR_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String ROUTE_DECORATOR_CACHE_SPEC =
        caffeineSpec("routeDecoratorCache", DEFAULT_ROUTE_DECORATOR_CACHE_SPEC);

private static final String DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC = "maximumSize=256";
@Nullable
private static final String COMPOSITE_SERVICE_CACHE_SPEC =
        caffeineSpec("compositeServiceCache", DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC);

private static final String DEFAULT_PARSED_PATH_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String PARSED_PATH_CACHE_SPEC =
        caffeineSpec("parsedPathCache", DEFAULT_PARSED_PATH_CACHE_SPEC);

private static final String DEFAULT_HEADER_VALUE_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String HEADER_VALUE_CACHE_SPEC =
        caffeineSpec("headerValueCache", DEFAULT_HEADER_VALUE_CACHE_SPEC);

private static final String DEFAULT_FILE_SERVICE_CACHE_SPEC = "maximumSize=1024";
@Nullable
private static final String FILE_SERVICE_CACHE_SPEC =
        caffeineSpec("fileServiceCache", DEFAULT_FILE_SERVICE_CACHE_SPEC);

// Header names whose values are cached; the list must be ASCII-only.
private static final String DEFAULT_CACHED_HEADERS =
        ":authority,:scheme,:method,accept-encoding,content-type";
private static final List<String> CACHED_HEADERS =
        CSV_SPLITTER.splitToList(getNormalized(
                "cachedHeaders", DEFAULT_CACHED_HEADERS, CharMatcher.ascii()::matchesAllOf));

// ---- misc behavior toggles --------------------------------------------------
private static final String DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY = "unhandled";
private static final ExceptionVerbosity ANNOTATED_SERVICE_EXCEPTION_VERBOSITY =
        exceptionLoggingMode("annotatedServiceExceptionVerbosity",
                             DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY);

private static final boolean USE_JDK_DNS_RESOLVER = getBoolean("useJdkDnsResolver", false);

private static final boolean REPORT_BLOCKED_EVENT_LOOP =
        getBoolean("reportBlockedEventLoop", true);

private static final boolean VALIDATE_HEADERS = getBoolean("validateHeaders", true);

private static final boolean USE_LEGACY_METER_NAMES = getBoolean("useLegacyMeterNames", false);
// Logged once at class initialization so operators can tell from the startup logs
// whether the native epoll transport is in use and, if not, why.
static {
    if (!isEpollAvailable()) {
        final Throwable cause = Epoll.unavailabilityCause();
        if (cause != null) {
            logger.info("/dev/epoll not available: {}", Exceptions.peel(cause).toString());
        } else {
            if (HAS_WSLENV) {
                logger.info("/dev/epoll not available: WSL not supported");
            } else {
                // Epoll reported unavailable but gave no cause and we are not on WSL.
                logger.info("/dev/epoll not available: ?");
            }
        }
    } else if (USE_EPOLL) {
        logger.info("Using /dev/epoll");
    }
}
/**
 * Returns whether Netty's native epoll transport can be used on this host:
 * Linux only, epoll natives loadable, and not running under WSL.
 */
private static boolean isEpollAvailable() {
    // Netty's epoll transport does not work with WSL (Windows Subsystem for Linux) yet.
    // TODO(trustin): Re-enable on WSL if https://github.com/Microsoft/WSL/issues/1982 is resolved.
    return SystemInfo.isLinux() && Epoll.isAvailable() && !HAS_WSLENV;
}
// The accessors below simply expose values computed once during class initialization.

/**
 * Returns the {@link Sampler} that determines whether to retain the stack trace of the exceptions
 * that are thrown frequently by Armeria.
 *
 * @see #verboseExceptionSamplerSpec()
 */
public static Sampler<Class<? extends Throwable>> verboseExceptionSampler() {
    return VERBOSE_EXCEPTION_SAMPLER;
}

/**
 * Returns the specification string of the {@link Sampler} that determines whether to retain the stack
 * trace of the exceptions that are thrown frequently by Armeria. A sampled exception will have the stack
 * trace while the others will have an empty stack trace to eliminate the cost of capturing the stack
 * trace.
 *
 * <p>The default value of this flag is {@value #DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC}, which retains
 * the stack trace of the exceptions at the maximum rate of 10 exceptions/sec.
 * Specify the {@code -Dcom.linecorp.armeria.verboseExceptions=<specification>} JVM option to override
 * the default. See {@link Sampler#of(String)} for the specification string format.</p>
 */
public static String verboseExceptionSamplerSpec() {
    // XXX(trustin): Is it worth allowing to specify different specs for different exception types?
    return VERBOSE_EXCEPTION_SAMPLER_SPEC;
}

/**
 * Returns whether to log the socket exceptions which are mostly harmless. If enabled, the following
 * exceptions will be logged:
 * <ul>
 *   <li>{@link ClosedChannelException}</li>
 *   <li>{@link ClosedSessionException}</li>
 *   <li>{@link IOException} - 'Connection reset/closed/aborted by peer'</li>
 *   <li>'Broken pipe'</li>
 *   <li>{@link Http2Exception} - 'Stream closed'</li>
 *   <li>{@link SSLException} - 'SSLEngine closed already'</li>
 * </ul>
 *
 * <p>It is recommended to keep this flag disabled, because it increases the amount of log messages for
 * the errors you usually do not have control over, e.g. unexpected socket disconnection due to network
 * or remote peer issues.</p>
 *
 * <p>This flag is disabled by default.
 * Specify the {@code -Dcom.linecorp.armeria.verboseSocketExceptions=true} JVM option to enable it.</p>
 *
 * @see Exceptions#isExpected(Throwable)
 */
public static boolean verboseSocketExceptions() {
    return VERBOSE_SOCKET_EXCEPTIONS;
}

/**
 * Returns whether the verbose response mode is enabled. When enabled, the server responses will contain
 * the exception type and its full stack trace, which may be useful for debugging while potentially
 * insecure. When disabled, the server responses will not expose such server-side details to the client.
 *
 * <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.verboseResponses=true}
 * JVM option or use {@link ServerBuilder#verboseResponses(boolean)} to enable it.
 */
public static boolean verboseResponses() {
    return VERBOSE_RESPONSES;
}

/**
 * Returns the fully qualified class name of {@link RequestContextStorageProvider} that is used to choose
 * when multiple {@link RequestContextStorageProvider}s exist.
 *
 * <p>The default value of this flag is {@code null}, which means only one
 * {@link RequestContextStorageProvider} must be found via Java SPI. If there are more than one,
 * you must specify the {@code -Dcom.linecorp.armeria.requestContextStorageProvider=<FQCN>} JVM option to
 * choose the {@link RequestContextStorageProvider}.
 */
@Nullable
public static String requestContextStorageProvider() {
    return REQUEST_CONTEXT_STORAGE_PROVIDER;
}

/**
 * Returns whether the JNI-based {@code /dev/epoll} socket I/O is enabled. When enabled on Linux, Armeria
 * uses {@code /dev/epoll} directly for socket I/O. When disabled, {@code java.nio} socket API is used
 * instead.
 *
 * <p>This flag is enabled by default for supported platforms. Specify the
 * {@code -Dcom.linecorp.armeria.useEpoll=false} JVM option to disable it.
 */
public static boolean useEpoll() {
    return USE_EPOLL;
}
/**
 * Returns whether the JNI-based TLS support with OpenSSL is enabled. When enabled, Armeria uses OpenSSL
 * for processing TLS connections. When disabled, the current JVM's default {@link SSLEngine} is used
 * instead.
 *
 * <p>This flag is enabled by default for supported platforms. Specify the
 * {@code -Dcom.linecorp.armeria.useOpenSsl=false} JVM option to disable it.
 */
public static boolean useOpenSsl() {
    if (useOpenSsl != null) {
        return useOpenSsl;
    }
    // Lazily initialized on first access; this call also computes 'dumpOpenSslInfo'.
    // NOTE(review): this lazy initialization is unsynchronized — presumably a benign race
    // (concurrent callers recompute identical values); confirm both fields are safely published.
    setUseOpenSslAndDumpOpenSslInfo();
    return useOpenSsl;
}

// Decides whether OpenSSL can and should be used, logs the outcome, and assigns
// both 'useOpenSsl' and 'dumpOpenSslInfo' on every path through the method.
private static void setUseOpenSslAndDumpOpenSslInfo() {
    final boolean useOpenSsl = getBoolean("useOpenSsl", true);
    if (!useOpenSsl) {
        // OpenSSL explicitly disabled
        Flags.useOpenSsl = false;
        dumpOpenSslInfo = false;
        return;
    }
    if (!OpenSsl.isAvailable()) {
        // OpenSSL requested (or defaulted) but the native library could not be loaded.
        final Throwable cause = Exceptions.peel(OpenSsl.unavailabilityCause());
        logger.info("OpenSSL not available: {}", cause.toString());
        Flags.useOpenSsl = false;
        dumpOpenSslInfo = false;
        return;
    }
    Flags.useOpenSsl = true;
    logger.info("Using OpenSSL: {}, 0x{}", OpenSsl.versionString(),
                Long.toHexString(OpenSsl.version() & 0xFFFFFFFFL));
    dumpOpenSslInfo = getBoolean("dumpOpenSslInfo", false);
    if (dumpOpenSslInfo) {
        // Create a throwaway engine solely to enumerate supported/enabled protocols and ciphers.
        final SSLEngine engine = SslContextUtil.createSslContext(
                SslContextBuilder::forClient,
                false,
                ImmutableList.of()).newEngine(ByteBufAllocator.DEFAULT);
        logger.info("All available SSL protocols: {}",
                    ImmutableList.copyOf(engine.getSupportedProtocols()));
        logger.info("Default enabled SSL protocols: {}", SslContextUtil.DEFAULT_PROTOCOLS);
        ReferenceCountUtil.release(engine);
        logger.info("All available SSL ciphers: {}", OpenSsl.availableJavaCipherSuites());
        logger.info("Default enabled SSL ciphers: {}", SslContextUtil.DEFAULT_CIPHERS);
    }
}

/**
 * Returns whether information about the OpenSSL environment should be dumped when first starting the
 * application, including supported ciphers.
 *
 * <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.dumpOpenSslInfo=true} JVM
 * option to enable it.
 *
 * <p>If {@link #useOpenSsl()} returns {@code false}, this also returns {@code false} no matter you
 * specified the JVM option.
 */
public static boolean dumpOpenSslInfo() {
    if (dumpOpenSslInfo != null) {
        return dumpOpenSslInfo;
    }
    // Same lazy-initialization path as useOpenSsl(); see the note there.
    setUseOpenSslAndDumpOpenSslInfo();
    return dumpOpenSslInfo;
}
/**
 * Returns the default server-side maximum number of connections.
 *
 * <p>The default value of this flag is {@value #DEFAULT_MAX_NUM_CONNECTIONS}. Specify the
 * {@code -Dcom.linecorp.armeria.maxNumConnections=<integer>} JVM option to override
 * the default value.
 */
public static int maxNumConnections() {
    return MAX_NUM_CONNECTIONS;
}

/**
 * Returns the default number of {@linkplain CommonPools#workerGroup() common worker group} threads.
 * Note that this value has effect only if a user did not specify a worker group.
 *
 * <p>The default value of this flag is {@code 2 * <numCpuCores>}. Specify the
 * {@code -Dcom.linecorp.armeria.numCommonWorkers=<integer>} JVM option to override the default value.
 */
public static int numCommonWorkers() {
    return NUM_COMMON_WORKERS;
}

/**
 * Returns the default number of {@linkplain CommonPools#blockingTaskExecutor() blocking task executor}
 * threads. Note that this value has effect only if a user did not specify a blocking task executor.
 *
 * <p>The default value of this flag is {@value #DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS}. Specify the
 * {@code -Dcom.linecorp.armeria.numCommonBlockingTaskThreads=<integer>} JVM option to override
 * the default value.
 */
public static int numCommonBlockingTaskThreads() {
    return NUM_COMMON_BLOCKING_TASK_THREADS;
}

/**
 * Returns the default server-side maximum length of a request. Note that this value has effect
 * only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_REQUEST_LENGTH}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultMaxRequestLength=<long>} to override the default value.
 * {@code 0} disables the length limit.
 */
public static long defaultMaxRequestLength() {
    return DEFAULT_MAX_REQUEST_LENGTH;
}

/**
 * Returns the default client-side maximum length of a response. Note that this value has effect
 * only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultMaxResponseLength=<long>} to override the default value.
 * {@code 0} disables the length limit.
 */
public static long defaultMaxResponseLength() {
    return DEFAULT_MAX_RESPONSE_LENGTH;
}

/**
 * Returns the default server-side timeout of a request in milliseconds. Note that this value has effect
 * only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultRequestTimeoutMillis=<long>} to override
 * the default value. {@code 0} disables the timeout.
 */
public static long defaultRequestTimeoutMillis() {
    return DEFAULT_REQUEST_TIMEOUT_MILLIS;
}

/**
 * Returns the default client-side timeout of a response in milliseconds. Note that this value has effect
 * only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultResponseTimeoutMillis=<long>} to override
 * the default value. {@code 0} disables the timeout.
 */
public static long defaultResponseTimeoutMillis() {
    return DEFAULT_RESPONSE_TIMEOUT_MILLIS;
}

/**
 * Returns the default client-side timeout of a socket connection attempt in milliseconds.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultConnectTimeoutMillis=<integer>} JVM option to override
 * the default value.
 */
public static long defaultConnectTimeoutMillis() {
    return DEFAULT_CONNECT_TIMEOUT_MILLIS;
}

/**
 * Returns the default client-side timeout of a socket write attempt in milliseconds.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultWriteTimeoutMillis=<integer>} JVM option to override
 * the default value. {@code 0} disables the timeout.
 */
public static long defaultWriteTimeoutMillis() {
    return DEFAULT_WRITE_TIMEOUT_MILLIS;
}

/**
 * Returns the default server-side idle timeout of a connection for keep-alive in milliseconds.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultServerIdleTimeoutMillis=<integer>} JVM option to override
 * the default value.
 */
public static long defaultServerIdleTimeoutMillis() {
    return DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS;
}

/**
 * Returns the default client-side idle timeout of a connection for keep-alive in milliseconds.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultClientIdleTimeoutMillis=<integer>} JVM option to override
 * the default value.
 */
public static long defaultClientIdleTimeoutMillis() {
    return DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS;
}

/**
 * Returns the default maximum length of an HTTP/1 response initial line.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxInitialLineLength=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp1MaxInitialLineLength() {
    // NOTE(review): field name ordering ("MAX_HTTP1") differs from the flag name and from the
    // sibling DEFAULT_HTTP1_MAX_CHUNK_SIZE ("HTTP1_MAX") — verify against the field declaration.
    return DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH;
}

/**
 * Returns the default maximum length of all headers in an HTTP/1 response.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxHeaderSize=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp1MaxHeaderSize() {
    // NOTE(review): same naming-order discrepancy as defaultHttp1MaxInitialLineLength() above.
    return DEFAULT_MAX_HTTP1_HEADER_SIZE;
}

/**
 * Returns the default maximum length of each chunk in an HTTP/1 response content.
 * The content or a chunk longer than this value will be split into smaller chunks
 * so that their lengths never exceed it.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxChunkSize=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp1MaxChunkSize() {
    return DEFAULT_HTTP1_MAX_CHUNK_SIZE;
}

/**
 * Returns the default value of the {@link ClientFactoryBuilder#useHttp2Preface(boolean)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>This flag is enabled by default. Specify the
 * {@code -Dcom.linecorp.armeria.defaultUseHttp2Preface=false} JVM option to disable it.
 */
public static boolean defaultUseHttp2Preface() {
    return DEFAULT_USE_HTTP2_PREFACE;
}

/**
 * Returns the default value of the {@link ClientFactoryBuilder#useHttp1Pipelining(boolean)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>This flag is disabled by default. Specify the
 * {@code -Dcom.linecorp.armeria.defaultUseHttp1Pipelining=true} JVM option to enable it.
 */
public static boolean defaultUseHttp1Pipelining() {
    return DEFAULT_USE_HTTP1_PIPELINING;
}
/**
 * Returns the default value for the PING interval.
 * A <a href="https://httpwg.org/specs/rfc7540.html#PING">PING</a> frame
 * is sent for HTTP/2 server and client or
 * an <a href="https://tools.ietf.org/html/rfc7231#section-4.3.7">OPTIONS</a> request with an asterisk ("*")
 * is sent for HTTP/1 client.
 *
 * <p>Note that this flag is only in effect when {@link #defaultServerIdleTimeoutMillis()} for server and
 * {@link #defaultClientIdleTimeoutMillis()} for client are greater than the value of this flag.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_PING_INTERVAL_MILLIS} milliseconds.
 * Specify the {@code -Dcom.linecorp.armeria.defaultPingIntervalMillis=<integer>} JVM option to override
 * the default value. If the specified value was smaller than 10 seconds, bumps PING interval to 10 seconds.
 */
public static long defaultPingIntervalMillis() {
    return DEFAULT_PING_INTERVAL_MILLIS;
}

/**
 * Returns the default value of the {@link ServerBuilder#http2InitialConnectionWindowSize(int)} and
 * {@link ClientFactoryBuilder#http2InitialConnectionWindowSize(int)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialConnectionWindowSize=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp2InitialConnectionWindowSize() {
    return DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE;
}

/**
 * Returns the default value of the {@link ServerBuilder#http2InitialStreamWindowSize(int)} and
 * {@link ClientFactoryBuilder#http2InitialStreamWindowSize(int)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialStreamWindowSize=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp2InitialStreamWindowSize() {
    return DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE;
}

/**
 * Returns the default value of the {@link ServerBuilder#http2MaxFrameSize(int)} and
 * {@link ClientFactoryBuilder#http2MaxFrameSize(int)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxFrameSize=<integer>} JVM option
 * to override the default value.
 */
public static int defaultHttp2MaxFrameSize() {
    return DEFAULT_HTTP2_MAX_FRAME_SIZE;
}

/**
 * Returns the default value of the {@link ServerBuilder#http2MaxStreamsPerConnection(long)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxStreamsPerConnection=<integer>} JVM option
 * to override the default value.
 */
public static long defaultHttp2MaxStreamsPerConnection() {
    return DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION;
}

/**
 * Returns the default value of the {@link ServerBuilder#http2MaxHeaderListSize(long)} and
 * {@link ClientFactoryBuilder#http2MaxHeaderListSize(long)} option.
 * Note that this value has effect only if a user did not specify it.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE}.
 * Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxHeaderListSize=<integer>} JVM option
 * to override the default value.
 */
public static long defaultHttp2MaxHeaderListSize() {
    return DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE;
}

/**
 * Returns the default value of the {@code backoffSpec} parameter when instantiating a {@link Backoff}
 * using {@link Backoff#of(String)}. Note that this value has effect only if a user did not specify the
 * {@code defaultBackoffSpec} in the constructor call.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_BACKOFF_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultBackoffSpec=<spec>} JVM option to override the default value.
 */
public static String defaultBackoffSpec() {
    return DEFAULT_BACKOFF_SPEC;
}

/**
 * Returns the default maximum number of total attempts. Note that this value has effect only if a user
 * did not specify it when creating a {@link RetryingClient} or a {@link RetryingRpcClient}.
 *
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS}. Specify the
 * {@code -Dcom.linecorp.armeria.defaultMaxTotalAttempts=<integer>} JVM option to
 * override the default value.
 */
public static int defaultMaxTotalAttempts() {
    return DEFAULT_MAX_TOTAL_ATTEMPTS;
}
/**
 * Returns the value of the {@code routeCache} parameter. It would be used to create a Caffeine
 * {@link Cache} instance using {@link CaffeineSpec} for routing a request. The {@link Cache}
 * would hold the mappings of {@link RoutingContext} and the designated {@link ServiceConfig}
 * for a request to improve server performance.
 *
 * <p>The default value of this flag is {@value #DEFAULT_ROUTE_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.routeCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.routeCache=maximumSize=4096,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.routeCache=off} JVM option to disable it.
 */
@Nullable
public static String routeCacheSpec() {
    return ROUTE_CACHE_SPEC;
}

/**
 * Returns the value of the {@code routeDecoratorCache} parameter. It would be used to create a Caffeine
 * {@link Cache} instance using {@link CaffeineSpec} for mapping a route to decorator.
 * The {@link Cache} would hold the mappings of {@link RoutingContext} and the designated
 * dispatcher {@link Service}s for a request to improve server performance.
 *
 * <p>The default value of this flag is {@value #DEFAULT_ROUTE_DECORATOR_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.routeDecoratorCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.routeDecoratorCache=maximumSize=4096,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.routeDecoratorCache=off} JVM option to disable it.
 */
@Nullable
public static String routeDecoratorCacheSpec() {
    return ROUTE_DECORATOR_CACHE_SPEC;
}

/**
 * Returns the value of the {@code parsedPathCache} parameter. It would be used to create a Caffeine
 * {@link Cache} instance using {@link CaffeineSpec} for mapping raw HTTP paths to parsed pair of
 * path and query, after validation.
 *
 * <p>The default value of this flag is {@value #DEFAULT_PARSED_PATH_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.parsedPathCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.parsedPathCache=maximumSize=4096,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.parsedPathCache=off} JVM option to disable it.
 */
@Nullable
public static String parsedPathCacheSpec() {
    return PARSED_PATH_CACHE_SPEC;
}

/**
 * Returns the value of the {@code headerValueCache} parameter. It would be used to create a Caffeine
 * {@link Cache} instance using {@link CaffeineSpec} for mapping raw HTTP ASCII header values to
 * {@link String}.
 *
 * <p>The default value of this flag is {@value #DEFAULT_HEADER_VALUE_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.headerValueCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.headerValueCache=maximumSize=4096,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.headerValueCache=off} JVM option to disable it.
 */
@Nullable
public static String headerValueCacheSpec() {
    return HEADER_VALUE_CACHE_SPEC;
}

/**
 * Returns the value of the {@code fileServiceCache} parameter. It would be used to create a Caffeine
 * {@link Cache} instance using {@link CaffeineSpec} for caching file entries.
 *
 * <p>The default value of this flag is {@value #DEFAULT_FILE_SERVICE_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.fileServiceCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.fileServiceCache=maximumSize=1024,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.fileServiceCache=off} JVM option to disable it.
 */
@Nullable
public static String fileServiceCacheSpec() {
    return FILE_SERVICE_CACHE_SPEC;
}

/**
 * Returns the value of the {@code cachedHeaders} parameter which contains a comma-separated list of
 * headers whose values are cached using {@code headerValueCache}.
 *
 * <p>The default value of this flag is {@value #DEFAULT_CACHED_HEADERS}. Specify the
 * {@code -Dcom.linecorp.armeria.cachedHeaders=<csv>} JVM option to override the default value.
 */
public static List<String> cachedHeaders() {
    return CACHED_HEADERS;
}

/**
 * Returns the value of the {@code compositeServiceCache} parameter. It would be used to create a
 * Caffeine {@link Cache} instance using {@link CaffeineSpec} for routing a request.
 * The {@link Cache} would hold the mappings of {@link RoutingContext} and the designated
 * {@link ServiceConfig} for a request to improve server performance.
 *
 * <p>The default value of this flag is {@value #DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC}. Specify the
 * {@code -Dcom.linecorp.armeria.compositeServiceCache=<spec>} JVM option to override the default value.
 * For example, {@code -Dcom.linecorp.armeria.compositeServiceCache=maximumSize=256,expireAfterAccess=600s}.
 * Also, specify {@code -Dcom.linecorp.armeria.compositeServiceCache=off} JVM option to disable it.
 */
@Nullable
public static String compositeServiceCacheSpec() {
    return COMPOSITE_SERVICE_CACHE_SPEC;
}
/**
 * Returns the verbosity of exceptions logged by annotated HTTP services. The value of this property
 * is one of the following:
 * <ul>
 *   <li>{@link ExceptionVerbosity#ALL} - logging all exceptions raised from annotated HTTP services</li>
 *   <li>{@link ExceptionVerbosity#UNHANDLED} - logging exceptions which are not handled by
 *   {@link ExceptionHandler}s provided by a user and are not well-known exceptions</li>
 *   <li>{@link ExceptionVerbosity#NONE} - no logging exceptions</li>
 * </ul>
 * A log message would be written at {@code WARN} level.
 *
 * <p>The default value of this flag is {@value #DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY}.
 * Specify the
 * {@code -Dcom.linecorp.armeria.annotatedServiceExceptionVerbosity=<all|unhandled|none>} JVM option
 * to override the default value.
 *
 * @see ExceptionVerbosity
 */
public static ExceptionVerbosity annotatedServiceExceptionVerbosity() {
    return ANNOTATED_SERVICE_EXCEPTION_VERBOSITY;
}

/**
 * Returns the {@link Predicate} that is used to choose the non-loopback IP v4 address in
 * {@link SystemInfo#defaultNonLoopbackIpV4Address()}.
 *
 * <p>The default value of this flag is {@code null}, which means all valid IPv4 addresses are
 * preferred. Specify the {@code -Dcom.linecorp.armeria.preferredIpV4Addresses=<csv>} JVM option
 * to override the default value. The {@code csv} should be
 * <a href="https://tools.ietf.org/html/rfc4632">Classless Inter-domain Routing(CIDR)</a>s or
 * exact IP addresses separated by commas. For example,
 * {@code -Dcom.linecorp.armeria.preferredIpV4Addresses=211.111.111.111,10.0.0.0/8,192.168.1.0/24}.
 */
@Nullable
public static Predicate<InetAddress> preferredIpV4Addresses() {
    return PREFERRED_IP_V4_ADDRESSES;
}

/**
 * Enables {@link DefaultAddressResolverGroup} that resolves domain name using JDK's built-in domain name
 * lookup mechanism.
 * Note that JDK's built-in resolver performs a blocking name lookup from the caller thread, and thus
 * this flag should be enabled only when the default asynchronous resolver does not work as expected,
 * for example by always throwing a {@link DnsNameResolverTimeoutException}.
 *
 * <p>This flag is disabled by default.
 * Specify the {@code -Dcom.linecorp.armeria.useJdkDnsResolver=true} JVM option
 * to enable it.
 */
public static boolean useJdkDnsResolver() {
    return USE_JDK_DNS_RESOLVER;
}

/**
 * Returns whether {@link CompletableFuture}s returned by Armeria methods log a warning if
 * {@link CompletableFuture#join()} or {@link CompletableFuture#get()} are called from an event loop thread.
 * Blocking an event loop thread in this manner reduces performance significantly, possibly causing
 * deadlocks, so it should be avoided at all costs (e.g. using {@code thenApply()} type methods to execute
 * asynchronously or running the logic using {@link ServiceRequestContext#blockingTaskExecutor()}.
 *
 * <p>This flag is enabled by default.
 * Specify the {@code -Dcom.linecorp.armeria.reportBlockedEventLoop=false} JVM option
 * to disable it.
 */
public static boolean reportBlockedEventLoop() {
    return REPORT_BLOCKED_EVENT_LOOP;
}

/**
 * Enables validation of HTTP headers for dangerous characters like newlines - such characters can be used
 * for injecting arbitrary content into HTTP responses.
 *
 * <p><strong>DISCLAIMER:</strong> Do not disable this unless you know what you are doing. It is recommended
 * to keep this validation enabled to ensure the sanity of responses. However, you may wish to disable the
 * validation to improve performance when you are sure responses are always safe, for example when only
 * HTTP/2 is used, or when you populate headers with known values, and have no chance of using untrusted
 * ones.
 *
 * <p>See <a href="https://github.com/line/armeria/security/advisories/GHSA-35fr-h7jr-hh86">CWE-113</a> for
 * more details on the security implications of this flag.
 *
 * <p>This flag is enabled by default.
 * Specify the {@code -Dcom.linecorp.armeria.validateHeaders=false} JVM option to disable it.</p>
 */
public static boolean validateHeaders() {
    return VALIDATE_HEADERS;
}

/**
 * Returns whether to switch back to Armeria's legacy {@link Meter} and {@link Tag} naming convention
 * that is not compliant with Micrometer's default {@link NamingConvention}.
 *
 * <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.useLegacyMeterNames=true}
 * JVM option to enable it.</p>
 */
public static boolean useLegacyMeterNames() {
    return USE_LEGACY_METER_NAMES;
}
/**
 * Reads the Caffeine cache spec flag {@code name}. A value that fails to parse via
 * {@link CaffeineSpec#parse(String)} is rejected in favor of {@code defaultValue};
 * the special value {@code "off"} disables the cache and yields {@code null}.
 */
@Nullable
private static String caffeineSpec(String name, String defaultValue) {
    final Predicate<String> isValidSpec = value -> {
        if ("off".equals(value)) {
            return true; // explicit opt-out is always accepted
        }
        try {
            CaffeineSpec.parse(value);
            return true;
        } catch (Exception e) {
            return false; // malformed spec
        }
    };
    final String spec = get(name, defaultValue, isValidSpec);
    if ("off".equals(spec)) {
        return null;
    }
    return spec;
}
/**
 * Reads the {@link ExceptionVerbosity} flag {@code name}, falling back to {@code defaultValue}
 * when the property is absent or does not match any {@link ExceptionVerbosity} constant
 * (case-insensitively).
 */
private static ExceptionVerbosity exceptionLoggingMode(String name, String defaultValue) {
    final String mode = getNormalized(name, defaultValue,
                                      value -> Arrays.stream(ExceptionVerbosity.values())
                                                     .anyMatch(v -> v.name().equalsIgnoreCase(value)));
    // Upper-case with an explicit root locale; the default-locale String.toUpperCase() is
    // locale-sensitive (e.g. the Turkish dotless-i problem) and could yield a name that
    // ExceptionVerbosity.valueOf() rejects.
    return ExceptionVerbosity.valueOf(mode.toUpperCase(java.util.Locale.ROOT));
}
/**
 * Reads the boolean flag {@code name}, falling back to {@code defaultValue},
 * accepting any well-formed boolean value.
 */
private static boolean getBoolean(String name, boolean defaultValue) {
    // Delegate with a validator that accepts every parsed boolean.
    return getBoolean(name, defaultValue, anyValue -> true);
}
/**
 * Reads the boolean flag {@code name}, falling back to {@code defaultValue}. A value is accepted
 * only when it is exactly {@code "true"} or {@code "false"} (after normalization) and passes
 * {@code validator}.
 */
private static boolean getBoolean(String name, boolean defaultValue, Predicate<Boolean> validator) {
    final Predicate<String> stringValidator = value -> {
        switch (value) {
            case "true":
                return validator.test(true);
            case "false":
                return validator.test(false);
            default:
                return false; // not a well-formed boolean literal
        }
    };
    final String normalized = getNormalized(name, String.valueOf(defaultValue), stringValidator);
    return "true".equals(normalized);
}
/**
 * Reads the integer flag {@code name}, falling back to {@code defaultValue} when the property
 * is absent, unparseable as an {@code int}, or rejected by {@code validator}.
 */
private static int getInt(String name, int defaultValue, IntPredicate validator) {
    final Predicate<String> stringValidator = value -> {
        try {
            return validator.test(Integer.parseInt(value));
        } catch (Exception e) {
            return false; // not parseable as an int
        }
    };
    final String chosen = getNormalized(name, String.valueOf(defaultValue), stringValidator);
    return Integer.parseInt(chosen);
}
/**
 * Reads the long flag {@code name}, falling back to {@code defaultValue} when the property
 * is absent, unparseable as a {@code long}, or rejected by {@code validator}.
 */
private static long getLong(String name, long defaultValue, LongPredicate validator) {
    final Predicate<String> stringValidator = value -> {
        try {
            return validator.test(Long.parseLong(value));
        } catch (Exception e) {
            return false; // not parseable as a long
        }
    };
    final String chosen = getNormalized(name, String.valueOf(defaultValue), stringValidator);
    return Long.parseLong(chosen);
}
/**
 * Reads the system property {@code PREFIX + name} as-is. Returns {@code defaultValue} when the
 * property is absent or fails {@code validator}; the chosen value is always logged at INFO.
 */
private static String get(String name, String defaultValue, Predicate<String> validator) {
    final String fullName = PREFIX + name;
    final String value = System.getProperty(fullName);
    if (value == null) {
        logger.info("{}: {} (default)", fullName, defaultValue);
        return defaultValue;
    }
    if (!validator.test(value)) {
        // Reject the supplied value and fall back, noting what was ignored.
        logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
        return defaultValue;
    }
    logger.info("{}: {}", fullName, value);
    return value;
}
private static String getNormalized(String name, String defaultValue, Predicate<String> validator) {
final String fullName = PREFIX + name;
final String value = getLowerCased(fullName);
if (value == null) {
logger.info("{}: {} (default)", fullName, defaultValue);
return defaultValue;
}
if (validator.test(value)) {
logger.info("{}: {}", fullName, value);
return value;
}
logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
return defaultValue;
}
/**
 * Returns the system property {@code fullName} lower-cased (ASCII-only
 * casing via Guava's {@code Ascii}), or {@code null} when it is not set.
 */
@Nullable
private static String getLowerCased(String fullName) {
    final String value = System.getProperty(fullName);
    return (value == null) ? null : Ascii.toLowerCase(value);
}
private Flags() {}
}
|
package com.transcendensoft.hedbanz.utils;
/**
* Copyright 2017. Andrii Chernysh
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.security.MessageDigest;
import timber.log.Timber;
/**
* Class for hashing passwords
*
* @author Andrii Chernysh. E-mail: itcherry97@gmail.com
* Developed by <u>Transcendensoft</u>
*/
public class SecurityUtils {

    /** Static utility class — not meant to be instantiated. */
    private SecurityUtils() {
    }

    /**
     * Hashes the given password with SHA-256 and returns the digest as a
     * lower-case hexadecimal string.
     *
     * <p>NOTE(security): a single unsalted SHA-256 pass is not suitable for
     * password storage — prefer bcrypt/scrypt/argon2. Kept as-is here because
     * the server presumably expects this exact digest; verify before changing.
     *
     * @param password the plain-text password to hash
     * @return 64-character lower-case hex representation of the SHA-256 digest
     * @throws RuntimeException if the SHA-256 algorithm is unavailable
     */
    public static String hash(String password) {
        try {
            MessageDigest digest = MessageDigest.getInstance("SHA-256");
            // StandardCharsets.UTF_8 avoids the checked exception thrown by
            // the charset-by-name overload getBytes("UTF-8").
            byte[] hash = digest.digest(password.getBytes(StandardCharsets.UTF_8));
            StringBuilder hexString = new StringBuilder(hash.length * 2);
            for (byte b : hash) {
                String hex = Integer.toHexString(0xff & b);
                if (hex.length() == 1) {
                    hexString.append('0'); // left-pad single-digit bytes
                }
                hexString.append(hex);
            }
            return hexString.toString();
        } catch (Exception ex) {
            // Rethrow with the cause preserved; logging here as well would
            // double-report the failure (the caller sees the exception).
            throw new RuntimeException(ex);
        }
    }
}
|
package DataStructures.DynamicArray;
import java.util.Arrays;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
public class DynamicArray<E> implements Iterable<E> {
private int capacity = 10;
private int size = 0;
private Object[] elements;
public DynamicArray(final int capacity) {
this.capacity = capacity;
this.elements = new Object[this.capacity];
}
public DynamicArray() {
this.elements = new Object[this.capacity];
}
public int newCapacity() {
this.capacity <<= 1;
return this.capacity;
}
public void add(final E element) {
if (this.size == this.elements.length)
this.elements = Arrays.copyOf(this.elements, newCapacity());
this.elements[this.size] = element;
size++;
}
public void put(final int index, E element) {
// Objects.checkIndex(index, this.size);
this.elements[index] = element;
}
public E get(final int index) {
return getElement(index);
}
public E remove(final int index) {
final E oldElement = getElement(index);
fastRemove(this.elements, index);
return oldElement;
}
public int size() {
return this.size;
}
public boolean isEmpty() {
return this.size == 0;
}
public Stream<E> stream() {
return StreamSupport.stream(spliterator(), false);
}
private void fastRemove(final Object[] elements, final int index) {
final int newSize = this.size - 1;
if (newSize > index)
System.arraycopy(elements, index + 1, elements, index, newSize - index);
elements[this.size = newSize] = null;
}
private E getElement(final int index) {
// Objects.checkIndex(index, this.size);
return (E) this.elements[index];
}
@Override
public String toString() {
return Arrays.toString(Arrays.stream(this.elements).filter(Objects::nonNull).toArray());
}
@Override
public Iterator iterator() {
return new DynamicArrayIterator();
}
private class DynamicArrayIterator implements Iterator<E> {
private int cursor;
@Override
public boolean hasNext() {
return this.cursor != size;
}
@Override
public E next() {
if (this.cursor > DynamicArray.this.size) throw new NoSuchElementException();
if (this.cursor > DynamicArray.this.elements.length) throw new ConcurrentModificationException();
final E element = DynamicArray.this.getElement(this.cursor);
this.cursor++;
return element;
}
@Override
public void remove() {
if (this.cursor < 0) throw new IllegalStateException();
DynamicArray.this.remove(this.cursor);
this.cursor--;
}
@Override
public void forEachRemaining(Consumer<? super E> action) {
Objects.requireNonNull(action);
for (int i = 0; i < DynamicArray.this.size; i++) {
action.accept(DynamicArray.this.getElement(i));
}
}
}
public static void main(String[] args) {
DynamicArray<String> names = new DynamicArray<>();
names.add("Peubes");
names.add("Marley");
for (String name : names) {
System.out.println(name);
}
names.stream().forEach(System.out::println);
System.out.println(names);
System.out.println(names.size());
names.remove(0);
for (String name : names) {
System.out.println(name);
}
}
}
|
package ch.skyfy.advancedwild.impl.cppi;
import ch.skyfy.advancedwild.impl.WildImplConfig;
public final class ClassicPerPlayerImplConfig implements WildImplConfig {

    /** Default delay between two wild uses, in milliseconds (presumably — confirm against usage). */
    private static final Long DEFAULT_DELAY_BETWEEN_WILD = 60_000L;
    /** Default maximum number of wild uses per player. */
    private static final int DEFAULT_MAX_WILD = 5;
    /** Default lower bound used by this implementation. */
    private static final int DEFAULT_MIN = -10_000;
    /** Default upper bound used by this implementation. */
    private static final int DEFAULT_MAX = 10_000;

    public final Long delayBetweenWild;
    public final int maximumWild;
    public final int min;
    public final int max;

    public ClassicPerPlayerImplConfig(Long delayBetweenWild, int maximumWild, int min, int max) {
        this.delayBetweenWild = delayBetweenWild;
        this.maximumWild = maximumWild;
        this.min = min;
        this.max = max;
    }

    /** No-arg constructor producing the default configuration. */
    @SuppressWarnings("unused")
    public ClassicPerPlayerImplConfig() {
        this(DEFAULT_DELAY_BETWEEN_WILD, DEFAULT_MAX_WILD, DEFAULT_MIN, DEFAULT_MAX);
    }

    /** This implementation accepts any value combination. */
    @Override
    public boolean isValid() {
        return true;
    }
}
|
/**
* Copyright ${license.git.copyrightYears} the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mybatis.spring.batch.builder;
import java.util.Map;
import java.util.Optional;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.batch.MyBatisPagingItemReader;
/**
* A builder for the {@link MyBatisPagingItemReader}.
*
* @author Kazuki Shimizu
* @since 2.0.0
* @see MyBatisPagingItemReader
*/
public class MyBatisPagingItemReaderBuilder<T> {

  private SqlSessionFactory sqlSessionFactory;
  private String queryId;
  private Map<String, Object> parameterValues;
  // Boxed so "not configured" (null) can be told apart from an explicit value.
  private Integer pageSize;
  private Boolean saveState;
  private Integer maxItemCount;

  /**
   * Sets the {@link SqlSessionFactory} the reader will use for database access.
   *
   * @param sqlSessionFactory
   *          the session factory to use
   * @return this instance for method chaining
   * @see MyBatisPagingItemReader#setSqlSessionFactory(SqlSessionFactory)
   */
  public MyBatisPagingItemReaderBuilder<T> sqlSessionFactory(SqlSessionFactory sqlSessionFactory) {
    this.sqlSessionFactory = sqlSessionFactory;
    return this;
  }

  /**
   * Sets the id of the statement to execute, as declared in the SqlMap
   * configuration file.
   *
   * @param queryId
   *          the statement id
   * @return this instance for method chaining
   * @see MyBatisPagingItemReader#setQueryId(String)
   */
  public MyBatisPagingItemReaderBuilder<T> queryId(String queryId) {
    this.queryId = queryId;
    return this;
  }

  /**
   * Sets the parameter values passed to the query on each execution.
   *
   * @param parameterValues
   *          the query parameters
   * @return this instance for method chaining
   * @see MyBatisPagingItemReader#setParameterValues(Map)
   */
  public MyBatisPagingItemReaderBuilder<T> parameterValues(Map<String, Object> parameterValues) {
    this.parameterValues = parameterValues;
    return this;
  }

  /**
   * Sets how many records are requested per page/query. Defaults to 10 and
   * must be greater than zero.
   *
   * @param pageSize
   *          number of items per page
   * @return this instance for method chaining
   * @see org.springframework.batch.item.database.AbstractPagingItemReader#setPageSize(int)
   */
  public MyBatisPagingItemReaderBuilder<T> pageSize(int pageSize) {
    this.pageSize = pageSize;
    return this;
  }

  /**
   * Sets whether the reader state is persisted in the
   * {@link org.springframework.batch.item.ExecutionContext} so the job can
   * restart where it left off.
   *
   * @param saveState
   *          defaults to true
   * @return this instance for method chaining
   * @see org.springframework.batch.item.support.AbstractItemCountingItemStreamItemReader#setSaveState(boolean)
   */
  public MyBatisPagingItemReaderBuilder<T> saveState(boolean saveState) {
    this.saveState = saveState;
    return this;
  }

  /**
   * Sets the maximum number of items to read.
   *
   * @param maxItemCount
   *          the read limit
   * @return this instance for method chaining
   * @see org.springframework.batch.item.support.AbstractItemCountingItemStreamItemReader#setMaxItemCount(int)
   */
  public MyBatisPagingItemReaderBuilder<T> maxItemCount(int maxItemCount) {
    this.maxItemCount = maxItemCount;
    return this;
  }

  /**
   * Assembles and returns a fully configured {@link MyBatisPagingItemReader}.
   *
   * @return the reader
   */
  public MyBatisPagingItemReader<T> build() {
    MyBatisPagingItemReader<T> reader = new MyBatisPagingItemReader<>();
    reader.setSqlSessionFactory(this.sqlSessionFactory);
    reader.setQueryId(this.queryId);
    reader.setParameterValues(this.parameterValues);
    // Only override the reader's defaults for values the caller actually set.
    if (this.pageSize != null) {
      reader.setPageSize(this.pageSize);
    }
    if (this.saveState != null) {
      reader.setSaveState(this.saveState);
    }
    if (this.maxItemCount != null) {
      reader.setMaxItemCount(this.maxItemCount);
    }
    return reader;
  }
}
|
package io.nuls.crosschain.nuls.rpc.call;
import io.nuls.base.RPCUtil;
import io.nuls.base.basic.AddressTool;
import io.nuls.base.data.MultiSigAccount;
import io.nuls.base.signture.P2PHKSignature;
import io.nuls.core.exception.NulsException;
import io.nuls.core.rpc.info.Constants;
import io.nuls.core.rpc.model.ModuleE;
import io.nuls.crosschain.nuls.utils.CommonUtil;
import java.util.HashMap;
import java.util.Map;
import static io.nuls.crosschain.nuls.constant.NulsCrossChainConstant.INIT_CAPACITY_8;
import static io.nuls.crosschain.nuls.constant.NulsCrossChainConstant.RPC_VERSION;
/**
* 与账户模块交互类
* Interaction class with account module
* @author tag
* 2019/4/10
*/
public class AccountCall {
    /**
     * Builds the parameter map shared by every account-module request:
     * RPC version, chain id (derived from the address) and the address itself.
     *
     * @param address the account address the request concerns
     * @return a mutable map pre-filled with the common parameters
     */
    private static Map<String, Object> baseParams(String address) {
        int chainId = AddressTool.getChainIdByAddress(address);
        Map<String, Object> params = new HashMap<>(INIT_CAPACITY_8);
        params.put(Constants.VERSION_KEY_STR, RPC_VERSION);
        params.put(Constants.CHAIN_ID, chainId);
        params.put("address", address);
        return params;
    }
    /**
     * Query the private key of an address.
     *
     * @param address the account address
     * @param password the account password
     * @return the private key string returned by the account module
     * @throws NulsException if the RPC call fails
     */
    public static String getPrikey(String address, String password) throws NulsException {
        try {
            Map<String, Object> params = baseParams(address);
            params.put("password", password);
            HashMap result = (HashMap) CommonCall.request(ModuleE.AC.abbr, "ac_getPriKeyByAddress", params);
            return (String) result.get("priKey");
        } catch (Exception e) {
            throw new NulsException(e);
        }
    }
    /**
     * Check whether an address is password protected (encrypted).
     *
     * @param address the account address
     * @return {@code true} when the account is encrypted
     * @throws NulsException if the RPC call fails
     */
    public static boolean isEncrypted(String address) throws NulsException {
        try {
            HashMap result = (HashMap) CommonCall.request(ModuleE.AC.abbr, "ac_isEncrypted", baseParams(address));
            return (boolean) result.get("value");
        } catch (Exception e) {
            throw new NulsException(e);
        }
    }
    /**
     * Query a multi-signature account by its raw address bytes.
     *
     * @param multiSignAddress raw address bytes of the multi-sig account
     * @return the account, or {@code null} when it does not exist
     * @throws NulsException if the RPC call fails
     */
    public static MultiSigAccount getMultiSigAccount(byte[] multiSignAddress) throws NulsException {
        try {
            String address = AddressTool.getStringAddressByBytes(multiSignAddress);
            HashMap result = (HashMap) CommonCall.request(ModuleE.AC.abbr, "ac_getMultiSigAccount", baseParams(address));
            String mAccountStr = (String) result.get("value");
            return null == mAccountStr ? null : CommonUtil.getInstanceRpcStr(mAccountStr, MultiSigAccount.class);
        } catch (Exception e) {
            throw new NulsException(e);
        }
    }
    /**
     * Sign a digest through the account module.
     *
     * @param address the signing account address
     * @param password the account password
     * @param data the bytes to sign
     * @return the resulting {@link P2PHKSignature}
     * @throws NulsException if the RPC call fails
     */
    public static P2PHKSignature signDigest(String address, String password, byte[] data) throws NulsException {
        try {
            Map<String, Object> params = baseParams(address);
            params.put("password", password);
            params.put("data", RPCUtil.encode(data));
            HashMap result = (HashMap) CommonCall.request(ModuleE.AC.abbr, "ac_signDigest", params);
            String signatureStr = (String) result.get("signature");
            return CommonUtil.getInstanceRpcStr(signatureStr, P2PHKSignature.class);
        } catch (Exception e) {
            throw new NulsException(e);
        }
    }
}
|
import org.junit.Assert;
import org.junit.Test;
import tool.ListNode;
public class M00002_01sTest {

    M00002_01s m00002_01s = new M00002_01s();

    /** Verifies duplicate removal keeps the first occurrence of each value. */
    @Test
    public void removeDuplicateNodes() {
        assertRemoved("1,2,3", "1, 2, 3, 3, 2, 1");
        assertRemoved("1,2", "1, 1, 1, 1, 2");
        assertRemoved("1", "1, 1, 1, 1");
        assertRemoved("1", "1, 1");
        assertRemoved("1", "1");
    }

    /**
     * Runs removeDuplicateNodes on the deserialized list and compares its
     * string form. Note the JUnit argument order: expected first, actual
     * second — the original had them swapped, producing misleading failure
     * messages.
     */
    private void assertRemoved(String expected, String input) {
        Assert.assertEquals(expected,
                String.valueOf(m00002_01s.removeDuplicateNodes(ListNode.deserialize(input))));
    }
}
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/logging/v2/logging_config.proto
package com.google.logging.v2;
public interface DeleteSinkRequestOrBuilder extends
    // @@protoc_insertion_point(interface_extends:google.logging.v2.DeleteSinkRequest)
    com.google.protobuf.MessageOrBuilder {
  // NOTE(review): protoc-generated accessor interface — do not hand-edit;
  // regenerate from google/logging/v2/logging_config.proto instead.
  /**
   * <pre>
   * Required. The full resource name of the sink to delete, including the
   * parent resource and the sink identifier:
   * "projects/[PROJECT_ID]/sinks/[SINK_ID]"
   * "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
   * "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
   * "folders/[FOLDER_ID]/sinks/[SINK_ID]"
   * Example: `"projects/my-project-id/sinks/my-sink-id"`.
   * </pre>
   *
   * <code>string sink_name = 1;</code>
   */
  java.lang.String getSinkName();
  /**
   * <pre>
   * Required. The full resource name of the sink to delete, including the
   * parent resource and the sink identifier:
   * "projects/[PROJECT_ID]/sinks/[SINK_ID]"
   * "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
   * "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
   * "folders/[FOLDER_ID]/sinks/[SINK_ID]"
   * Example: `"projects/my-project-id/sinks/my-sink-id"`.
   * </pre>
   *
   * <code>string sink_name = 1;</code>
   */
  // Same field as getSinkName(), exposed as a protobuf ByteString.
  com.google.protobuf.ByteString
      getSinkNameBytes();
}
|
/*
* Copyright 2017 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.Registry;
import com.netflix.spinnaker.cats.agent.AgentDataType;
import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE;
@Slf4j
public class KubernetesDaemonSetCachingAgent extends KubernetesV2OnDemandCachingAgent {

  /** Cache data types this agent contributes; daemon sets are authoritative here. */
  @Getter
  private final Collection<AgentDataType> providedDataTypes = Collections.unmodifiableSet(
      new HashSet<>(Arrays.asList(
          AUTHORITATIVE.forType(Keys.LogicalKind.APPLICATIONS.toString()),
          INFORMATIVE.forType(Keys.LogicalKind.CLUSTERS.toString()),
          INFORMATIVE.forType(KubernetesKind.DEPLOYMENT.toString()),
          AUTHORITATIVE.forType(KubernetesKind.DAEMON_SET.toString()))));

  KubernetesDaemonSetCachingAgent(
      KubernetesNamedAccountCredentials<KubernetesV2Credentials> namedAccountCredentials,
      ObjectMapper objectMapper,
      Registry registry,
      int agentIndex,
      int agentCount) {
    super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount);
  }

  @Override
  protected boolean hasClusterRelationship() {
    return true;
  }

  /** The Kubernetes kind this agent caches. */
  @Override
  protected KubernetesKind primaryKind() {
    return KubernetesKind.DAEMON_SET;
  }
}
|
/* ===========================================================
* Orson Charts : a 3D chart library for the Java(tm) platform
* ===========================================================
*
* (C)opyright 2013-2016, by Object Refinery Limited. All rights reserved.
*
* http://www.object-refinery.com/orsoncharts/index.html
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* [Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.]
*
* If you do not wish to be bound by the terms of the GPL, an alternative
* commercial license can be purchased. For details, please see visit the
* Orson Charts home page:
*
* http://www.object-refinery.com/orsoncharts/index.html
*
*/
package com.orsoncharts.data;
import java.util.List;
/**
* A two dimensional grid of data values where each value is uniquely
* identified by two keys (the {@code rowKey} and the
* {@code columnKey}). Any instance of {@code Comparable} can be
* used as a key ({@code String} objects are instances of
* {@code Comparable}, making them convenient key objects).
*
* @param <R> The row key type.
* @param <C> The column key type.
* @param <T> The value type.
*/
public interface KeyedValues2D<R extends Comparable<R>,
        C extends Comparable<C>, T> extends Values2D<T> {

    /**
     * Returns the key of the row at the given position.
     *
     * @param rowIndex  the zero-based row index.
     *
     * @return The row key.
     */
    R getRowKey(int rowIndex);

    /**
     * Returns the key of the column at the given position.
     *
     * @param columnIndex  the zero-based column index.
     *
     * @return The column key.
     */
    C getColumnKey(int columnIndex);

    /**
     * Returns the position of the given row key, or {@code -1} when the key
     * is not present.
     *
     * @param rowKey  the row key ({@code null} not permitted).
     *
     * @return The row index, or {@code -1}.
     */
    int getRowIndex(R rowKey);

    /**
     * Returns the position of the given column key, or {@code -1} when the
     * key is not present.
     *
     * @param columnKey  the column key ({@code null} not permitted).
     *
     * @return The column index, or {@code -1}.
     */
    int getColumnIndex(C columnKey);

    /**
     * Returns the row keys in order (order matters, since values can be
     * addressed by index as well as by key).
     * <br><br>
     * NOTE: implementations must return a list whose modification does not
     * affect the underlying data structure.
     *
     * @return A list of row keys.
     */
    List<R> getRowKeys();

    /**
     * Returns the column keys in order (order matters, since values can be
     * addressed by index as well as by key).
     * <br><br>
     * NOTE: implementations must return a list whose modification does not
     * affect the underlying data structure.
     *
     * @return A list of column keys.
     */
    List<C> getColumnKeys();

    /**
     * Returns the value (possibly {@code null}) stored under the given pair
     * of keys. If either key is unknown, a runtime exception is thrown.
     *
     * @param rowKey  the row key ({@code null} not permitted).
     * @param columnKey  the column key ({@code null} not permitted).
     *
     * @return The value (possibly {@code null}).
     */
    T getValue(R rowKey, C columnKey);
}
|
/**
* generated by Xtext 2.9.2
*/
package xmodelica.ui;
import org.eclipse.ui.plugin.AbstractUIPlugin;
import org.eclipse.xtend.lib.annotations.FinalFieldsConstructor;
import xmodelica.ui.AbstractModelicaUiModule;
/**
 * Use this class to register components to be used within the Eclipse IDE.
 */
// NOTE(review): Xtext-generated stub (see file header); customizations should
// normally go into a manually maintained subclass or binding methods.
@FinalFieldsConstructor
@SuppressWarnings("all")
public class ModelicaUiModule extends AbstractModelicaUiModule {
  public ModelicaUiModule(final AbstractUIPlugin plugin) {
    // Delegates all UI wiring to the generated base module.
    super(plugin);
  }
}
|
package com.vosmann.springboottemplate.server;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.json.MetricsModule;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.embedded.EmbeddedServletContainerFactory;
import org.springframework.boot.context.embedded.tomcat.TomcatEmbeddedServletContainerFactory;
import org.springframework.context.annotation.Bean;
import java.util.concurrent.TimeUnit;
@SpringBootApplication
public class App {

    private static final Logger LOG = LoggerFactory.getLogger(App.class);

    // Example of an externalized configuration value bound from properties.
    @Value("${your.parameter}")
    private String yourParameter;

    /** Placeholder no-op service bean for this template. */
    @Bean
    public NopService nopService() {
        return new NopService();
    }

    /** Central Dropwizard metrics registry. */
    @Bean
    public MetricRegistry metricRegistry() {
        return new MetricRegistry();
    }

    /** Jackson mapper able to serialize Dropwizard metrics. */
    @Bean
    public ObjectMapper objectMapper() {
        final ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.registerModule(new MetricsModule(TimeUnit.SECONDS, TimeUnit.MILLISECONDS, false));
        LOG.info("Registered metrics module.");
        return objectMapper;
    }

    /** Embedded Tomcat configured to listen on port 10001. */
    @Bean
    public EmbeddedServletContainerFactory servletContainer() {
        final TomcatEmbeddedServletContainerFactory tomcat = new TomcatEmbeddedServletContainerFactory();
        tomcat.setPort(10001);
        LOG.info("Set port to 10001.");
        return tomcat;
    }

    public static void main(final String[] args) throws Exception {
        SpringApplication.run(App.class, args);
    }
}
|
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.internal.jmx.suppliers;
import com.hazelcast.map.IMap;
import com.hazelcast.map.LocalMapStats;
import com.hazelcast.internal.monitor.impl.LocalMapStatsImpl;
/**
 * Implementation of {@link StatsSupplier} for {@link LocalMapStats}.
 */
public class LocalMapStatsSupplier implements StatsSupplier<LocalMapStats> {

    // Wildcard type parameters instead of the raw IMap type: the supplier
    // never touches keys or values, and raw types defeat generic checking.
    private final IMap<?, ?> map;

    public LocalMapStatsSupplier(IMap<?, ?> map) {
        this.map = map;
    }

    /** @return a fresh, all-zero stats instance used before real stats exist */
    @Override
    public LocalMapStats getEmpty() {
        return new LocalMapStatsImpl();
    }

    /** @return the current local statistics of the wrapped map */
    @Override
    public LocalMapStats get() {
        return map.getLocalMapStats();
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.mledger.impl;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.mledger.ManagedLedgerException.getManagedLedgerException;
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC;
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.DEFAULT_LEDGER_DELETE_RETRIES;
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.createManagedLedgerException;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Range;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.RateLimiter;
import com.google.protobuf.InvalidProtocolBufferException;
import io.netty.util.concurrent.FastThreadLocal;
import java.time.Clock;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.AsyncCallback.CloseCallback;
import org.apache.bookkeeper.client.AsyncCallback.DeleteCallback;
import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.api.BKException.Code;
import org.apache.bookkeeper.mledger.AsyncCallbacks;
import org.apache.bookkeeper.mledger.AsyncCallbacks.ClearBacklogCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.FindEntryCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntryCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.SkipEntriesCallback;
import org.apache.bookkeeper.mledger.Entry;
import org.apache.bookkeeper.mledger.ManagedCursor;
import org.apache.bookkeeper.mledger.ManagedLedger;
import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.CursorAlreadyClosedException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetaStoreException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.NoMoreEntriesToReadException;
import org.apache.bookkeeper.mledger.Position;
import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.PositionBound;
import org.apache.bookkeeper.mledger.impl.MetaStore.MetaStoreCallback;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.LongProperty;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.MessageRange;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.PositionInfo;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.pulsar.common.util.collections.BitSetRecyclable;
import org.apache.pulsar.common.util.collections.ConcurrentOpenLongPairRangeSet;
import org.apache.pulsar.common.util.collections.LongPairRangeSet;
import org.apache.pulsar.common.util.collections.LongPairRangeSet.LongPairConsumer;
import org.apache.pulsar.metadata.api.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings("checkstyle:javadoctype")
public class ManagedCursorImpl implements ManagedCursor {
    // BookKeeper client used to create/open/delete the cursor's metadata ledgers.
    protected final BookKeeper bookkeeper;
    protected final ManagedLedgerConfig config;
    // Parent managed ledger this cursor reads from.
    protected final ManagedLedgerImpl ledger;
    private final String name;
    // Digest type for cursor ledgers, derived from the managed-ledger config.
    private final BookKeeper.DigestType digestType;
    // Position up to which all entries are acknowledged (everything <= this is deleted).
    protected volatile PositionImpl markDeletePosition;
    protected static final AtomicReferenceFieldUpdater<ManagedCursorImpl, PositionImpl> READ_POSITION_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, PositionImpl.class, "readPosition");
    // Next position this cursor will read from.
    protected volatile PositionImpl readPosition;
    protected static final AtomicReferenceFieldUpdater<ManagedCursorImpl, MarkDeleteEntry> LAST_MARK_DELETE_ENTRY_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, MarkDeleteEntry.class, "lastMarkDeleteEntry");
    // Most recently persisted (or pending) mark-delete request; also carries the cursor properties.
    protected volatile MarkDeleteEntry lastMarkDeleteEntry;
    protected static final AtomicReferenceFieldUpdater<ManagedCursorImpl, OpReadEntry> WAITING_READ_OP_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, OpReadEntry.class, "waitingReadOp");
    // Single parked read operation waiting for new entries (at most one at a time).
    @SuppressWarnings("unused")
    private volatile OpReadEntry waitingReadOp = null;
    // int-encoded booleans for use with AtomicIntegerFieldUpdater.
    public static final int FALSE = 0;
    public static final int TRUE = 1;
    private static final AtomicIntegerFieldUpdater<ManagedCursorImpl> RESET_CURSOR_IN_PROGRESS_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(ManagedCursorImpl.class, "resetCursorInProgress");
    // TRUE while a cursor reset is in flight; guards against concurrent resets.
    @SuppressWarnings("unused")
    private volatile int resetCursorInProgress = FALSE;
    private static final AtomicIntegerFieldUpdater<ManagedCursorImpl> PENDING_READ_OPS_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(ManagedCursorImpl.class, "pendingReadOps");
    // Number of read operations currently submitted to the managed ledger.
    @SuppressWarnings("unused")
    private volatile int pendingReadOps = 0;
    private static final AtomicLongFieldUpdater<ManagedCursorImpl> MSG_CONSUMED_COUNTER_UPDATER =
            AtomicLongFieldUpdater.newUpdater(ManagedCursorImpl.class, "messagesConsumedCounter");
    // These counters are used to compute the numberOfEntries and numberOfEntriesInBacklog values, without having to
    // look at the list of ledgers in the ml. They are initialized to (-backlog) at opening, and will be incremented
    // each time a message is read or deleted.
    protected volatile long messagesConsumedCounter;
    // Current ledger used to append the mark-delete position
    private volatile LedgerHandle cursorLedger;
    // Whether the current cursorLedger is read-only or writable
    private boolean isCursorLedgerReadOnly = true;
    // Stat of the cursor z-node
    private volatile Stat cursorLedgerStat;
    // Converts (ledgerId, entryId) pairs coming out of the range set into positions.
    private static final LongPairConsumer<PositionImpl> positionRangeConverter = PositionImpl::new;
    // Same as above, but produces pooled/recyclable positions to avoid allocation on hot paths.
    private static final LongPairConsumer<PositionImplRecyclable> recyclePositionRangeConverter = (key, value) -> {
        PositionImplRecyclable position = PositionImplRecyclable.create();
        position.ledgerId = key;
        position.entryId = value;
        return position;
    };
    // Set of positions acknowledged out of order (ahead of markDeletePosition), stored as ranges.
    private final LongPairRangeSet<PositionImpl> individualDeletedMessages;
    // Maintain the deletion status for batch messages
    // (ledgerId, entryId) -> deletion indexes
    private final ConcurrentSkipListMap<PositionImpl, BitSetRecyclable> batchDeletedIndexes;
    // Guards individualDeletedMessages and batchDeletedIndexes.
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    // Optional rate limiter for mark-delete persistence; null when throttling is disabled.
    private RateLimiter markDeleteLimiter;
    // When true, the cursor never registers itself as active on the ledger.
    private boolean alwaysInactive = false;
    /** used temporary variables to {@link #getNumIndividualDeletedEntriesToSkip(long)} **/
    private static final FastThreadLocal<Long> tempTotalEntriesToSkip = new FastThreadLocal<>();
    private static final FastThreadLocal<Long> tempDeletedMessages = new FastThreadLocal<>();
    private static final FastThreadLocal<PositionImpl> tempStartPosition = new FastThreadLocal<>();
    private static final FastThreadLocal<PositionImpl> tempEndPosition = new FastThreadLocal<>();
    // Sentinel for "no byte-size cap" on read requests.
    private static final long NO_MAX_SIZE_LIMIT = -1L;
    // Cumulative read statistics for this cursor.
    private long entriesReadCount;
    private long entriesReadSize;
    /**
     * Immutable record of a mark-delete request: the target position, the properties to persist
     * alongside it, and the callback/context to notify once the position has been persisted.
     */
    class MarkDeleteEntry {
        final PositionImpl newPosition;
        final MarkDeleteCallback callback;
        final Object ctx;
        final Map<String, Long> properties;
        // If the callbackGroup is set, it means this mark-delete request was done on behalf of a group of request (just
        // persist the last one in the chain). In this case we need to trigger the callbacks for every request in the
        // group.
        List<MarkDeleteEntry> callbackGroup;
        public MarkDeleteEntry(PositionImpl newPosition, Map<String, Long> properties,
                MarkDeleteCallback callback, Object ctx) {
            this.newPosition = newPosition;
            this.properties = properties;
            this.callback = callback;
            this.ctx = ctx;
        }
    }
    // Mark-delete requests queued while another mark-delete write is in flight; flushed in order.
    // Access is synchronized on the deque itself.
    protected final ArrayDeque<MarkDeleteEntry> pendingMarkDeleteOps = new ArrayDeque<>();
    private static final AtomicIntegerFieldUpdater<ManagedCursorImpl> PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(ManagedCursorImpl.class, "pendingMarkDeletedSubmittedCount");
    // Count of mark-delete writes currently submitted to the cursor ledger.
    @SuppressWarnings("unused")
    private volatile int pendingMarkDeletedSubmittedCount = 0;
    // Timestamp (from 'clock') of the last cursor-ledger switch.
    private long lastLedgerSwitchTimestamp;
    // Clock source taken from the config, injected for testability.
    private final Clock clock;
    // The last active time (Unix time, milliseconds) of the cursor
    private long lastActive;
    /** Lifecycle states of the cursor and its metadata ledger. */
    enum State {
        Uninitialized, // Cursor is being initialized
        NoLedger, // There is no metadata ledger open for writing
        Open, // Metadata ledger is ready
        SwitchingLedger, // The metadata ledger is being switched
        Closing, // The managed cursor is closing
        Closed // The managed cursor has been closed
    }
    private static final AtomicReferenceFieldUpdater<ManagedCursorImpl, State> STATE_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, State.class, "state");
    // Current lifecycle state; transitions via STATE_UPDATER. Set to Uninitialized in the constructor.
    protected volatile State state = null;
    /** Minimal completion callback used by the cursor's internal asynchronous operations. */
    @SuppressWarnings("checkstyle:javadoctype")
    public interface VoidCallback {
        void operationComplete();
        void operationFailed(ManagedLedgerException exception);
    }
    /**
     * Creates a cursor in the {@code Uninitialized} state. Callers must subsequently invoke
     * {@code recover} (for an existing cursor) or {@code initialize} (for a new one).
     *
     * @param bookkeeper BookKeeper client used for cursor metadata ledgers
     * @param config     managed-ledger configuration (range-set impl, digest, throttling, clock, ...)
     * @param ledger     the managed ledger this cursor belongs to
     * @param cursorName unique cursor name within the managed ledger
     */
    ManagedCursorImpl(BookKeeper bookkeeper, ManagedLedgerConfig config, ManagedLedgerImpl ledger, String cursorName) {
        this.bookkeeper = bookkeeper;
        this.config = config;
        this.ledger = ledger;
        this.name = cursorName;
        // Choose the open-cache (lock-free, memory-heavier) or default range-set implementation.
        this.individualDeletedMessages = config.isUnackedRangesOpenCacheSetEnabled()
                ? new ConcurrentOpenLongPairRangeSet<>(4096, positionRangeConverter)
                : new LongPairRangeSet.DefaultRangeSet<>(positionRangeConverter);
        if (config.isDeletionAtBatchIndexLevelEnabled()) {
            this.batchDeletedIndexes = new ConcurrentSkipListMap<>();
        } else {
            // Batch-index deletion disabled: keep the map null and guard all uses with a null check.
            this.batchDeletedIndexes = null;
        }
        this.digestType = BookKeeper.DigestType.fromApiDigestType(config.getDigestType());
        STATE_UPDATER.set(this, State.Uninitialized);
        PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.set(this, 0);
        PENDING_READ_OPS_UPDATER.set(this, 0);
        RESET_CURSOR_IN_PROGRESS_UPDATER.set(this, FALSE);
        WAITING_READ_OP_UPDATER.set(this, null);
        this.clock = config.getClock();
        this.lastActive = this.clock.millis();
        this.lastLedgerSwitchTimestamp = this.clock.millis();
        if (config.getThrottleMarkDelete() > 0.0) {
            markDeleteLimiter = RateLimiter.create(config.getThrottleMarkDelete());
        } else {
            // Disable mark-delete rate limiter
            markDeleteLimiter = null;
        }
    }
    /**
     * Returns the properties attached to the last mark-delete request, or an empty map if no
     * mark-delete has been recorded yet.
     */
    @Override
    public Map<String, Long> getProperties() {
        return lastMarkDeleteEntry != null ? lastMarkDeleteEntry.properties : Collections.emptyMap();
    }
    /**
     * Performs the initial recovery, reading the mark-deleted position from the ledger and then calling initialize to
     * have a new opened ledger.
     *
     * <p>Two cases: if the stored cursor info has no cursor-ledger id (-1), the cursor was closed
     * cleanly and the mark-delete position is read directly from the metadata store; otherwise the
     * last entry of the referenced cursor ledger must be read ({@link #recoverFromLedger}).
     *
     * @param callback notified once recovery completes or fails
     */
    void recover(final VoidCallback callback) {
        // Read the meta-data ledgerId from the store
        log.info("[{}] Recovering from bookkeeper ledger cursor: {}", ledger.getName(), name);
        ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, new MetaStoreCallback<ManagedCursorInfo>() {
            @Override
            public void operationComplete(ManagedCursorInfo info, Stat stat) {
                cursorLedgerStat = stat;
                // 0 means "never recorded"; keep the constructor-set default in that case.
                lastActive = info.getLastActive() != 0 ? info.getLastActive() : lastActive;
                if (info.getCursorsLedgerId() == -1L) {
                    // There is no cursor ledger to read the last position from. It means the cursor has been properly
                    // closed and the last mark-delete position is stored in the ManagedCursorInfo itself.
                    PositionImpl recoveredPosition = new PositionImpl(info.getMarkDeleteLedgerId(),
                            info.getMarkDeleteEntryId());
                    if (info.getIndividualDeletedMessagesCount() > 0) {
                        recoverIndividualDeletedMessages(info.getIndividualDeletedMessagesList());
                    }
                    Map<String, Long> recoveredProperties = Collections.emptyMap();
                    if (info.getPropertiesCount() > 0) {
                        // Recover properties map
                        recoveredProperties = Maps.newHashMap();
                        for (int i = 0; i < info.getPropertiesCount(); i++) {
                            LongProperty property = info.getProperties(i);
                            recoveredProperties.put(property.getName(), property.getValue());
                        }
                    }
                    recoveredCursor(recoveredPosition, recoveredProperties, null);
                    callback.operationComplete();
                } else {
                    // Need to proceed and read the last entry in the specified ledger to find out the last position
                    log.info("[{}] Consumer {} meta-data recover from ledger {}", ledger.getName(), name,
                            info.getCursorsLedgerId());
                    recoverFromLedger(info, callback);
                }
            }
            @Override
            public void operationFailed(MetaStoreException e) {
                callback.operationFailed(e);
            }
        });
    }
protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallback callback) {
// Read the acknowledged position from the metadata ledger, then create
// a new ledger and write the position into it
ledger.mbean.startCursorLedgerOpenOp();
long ledgerId = info.getCursorsLedgerId();
OpenCallback openCallback = (rc, lh, ctx) -> {
if (log.isInfoEnabled()) {
log.info("[{}] Opened ledger {} for consumer {}. rc={}", ledger.getName(), ledgerId, name, rc);
}
if (isBkErrorNotRecoverable(rc)) {
log.error("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name,
BKException.getMessage(rc));
// Rewind to oldest entry available
initialize(getRollbackPosition(info), Collections.emptyMap(), callback);
return;
} else if (rc != BKException.Code.OK) {
log.warn("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name,
BKException.getMessage(rc));
callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc)));
return;
}
// Read the last entry in the ledger
long lastEntryInLedger = lh.getLastAddConfirmed();
if (lastEntryInLedger < 0) {
log.warn("[{}] Error reading from metadata ledger {} for consumer {}: No entries in ledger",
ledger.getName(), ledgerId, name);
// Rewind to last cursor snapshot available
initialize(getRollbackPosition(info), Collections.emptyMap(), callback);
return;
}
lh.asyncReadEntries(lastEntryInLedger, lastEntryInLedger, (rc1, lh1, seq, ctx1) -> {
if (log.isDebugEnabled()) {
log.debug("[{}} readComplete rc={} entryId={}", ledger.getName(), rc1, lh1.getLastAddConfirmed());
}
if (isBkErrorNotRecoverable(rc1)) {
log.error("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(),
ledgerId, name, BKException.getMessage(rc1));
// Rewind to oldest entry available
initialize(getRollbackPosition(info), Collections.emptyMap(), callback);
return;
} else if (rc1 != BKException.Code.OK) {
log.warn("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(),
ledgerId, name, BKException.getMessage(rc1));
callback.operationFailed(createManagedLedgerException(rc1));
return;
}
LedgerEntry entry = seq.nextElement();
PositionInfo positionInfo;
try {
positionInfo = PositionInfo.parseFrom(entry.getEntry());
} catch (InvalidProtocolBufferException e) {
callback.operationFailed(new ManagedLedgerException(e));
return;
}
Map<String, Long> recoveredProperties = Collections.emptyMap();
if (positionInfo.getPropertiesCount() > 0) {
// Recover properties map
recoveredProperties = Maps.newHashMap();
for (int i = 0; i < positionInfo.getPropertiesCount(); i++) {
LongProperty property = positionInfo.getProperties(i);
recoveredProperties.put(property.getName(), property.getValue());
}
}
PositionImpl position = new PositionImpl(positionInfo);
if (positionInfo.getIndividualDeletedMessagesCount() > 0) {
recoverIndividualDeletedMessages(positionInfo.getIndividualDeletedMessagesList());
}
if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null
&& positionInfo.getBatchedEntryDeletionIndexInfoCount() > 0) {
recoverBatchDeletedIndexes(positionInfo.getBatchedEntryDeletionIndexInfoList());
}
recoveredCursor(position, recoveredProperties, lh);
callback.operationComplete();
}, null);
};
try {
bookkeeper.asyncOpenLedger(ledgerId, digestType, config.getPassword(), openCallback, null);
} catch (Throwable t) {
log.error("[{}] Encountered error on opening cursor ledger {} for cursor {}",
ledger.getName(), ledgerId, name, t);
openCallback.openComplete(BKException.Code.UnexpectedConditionException, null, null);
}
}
    /**
     * Rebuilds the {@code individualDeletedMessages} range set from the persisted message ranges.
     *
     * <p>Ranges whose endpoints fall in different ledgers cannot be stored directly in the
     * per-ledger range set, so they are split: the lower-ledger tail, every fully-covered
     * intermediate ledger, and the upper-ledger head are added as separate open-closed ranges.
     */
    private void recoverIndividualDeletedMessages(List<MLDataFormats.MessageRange> individualDeletedMessagesList) {
        lock.writeLock().lock();
        try {
            individualDeletedMessages.clear();
            individualDeletedMessagesList.forEach(messageRange -> {
                MLDataFormats.NestedPositionInfo lowerEndpoint = messageRange.getLowerEndpoint();
                MLDataFormats.NestedPositionInfo upperEndpoint = messageRange.getUpperEndpoint();
                if (lowerEndpoint.getLedgerId() == upperEndpoint.getLedgerId()) {
                    // Simple case: the whole range lives in a single ledger.
                    individualDeletedMessages.addOpenClosed(lowerEndpoint.getLedgerId(), lowerEndpoint.getEntryId(),
                            upperEndpoint.getLedgerId(), upperEndpoint.getEntryId());
                } else {
                    // Store message ranges after splitting them by ledger ID
                    LedgerInfo lowerEndpointLedgerInfo = ledger.getLedgersInfo().get(lowerEndpoint.getLedgerId());
                    if (lowerEndpointLedgerInfo != null) {
                        // Tail of the lower ledger: (lowerEntry, lastEntryOfLedger].
                        individualDeletedMessages.addOpenClosed(lowerEndpoint.getLedgerId(), lowerEndpoint.getEntryId(),
                                lowerEndpoint.getLedgerId(), lowerEndpointLedgerInfo.getEntries() - 1);
                    } else {
                        log.warn("[{}][{}] No ledger info of lower endpoint {}:{}", ledger.getName(), name,
                                lowerEndpoint.getLedgerId(), lowerEndpoint.getEntryId());
                    }
                    // Every ledger strictly between the endpoints is entirely deleted.
                    for (LedgerInfo li : ledger.getLedgersInfo()
                            .subMap(lowerEndpoint.getLedgerId(), false, upperEndpoint.getLedgerId(), false).values()) {
                        individualDeletedMessages.addOpenClosed(li.getLedgerId(), -1, li.getLedgerId(),
                                li.getEntries() - 1);
                    }
                    // Head of the upper ledger: (-1, upperEntry] i.e. entries 0..upperEntry.
                    individualDeletedMessages.addOpenClosed(upperEndpoint.getLedgerId(), -1,
                            upperEndpoint.getLedgerId(), upperEndpoint.getEntryId());
                }
            });
        } finally {
            lock.writeLock().unlock();
        }
    }
private void recoverBatchDeletedIndexes (List<MLDataFormats.BatchedEntryDeletionIndexInfo> batchDeletedIndexInfoList) {
lock.writeLock().lock();
try {
this.batchDeletedIndexes.clear();
batchDeletedIndexInfoList.forEach(batchDeletedIndexInfo -> {
if (batchDeletedIndexInfo.getDeleteSetCount() > 0) {
long[] array = new long[batchDeletedIndexInfo.getDeleteSetCount()];
for (int i = 0; i < batchDeletedIndexInfo.getDeleteSetList().size(); i++) {
array[i] = batchDeletedIndexInfo.getDeleteSetList().get(i);
}
this.batchDeletedIndexes.put(PositionImpl.get(batchDeletedIndexInfo.getPosition().getLedgerId(),
batchDeletedIndexInfo.getPosition().getEntryId()), BitSetRecyclable.create().resetWords(array));
}
});
} finally {
lock.writeLock().unlock();
}
}
private void recoveredCursor(PositionImpl position, Map<String, Long> properties,
LedgerHandle recoveredFromCursorLedger) {
// if the position was at a ledger that didn't exist (since it will be deleted if it was previously empty),
// we need to move to the next existing ledger
if (!ledger.ledgerExists(position.getLedgerId())) {
Long nextExistingLedger = ledger.getNextValidLedger(position.getLedgerId());
if (nextExistingLedger == null) {
log.info("[{}] [{}] Couldn't find next next valid ledger for recovery {}", ledger.getName(), name,
position);
}
position = nextExistingLedger != null ? PositionImpl.get(nextExistingLedger, -1) : position;
}
if (position.compareTo(ledger.getLastPosition()) > 0) {
log.warn("[{}] [{}] Current position {} is ahead of last position {}", ledger.getName(), name, position,
ledger.getLastPosition());
position = ledger.getLastPosition();
}
log.info("[{}] Cursor {} recovered to position {}", ledger.getName(), name, position);
messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition()));
markDeletePosition = position;
readPosition = ledger.getNextValidPosition(position);
lastMarkDeleteEntry = new MarkDeleteEntry(markDeletePosition, properties, null, null);
// assign cursor-ledger so, it can be deleted when new ledger will be switched
this.cursorLedger = recoveredFromCursorLedger;
this.isCursorLedgerReadOnly = true;
STATE_UPDATER.set(this, State.NoLedger);
}
    /**
     * Initializes a (new or rolled-back) cursor at the given position and creates a fresh
     * metadata ledger for it; transitions the cursor to {@code Open} on success.
     *
     * @param position   starting mark-delete position
     * @param properties properties to associate with the initial mark-delete entry
     * @param callback   notified once the metadata ledger is created (or creation fails)
     */
    void initialize(PositionImpl position, Map<String, Long> properties, final VoidCallback callback) {
        recoveredCursor(position, properties, null);
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer {} cursor initialized with counters: consumed {} mdPos {} rdPos {}",
                    ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
        }
        createNewMetadataLedger(new VoidCallback() {
            @Override
            public void operationComplete() {
                STATE_UPDATER.set(ManagedCursorImpl.this, State.Open);
                callback.operationComplete();
            }
            @Override
            public void operationFailed(ManagedLedgerException exception) {
                callback.operationFailed(exception);
            }
        });
    }
    /**
     * Synchronous wrapper around {@link #asyncReadEntries}: blocks until the read completes.
     *
     * @param numberOfEntriesToRead maximum number of entries to read (must be &gt; 0)
     * @return the entries read (caller must release them)
     * @throws ManagedLedgerException if the asynchronous read failed
     * @throws InterruptedException   if interrupted while waiting
     */
    @Override
    public List<Entry> readEntries(int numberOfEntriesToRead) throws InterruptedException, ManagedLedgerException {
        checkArgument(numberOfEntriesToRead > 0);
        final CountDownLatch counter = new CountDownLatch(1);
        // Mutable holder so the anonymous callback can hand the outcome back to this thread.
        class Result {
            ManagedLedgerException exception = null;
            List<Entry> entries = null;
        }
        final Result result = new Result();
        asyncReadEntries(numberOfEntriesToRead, new ReadEntriesCallback() {
            @Override
            public void readEntriesComplete(List<Entry> entries, Object ctx) {
                result.entries = entries;
                counter.countDown();
            }
            @Override
            public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
                result.exception = exception;
                counter.countDown();
            }
        }, null);
        counter.await();
        if (result.exception != null) {
            throw result.exception;
        }
        return result.entries;
    }
    /**
     * Asynchronously reads up to {@code numberOfEntriesToRead} entries starting at the current
     * read position. Fails immediately if the cursor is closed or closing.
     *
     * @param numberOfEntriesToRead maximum number of entries to read (must be &gt; 0)
     * @param callback              receives the entries or the failure
     * @param ctx                   opaque context passed through to the callback
     */
    @Override
    public void asyncReadEntries(final int numberOfEntriesToRead, final ReadEntriesCallback callback,
                                 final Object ctx) {
        checkArgument(numberOfEntriesToRead > 0);
        if (isClosed()) {
            callback.readEntriesFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
            return;
        }
        // Track in-flight reads; decremented by OpReadEntry on completion.
        PENDING_READ_OPS_UPDATER.incrementAndGet(this);
        OpReadEntry op = OpReadEntry.create(this, readPosition, numberOfEntriesToRead, callback, ctx);
        ledger.asyncReadEntries(op);
    }
    /**
     * Synchronous wrapper around {@link #asyncGetNthEntry}: returns the n-th entry after the
     * mark-delete position, or null if fewer than n entries are available.
     *
     * <p>NOTE(review): the {@code await} timeout's boolean result is ignored — on timeout this
     * returns null (indistinguishable from "entry not found") instead of signaling a timeout;
     * confirm whether callers rely on that before changing it.
     *
     * @throws ManagedLedgerException if the asynchronous read failed
     * @throws InterruptedException   if interrupted while waiting
     */
    @Override
    public Entry getNthEntry(int n, IndividualDeletedEntries deletedEntries)
            throws InterruptedException, ManagedLedgerException {
        final CountDownLatch counter = new CountDownLatch(1);
        // Mutable holder so the anonymous callback can hand the outcome back to this thread.
        class Result {
            ManagedLedgerException exception = null;
            Entry entry = null;
        }
        final Result result = new Result();
        asyncGetNthEntry(n, deletedEntries, new ReadEntryCallback() {
            @Override
            public void readEntryFailed(ManagedLedgerException exception, Object ctx) {
                result.exception = exception;
                counter.countDown();
            }
            @Override
            public void readEntryComplete(Entry entry, Object ctx) {
                result.entry = entry;
                counter.countDown();
            }
        }, null);
        counter.await(ledger.getConfig().getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
        if (result.exception != null) {
            throw result.exception;
        }
        return result.entry;
    }
    /**
     * Asynchronously reads the n-th entry after the mark-delete position. When
     * {@code deletedEntries == Exclude}, individually-deleted entries are skipped over when
     * counting. Completes with a null entry when fewer than n entries exist.
     *
     * @param n              1-based index of the entry to fetch (must be &gt; 0)
     * @param deletedEntries whether individually-deleted entries count towards n
     * @param callback       receives the entry (or null) or a failure
     * @param ctx            opaque context passed through to the callback
     */
    @Override
    public void asyncGetNthEntry(int n, IndividualDeletedEntries deletedEntries, ReadEntryCallback callback,
                                 Object ctx) {
        checkArgument(n > 0);
        if (isClosed()) {
            callback.readEntryFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
            return;
        }
        PositionImpl startPosition = ledger.getNextValidPosition(markDeletePosition);
        PositionImpl endPosition = ledger.getLastPosition();
        if (startPosition.compareTo(endPosition) <= 0) {
            long numOfEntries = getNumberOfEntries(Range.closed(startPosition, endPosition));
            if (numOfEntries >= n) {
                long deletedMessages = 0;
                if (deletedEntries == IndividualDeletedEntries.Exclude) {
                    // Skip over already-deleted entries so n counts only live messages.
                    deletedMessages = getNumIndividualDeletedEntriesToSkip(n);
                }
                PositionImpl positionAfterN = ledger.getPositionAfterN(markDeletePosition, n + deletedMessages,
                        PositionBound.startExcluded);
                ledger.asyncReadEntry(positionAfterN, callback, ctx);
            } else {
                // Not enough entries available: report "no entry" rather than an error.
                callback.readEntryComplete(null, ctx);
            }
        } else {
            callback.readEntryComplete(null, ctx);
        }
    }
    /**
     * Blocking read that waits for entries if none are available; no byte-size cap.
     *
     * @see #readEntriesOrWait(int, long)
     */
    @Override
    public List<Entry> readEntriesOrWait(int numberOfEntriesToRead)
            throws InterruptedException, ManagedLedgerException {
        return readEntriesOrWait(numberOfEntriesToRead, NO_MAX_SIZE_LIMIT);
    }
    /**
     * Blocking read that waits for entries if none are currently available.
     *
     * @param numberOfEntriesToRead maximum number of entries to read (must be &gt; 0)
     * @param maxSizeBytes          cap on total payload bytes, or {@code NO_MAX_SIZE_LIMIT}
     * @throws ManagedLedgerException if the asynchronous read failed
     * @throws InterruptedException   if interrupted while waiting
     */
    @Override
    public List<Entry> readEntriesOrWait(int numberOfEntriesToRead, long maxSizeBytes)
            throws InterruptedException, ManagedLedgerException {
        checkArgument(numberOfEntriesToRead > 0);
        final CountDownLatch counter = new CountDownLatch(1);
        // Mutable holder so the anonymous callback can hand the outcome back to this thread.
        class Result {
            ManagedLedgerException exception = null;
            List<Entry> entries = null;
        }
        final Result result = new Result();
        asyncReadEntriesOrWait(numberOfEntriesToRead, maxSizeBytes, new ReadEntriesCallback() {
            @Override
            public void readEntriesComplete(List<Entry> entries, Object ctx) {
                result.entries = entries;
                counter.countDown();
            }
            @Override
            public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
                result.exception = exception;
                counter.countDown();
            }
        }, null);
        counter.await();
        if (result.exception != null) {
            throw result.exception;
        }
        return result.entries;
    }
    /**
     * Asynchronous read-or-wait with no byte-size cap.
     *
     * @see #asyncReadEntriesOrWait(int, long, ReadEntriesCallback, Object)
     */
    @Override
    public void asyncReadEntriesOrWait(int numberOfEntriesToRead, ReadEntriesCallback callback, Object ctx) {
        asyncReadEntriesOrWait(numberOfEntriesToRead, NO_MAX_SIZE_LIMIT, callback, ctx);
    }
    /**
     * Asynchronously reads entries, or — if none are available — parks the read operation until
     * new entries arrive. At most one read can be waiting per cursor (enforced via CAS on
     * {@code waitingReadOp}); a second concurrent wait fails immediately.
     *
     * <p>The waiting path re-checks for entries after a short delay before registering with the
     * managed ledger's notification list, closing the race between "no entries" check and a
     * concurrent append.
     *
     * @param maxEntries   maximum number of entries to read (must be &gt; 0)
     * @param maxSizeBytes cap on total payload bytes, or {@code NO_MAX_SIZE_LIMIT}
     * @param callback     receives the entries or the failure
     * @param ctx          opaque context passed through to the callback
     */
    @Override
    public void asyncReadEntriesOrWait(int maxEntries, long maxSizeBytes, ReadEntriesCallback callback, Object ctx) {
        checkArgument(maxEntries > 0);
        if (isClosed()) {
            callback.readEntriesFailed(new CursorAlreadyClosedException("Cursor was already closed"), ctx);
            return;
        }
        int numberOfEntriesToRead = applyMaxSizeCap(maxEntries, maxSizeBytes);
        if (hasMoreEntries()) {
            // If we have available entries, we can read them immediately
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Read entries immediately", ledger.getName(), name);
            }
            asyncReadEntries(numberOfEntriesToRead, callback, ctx);
        } else {
            OpReadEntry op = OpReadEntry.create(this, readPosition, numberOfEntriesToRead, callback,
                    ctx);
            // CAS guards the single-waiter invariant: only one parked read per cursor.
            if (!WAITING_READ_OP_UPDATER.compareAndSet(this, null, op)) {
                callback.readEntriesFailed(new ManagedLedgerException("We can only have a single waiting callback"),
                        ctx);
                return;
            }
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Deferring retry of read at position {}", ledger.getName(), name, op.readPosition);
            }
            // Check again for new entries again in 10ms, then if still no entries are available register to be notified
            ledger.getScheduledExecutor().schedule(safeRun(() -> {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] Re-trying the read at position {}", ledger.getName(), name, op.readPosition);
                }
                if (!hasMoreEntries()) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Still no entries available. Register for notification", ledger.getName(),
                                name);
                    }
                    // Let the managed ledger know we want to be notified whenever a new entry is published
                    ledger.waitingCursors.add(this);
                } else {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Skip notification registering since we do have entries available",
                                ledger.getName(), name);
                    }
                }
                // Check again the entries count, since an entry could have been written between the time we
                // checked and the time we've asked to be notified by managed ledger
                if (hasMoreEntries()) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Found more entries", ledger.getName(), name);
                    }
                    // Try to cancel the notification request
                    if (WAITING_READ_OP_UPDATER.compareAndSet(this, op, null)) {
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] [{}] Cancelled notification and scheduled read at {}", ledger.getName(),
                                    name, op.readPosition);
                        }
                        PENDING_READ_OPS_UPDATER.incrementAndGet(this);
                        ledger.asyncReadEntries(op);
                    } else {
                        // Another thread already consumed/cancelled the waiting op; it owns the read now.
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] [{}] notification was already cancelled", ledger.getName(), name);
                        }
                    }
                } else if (ledger.isTerminated()) {
                    // At this point we registered for notification and still there were no more available
                    // entries.
                    // If the managed ledger was indeed terminated, we need to notify the cursor
                    callback.readEntriesFailed(new NoMoreEntriesToReadException("Topic was terminated"), ctx);
                }
            }), config.getNewEntriesCheckDelayInMillis(), TimeUnit.MILLISECONDS);
        }
    }
    /** Returns true once the cursor has started or finished closing. */
    private boolean isClosed() {
        return state == State.Closed || state == State.Closing;
    }
    /**
     * Atomically cancels the currently parked read-or-wait request, if any.
     *
     * @return true if a pending request was cancelled, false if none was waiting
     */
    @Override
    public boolean cancelPendingReadRequest() {
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] Cancel pending read request", ledger.getName(), name);
        }
        return WAITING_READ_OP_UPDATER.getAndSet(this, null) != null;
    }
    /** Returns true if a read-or-wait request is currently parked waiting for entries. */
    public boolean hasPendingReadRequest() {
        return WAITING_READ_OP_UPDATER.get(this) != null;
    }
    /**
     * Returns true if there is at least one entry available for this cursor to read.
     */
    @Override
    public boolean hasMoreEntries() {
        // If writer and reader are on the same ledger, we just need to compare the entry id to know if we have more
        // entries.
        // If they are on different ledgers we have 2 cases :
        // * Writer pointing to valid entry --> should return true since we have available entries
        // * Writer pointing to "invalid" entry -1 (meaning no entries in that ledger) --> Need to check if the reader
        // is
        // at the last entry in the previous ledger
        PositionImpl writerPosition = ledger.getLastPosition();
        if (writerPosition.getEntryId() != -1) {
            return readPosition.compareTo(writerPosition) <= 0;
        } else {
            // Fall back to checking the number of entries to ensure we are at the last entry in ledger and no ledgers
            // are in the middle
            return getNumberOfEntries() > 0;
        }
    }
    /**
     * Returns the number of entries between the current read position and the end of the ledger
     * (0 if the read position has already passed the last entry).
     */
    @Override
    public long getNumberOfEntries() {
        if (readPosition.compareTo(ledger.getLastPosition().getNext()) > 0) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Read position {} is ahead of last position {}. There are no entries to read",
                        ledger.getName(), name, readPosition, ledger.getLastPosition());
            }
            return 0;
        } else {
            return getNumberOfEntries(Range.closedOpen(readPosition, ledger.getLastPosition().getNext()));
        }
    }
    /**
     * Returns the number of entries between the mark-delete position and the read position,
     * i.e. entries dispatched but not yet acknowledged in order.
     */
    @Override
    public long getNumberOfEntriesSinceFirstNotAckedMessage() {
        // sometimes for already caught up consumer: due to race condition markDeletePosition > readPosition. so,
        // validate it before preparing range
        PositionImpl markDeletePosition = this.markDeletePosition;
        PositionImpl readPosition = this.readPosition;
        return (markDeletePosition != null && readPosition != null && markDeletePosition.compareTo(readPosition) < 0)
                ? ledger.getNumberOfEntries(Range.openClosed(markDeletePosition, readPosition))
                : 0;
    }
    /** Returns the number of non-contiguous individually-deleted message ranges. */
    @Override
    public int getTotalNonContiguousDeletedMessagesRange() {
        return individualDeletedMessages.size();
    }
    /** Returns an estimate (in bytes) of the backlog from the mark-delete position onward. */
    @Override
    public long getEstimatedSizeSinceMarkDeletePosition() {
        return ledger.estimateBacklogFromPosition(markDeletePosition);
    }
    /**
     * Returns the backlog (unacknowledged entries) for this cursor.
     *
     * @param isPrecise when true, compute by scanning ledger metadata; when false, derive from the
     *                  running counters (falling back to the precise path if they went negative)
     */
    @Override
    public long getNumberOfEntriesInBacklog(boolean isPrecise) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer {} cursor ml-entries: {} -- deleted-counter: {} other counters: mdPos {} rdPos {}",
                    ledger.getName(), name, ManagedLedgerImpl.ENTRIES_ADDED_COUNTER_UPDATER.get(ledger),
                    messagesConsumedCounter, markDeletePosition, readPosition);
        }
        if (isPrecise) {
            // -1 because the closed range includes the (already-deleted) mark-delete position itself.
            return getNumberOfEntries(Range.closed(markDeletePosition, ledger.getLastPosition())) - 1;
        }
        long backlog = ManagedLedgerImpl.ENTRIES_ADDED_COUNTER_UPDATER.get(ledger) - messagesConsumedCounter;
        if (backlog < 0) {
            // In some case the counters get incorrect values, fall back to the precise backlog count
            backlog = getNumberOfEntries(Range.closed(markDeletePosition, ledger.getLastPosition())) - 1;
        }
        return backlog;
    }
    /** Returns the number of entries physically stored after the mark-delete position. */
    public long getNumberOfEntriesInStorage() {
        return ledger.getNumberOfEntries(Range.openClosed(markDeletePosition, ledger.getLastPosition().getNext()));
    }
    /**
     * Finds the newest entry (among active/unacknowledged entries) matching the condition.
     *
     * @see #findNewestMatching(FindPositionConstraint, Predicate)
     */
    @Override
    public Position findNewestMatching(Predicate<Entry> condition) throws InterruptedException, ManagedLedgerException {
        return findNewestMatching(FindPositionConstraint.SearchActiveEntries, condition);
    }
    /**
     * Synchronous wrapper around {@link #asyncFindNewestMatching}: blocks until the search
     * completes and returns the matching position (or null if none matched).
     *
     * @throws ManagedLedgerException if the asynchronous search failed
     * @throws InterruptedException   if interrupted while waiting
     */
    @Override
    public Position findNewestMatching(FindPositionConstraint constraint, Predicate<Entry> condition) throws InterruptedException, ManagedLedgerException {
        final CountDownLatch counter = new CountDownLatch(1);
        // Mutable holder so the anonymous callback can hand the outcome back to this thread.
        class Result {
            ManagedLedgerException exception = null;
            Position position = null;
        }
        final Result result = new Result();
        asyncFindNewestMatching(constraint, condition, new FindEntryCallback() {
            @Override
            public void findEntryComplete(Position position, Object ctx) {
                result.position = position;
                counter.countDown();
            }
            @Override
            public void findEntryFailed(ManagedLedgerException exception, Optional<Position> failedReadPosition,
                                        Object ctx) {
                result.exception = exception;
                counter.countDown();
            }
        }, null);
        counter.await();
        if (result.exception != null) {
            throw result.exception;
        }
        return result.position;
    }
    /**
     * Asynchronously binary-searches for the newest entry matching {@code condition}.
     *
     * @param constraint whether to search all stored entries or only unacknowledged ones;
     *                   determines the search start position and range size
     * @param condition  predicate evaluated on candidate entries
     * @param callback   receives the matching position (or failure)
     * @param ctx        opaque context passed through to the callback
     */
    @Override
    public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate<Entry> condition,
                                        FindEntryCallback callback, Object ctx) {
        OpFindNewest op;
        PositionImpl startPosition = null;
        long max = 0;
        switch (constraint) {
        case SearchAllAvailableEntries:
            startPosition = (PositionImpl) getFirstPosition();
            max = ledger.getNumberOfEntries() - 1;
            break;
        case SearchActiveEntries:
            startPosition = ledger.getNextValidPosition(markDeletePosition);
            max = getNumberOfEntriesInStorage();
            break;
        default:
            callback.findEntryFailed(new ManagedLedgerException("Unknown position constraint"), Optional.empty(), ctx);
            return;
        }
        if (startPosition == null) {
            callback.findEntryFailed(new ManagedLedgerException("Couldn't find start position"),
                    Optional.empty(), ctx);
            return;
        }
        op = new OpFindNewest(this, startPosition, condition, max, callback, ctx);
        op.find();
    }
    /** Marks the cursor active on the ledger, unless it was pinned inactive via {@link #setAlwaysInactive()}. */
    @Override
    public void setActive() {
        if (!alwaysInactive) {
            ledger.activateCursor(this);
        }
    }
    /** Returns whether the ledger currently considers this cursor active. */
    @Override
    public boolean isActive() {
        return ledger.isCursorActive(this);
    }
    /** Deactivates the cursor on the ledger. */
    @Override
    public void setInactive() {
        ledger.deactivateCursor(this);
    }
    /** Deactivates the cursor and prevents any future {@link #setActive()} from re-activating it. */
    @Override
    public void setAlwaysInactive() {
        setInactive();
        this.alwaysInactive = true;
    }
@Override
public Position getFirstPosition() {
Long firstLedgerId = ledger.getLedgersInfo().firstKey();
return firstLedgerId == null ? null : new PositionImpl(firstLedgerId, 0);
}
protected void internalResetCursor(PositionImpl position, AsyncCallbacks.ResetCursorCallback resetCursorCallback) {
if (position.equals(PositionImpl.earliest)) {
position = ledger.getFirstPosition();
} else if (position.equals(PositionImpl.latest)) {
position = ledger.getLastPosition().getNext();
}
log.info("[{}] Initiate reset position to {} on cursor {}", ledger.getName(), position, name);
synchronized (pendingMarkDeleteOps) {
if (!RESET_CURSOR_IN_PROGRESS_UPDATER.compareAndSet(this, FALSE, TRUE)) {
log.error("[{}] reset requested - position [{}], previous reset in progress - cursor {}",
ledger.getName(), position, name);
resetCursorCallback.resetFailed(
new ManagedLedgerException.ConcurrentFindCursorPositionException("reset already in progress"),
position);
}
}
final AsyncCallbacks.ResetCursorCallback callback = resetCursorCallback;
final PositionImpl newPosition = position;
VoidCallback finalCallback = new VoidCallback() {
@Override
public void operationComplete() {
// modify mark delete and read position since we are able to persist new position for cursor
lock.writeLock().lock();
try {
PositionImpl newMarkDeletePosition = ledger.getPreviousPosition(newPosition);
if (markDeletePosition.compareTo(newMarkDeletePosition) >= 0) {
MSG_CONSUMED_COUNTER_UPDATER.addAndGet(cursorImpl(), -getNumberOfEntries(
Range.closedOpen(newMarkDeletePosition, markDeletePosition)));
} else {
MSG_CONSUMED_COUNTER_UPDATER.addAndGet(cursorImpl(), getNumberOfEntries(
Range.closedOpen(markDeletePosition, newMarkDeletePosition)));
}
markDeletePosition = newMarkDeletePosition;
lastMarkDeleteEntry = new MarkDeleteEntry(newMarkDeletePosition, Collections.emptyMap(),
null, null);
individualDeletedMessages.clear();
if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
batchDeletedIndexes.values().forEach(BitSetRecyclable::recycle);
batchDeletedIndexes.clear();
}
PositionImpl oldReadPosition = readPosition;
if (oldReadPosition.compareTo(newPosition) >= 0) {
log.info("[{}] reset position to {} before current read position {} on cursor {}",
ledger.getName(), newPosition, oldReadPosition, name);
} else {
log.info("[{}] reset position to {} skipping from current read position {} on cursor {}",
ledger.getName(), newPosition, oldReadPosition, name);
}
readPosition = newPosition;
} finally {
lock.writeLock().unlock();
}
synchronized (pendingMarkDeleteOps) {
pendingMarkDeleteOps.clear();
if (!RESET_CURSOR_IN_PROGRESS_UPDATER.compareAndSet(ManagedCursorImpl.this, TRUE, FALSE)) {
log.error("[{}] expected reset position [{}], but another reset in progress on cursor {}",
ledger.getName(), newPosition, name);
}
}
callback.resetComplete(newPosition);
}
@Override
public void operationFailed(ManagedLedgerException exception) {
synchronized (pendingMarkDeleteOps) {
if (!RESET_CURSOR_IN_PROGRESS_UPDATER.compareAndSet(ManagedCursorImpl.this, TRUE, FALSE)) {
log.error("[{}] expected reset position [{}], but another reset in progress on cursor {}",
ledger.getName(), newPosition, name);
}
}
callback.resetFailed(new ManagedLedgerException.InvalidCursorPositionException(
"unable to persist position for cursor reset " + newPosition.toString()), newPosition);
}
};
internalAsyncMarkDelete(newPosition, Collections.emptyMap(), new MarkDeleteCallback() {
@Override
public void markDeleteComplete(Object ctx) {
finalCallback.operationComplete();
}
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
finalCallback.operationFailed(exception);
}
}, null);
}
    /**
     * Asynchronously resets the cursor to {@code newPos}, normalizing invalid positions to the
     * next valid one (or {@code latest} when past the end). The work is executed on the ledger's
     * ordered executor so resets serialize with trim operations.
     *
     * @param newPos   target position (must be a {@code PositionImpl})
     * @param callback notified with the outcome
     */
    @Override
    public void asyncResetCursor(Position newPos, AsyncCallbacks.ResetCursorCallback callback) {
        checkArgument(newPos instanceof PositionImpl);
        final PositionImpl newPosition = (PositionImpl) newPos;
        // order trim and reset operations on a ledger
        ledger.getExecutor().executeOrdered(ledger.getName(), safeRun(() -> {
            PositionImpl actualPosition = newPosition;
            if (!ledger.isValidPosition(actualPosition) &&
                    !actualPosition.equals(PositionImpl.earliest) &&
                    !actualPosition.equals(PositionImpl.latest)) {
                actualPosition = ledger.getNextValidPosition(actualPosition);
                if (actualPosition == null) {
                    // next valid position would only return null when newPos
                    // is larger than all available positions, then it's latest in effect.
                    actualPosition = PositionImpl.latest;
                }
            }
            internalResetCursor(actualPosition, callback);
        }));
    }
@Override
public void resetCursor(Position newPos) throws ManagedLedgerException, InterruptedException {
class Result {
ManagedLedgerException exception = null;
}
final Result result = new Result();
final CountDownLatch counter = new CountDownLatch(1);
asyncResetCursor(newPos, new AsyncCallbacks.ResetCursorCallback() {
@Override
public void resetComplete(Object ctx) {
counter.countDown();
}
@Override
public void resetFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
counter.countDown();
}
});
if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
if (result.exception != null) {
log.warn("[{}] Reset cursor to {} on cursor {} timed out with exception {}", ledger.getName(), newPos,
name, result.exception);
}
throw new ManagedLedgerException("Timeout during reset cursor");
}
if (result.exception != null) {
throw result.exception;
}
}
@Override
public List<Entry> replayEntries(Set<? extends Position> positions)
throws InterruptedException, ManagedLedgerException {
final CountDownLatch counter = new CountDownLatch(1);
class Result {
ManagedLedgerException exception = null;
List<Entry> entries = null;
}
final Result result = new Result();
asyncReplayEntries(positions, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
result.entries = entries;
counter.countDown();
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
counter.countDown();
}
}, null);
counter.await();
if (result.exception != null) {
throw result.exception;
}
return result.entries;
}
    /**
     * Asynchronously replays the given positions: (a) before reading, filters out messages that are already
     * acknowledged; (b) reads the remaining entries asynchronously and hands them to the given
     * ReadEntriesCallback; and (c) returns the already-acknowledged positions that were not replayed, so the
     * caller (the Dispatcher) can remove them from its replay list and avoid trying to replay them again.
     */
    @Override
    public Set<? extends Position> asyncReplayEntries(final Set<? extends Position> positions,
            ReadEntriesCallback callback, Object ctx) {
        // Default overload: replay without sorting the returned entries.
        return asyncReplayEntries(positions, callback, ctx, false);
    }
@Override
public Set<? extends Position> asyncReplayEntries(Set<? extends Position> positions,
ReadEntriesCallback callback, Object ctx, boolean sortEntries) {
List<Entry> entries = Lists.newArrayListWithExpectedSize(positions.size());
if (positions.isEmpty()) {
callback.readEntriesComplete(entries, ctx);
return Collections.emptySet();
}
// filters out messages which are already acknowledged
Set<Position> alreadyAcknowledgedPositions = Sets.newHashSet();
lock.readLock().lock();
try {
positions.stream()
.filter(position -> individualDeletedMessages.contains(((PositionImpl) position).getLedgerId(),
((PositionImpl) position).getEntryId())
|| ((PositionImpl) position).compareTo(markDeletePosition) <= 0)
.forEach(alreadyAcknowledgedPositions::add);
} finally {
lock.readLock().unlock();
}
final int totalValidPositions = positions.size() - alreadyAcknowledgedPositions.size();
final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();
ReadEntryCallback cb = new ReadEntryCallback() {
int pendingCallbacks = totalValidPositions;
@Override
public synchronized void readEntryComplete(Entry entry, Object ctx) {
if (exception.get() != null) {
// if there is already a failure for a different position, we should release the entry straight away
// and not add it to the list
entry.release();
if (--pendingCallbacks == 0) {
callback.readEntriesFailed(exception.get(), ctx);
}
} else {
entries.add(entry);
if (--pendingCallbacks == 0) {
if (sortEntries) {
entries.sort((e1, e2) -> ComparisonChain.start()
.compare(e1.getLedgerId(), e2.getLedgerId())
.compare(e1.getEntryId(), e2.getEntryId()).result());
}
callback.readEntriesComplete(entries, ctx);
}
}
}
@Override
public synchronized void readEntryFailed(ManagedLedgerException mle, Object ctx) {
log.warn("[{}][{}] Error while replaying entries", ledger.getName(), name, mle);
if (exception.compareAndSet(null, mle)) {
// release the entries just once, any further read success will release the entry straight away
entries.forEach(Entry::release);
}
if (--pendingCallbacks == 0) {
callback.readEntriesFailed(exception.get(), ctx);
}
}
};
positions.stream().filter(position -> !alreadyAcknowledgedPositions.contains(position))
.forEach(p ->{
if (((PositionImpl) p).compareTo(this.readPosition) == 0) {
this.setReadPosition(this.readPosition.getNext());
log.warn("[{}][{}] replayPosition{} equals readPosition{}," + " need set next readPositio",
ledger.getName(), name, p, this.readPosition);
}
ledger.asyncReadEntry((PositionImpl) p, cb, ctx);
});
return alreadyAcknowledgedPositions;
}
    /**
     * Returns the number of non-deleted entries within the given range: all entries in the range minus
     * those covered by individually-deleted ranges that overlap it.
     */
    protected long getNumberOfEntries(Range<PositionImpl> range) {
        long allEntries = ledger.getNumberOfEntries(range);

        if (log.isDebugEnabled()) {
            log.debug("[{}] getNumberOfEntries. {} allEntries: {}", ledger.getName(), range, allEntries);
        }

        AtomicLong deletedEntries = new AtomicLong(0);

        lock.readLock().lock();
        try {
            individualDeletedMessages.forEach((r) -> {
                try {
                    if (r.isConnected(range)) {
                        // Discount the portion of this deleted range that overlaps the requested range.
                        Range<PositionImpl> commonEntries = r.intersection(range);
                        long commonCount = ledger.getNumberOfEntries(commonEntries);
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] [{}] Discounting {} entries for already deleted range {}", ledger.getName(),
                                    name, commonCount, commonEntries);
                        }
                        deletedEntries.addAndGet(commonCount);
                    }
                    return true; // keep iterating over the remaining deleted ranges
                } finally {
                    // Ranges produced via recyclePositionRangeConverter use pooled position objects;
                    // return them to the pool after use.
                    if (r.lowerEndpoint() instanceof PositionImplRecyclable) {
                        ((PositionImplRecyclable) r.lowerEndpoint()).recycle();
                        ((PositionImplRecyclable) r.upperEndpoint()).recycle();
                    }
                }
            }, recyclePositionRangeConverter);
        } finally {
            lock.readLock().unlock();
        }

        if (log.isDebugEnabled()) {
            log.debug("[{}] Found {} entries - deleted: {}", ledger.getName(), allEntries - deletedEntries.get(),
                    deletedEntries);
        }
        return allEntries - deletedEntries.get();
    }
    /**
     * Synchronously mark-deletes up to {@code position} with no associated properties.
     */
    @Override
    public void markDelete(Position position) throws InterruptedException, ManagedLedgerException {
        markDelete(position, Collections.emptyMap());
    }
@Override
public void markDelete(Position position, Map<String, Long> properties)
throws InterruptedException, ManagedLedgerException {
checkNotNull(position);
checkArgument(position instanceof PositionImpl);
class Result {
ManagedLedgerException exception = null;
}
final Result result = new Result();
final CountDownLatch counter = new CountDownLatch(1);
asyncMarkDelete(position, properties, new MarkDeleteCallback() {
@Override
public void markDeleteComplete(Object ctx) {
counter.countDown();
}
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
counter.countDown();
}
}, null);
if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
throw new ManagedLedgerException("Timeout during mark-delete operation");
}
if (result.exception != null) {
throw result.exception;
}
}
@Override
public void clearBacklog() throws InterruptedException, ManagedLedgerException {
class Result {
ManagedLedgerException exception = null;
}
final Result result = new Result();
final CountDownLatch counter = new CountDownLatch(1);
asyncClearBacklog(new ClearBacklogCallback() {
@Override
public void clearBacklogComplete(Object ctx) {
counter.countDown();
}
@Override
public void clearBacklogFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
counter.countDown();
}
}, null);
if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
throw new ManagedLedgerException("Timeout during clear backlog operation");
}
if (result.exception != null) {
throw result.exception;
}
}
    /**
     * Asynchronously clears the backlog by mark-deleting up to the last position of the ledger.
     * A failure whose cause is an IllegalArgumentException is treated as success, since it indicates
     * the backlog was cleared concurrently by another mark-delete.
     */
    @Override
    public void asyncClearBacklog(final ClearBacklogCallback callback, Object ctx) {
        asyncMarkDelete(ledger.getLastPosition(), new MarkDeleteCallback() {
            @Override
            public void markDeleteComplete(Object ctx) {
                callback.clearBacklogComplete(ctx);
            }

            @Override
            public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
                if (exception.getCause() instanceof IllegalArgumentException) {
                    // There could be a race condition between calling clear backlog and other mark delete operations.
                    // If we get an exception it means the backlog was already cleared in the meantime.
                    callback.clearBacklogComplete(ctx);
                } else {
                    callback.clearBacklogFailed(exception, ctx);
                }
            }
        }, ctx);
    }
@Override
public void skipEntries(int numEntriesToSkip, IndividualDeletedEntries deletedEntries)
throws InterruptedException, ManagedLedgerException {
class Result {
ManagedLedgerException exception = null;
}
final Result result = new Result();
final CountDownLatch counter = new CountDownLatch(1);
asyncSkipEntries(numEntriesToSkip, deletedEntries, new SkipEntriesCallback() {
@Override
public void skipEntriesComplete(Object ctx) {
counter.countDown();
}
@Override
public void skipEntriesFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
counter.countDown();
}
}, null);
if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
throw new ManagedLedgerException("Timeout during skip messages operation");
}
if (result.exception != null) {
throw result.exception;
}
}
    /**
     * Asynchronously skips {@code numEntriesToSkip} entries ahead of the mark-delete position.
     * When {@code deletedEntries} is {@code Exclude}, individually-deleted entries in the skipped span do
     * not count toward the requested number, so the target position is pushed further ahead to compensate.
     */
    @Override
    public void asyncSkipEntries(int numEntriesToSkip, IndividualDeletedEntries deletedEntries,
            final SkipEntriesCallback callback, Object ctx) {
        log.info("[{}] Skipping {} entries on cursor {}", ledger.getName(), numEntriesToSkip, name);
        long numDeletedMessages = 0;
        if (deletedEntries == IndividualDeletedEntries.Exclude) {
            numDeletedMessages = getNumIndividualDeletedEntriesToSkip(numEntriesToSkip);
        }

        asyncMarkDelete(ledger.getPositionAfterN(markDeletePosition, numEntriesToSkip + numDeletedMessages,
                PositionBound.startExcluded), new MarkDeleteCallback() {
            @Override
            public void markDeleteComplete(Object ctx) {
                callback.skipEntriesComplete(ctx);
            }

            @Override
            public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
                if (exception.getCause() instanceof IllegalArgumentException) {
                    // There could be a race condition between calling clear backlog and other mark delete
                    // operations.
                    // If we get an exception it means the backlog was already cleared in the meantime.
                    callback.skipEntriesComplete(ctx);
                } else {
                    log.error("[{}] Skip {} entries failed for cursor {}", ledger.getName(), numEntriesToSkip,
                            name, exception);
                    callback.skipEntriesFailed(exception, ctx);
                }
            }
        }, ctx);
    }
    /**
     * Counts how many individually-deleted entries fall within the first {@code numEntries}
     * still-undeleted entries after the mark-delete position.
     * <p>
     * Iteration over the deleted ranges stops early (returns {@code false} from the visitor) once the
     * running count of undeleted entries reaches {@code numEntries}.
     * <p>
     * NOTE(review): the temp* holders appear to be reusable per-call scratch state shared with the range
     * converter — confirm they are thread-confined (e.g. thread-local or single-threaded access) before
     * assuming this method is safe to call concurrently.
     */
    long getNumIndividualDeletedEntriesToSkip(long numEntries) {
        tempTotalEntriesToSkip.set(0L);
        tempDeletedMessages.set(0L);
        lock.readLock().lock();
        try {
            tempStartPosition.set(markDeletePosition);
            tempEndPosition.set(null);
            individualDeletedMessages.forEach((r) -> {
                try {
                    tempEndPosition.set(r.lowerEndpoint());
                    if (tempStartPosition.get().compareTo(tempEndPosition.get()) <= 0) {
                        // Entries between the previous range's end and this range's start are undeleted.
                        Range<PositionImpl> range = Range.openClosed(tempStartPosition.get(), tempEndPosition.get());
                        long entries = ledger.getNumberOfEntries(range);
                        if (tempTotalEntriesToSkip.get() + entries >= numEntries) {
                            // do not process further
                            return false;
                        }
                        tempTotalEntriesToSkip.set(tempTotalEntriesToSkip.get() + entries);
                        tempDeletedMessages.set(tempDeletedMessages.get() + ledger.getNumberOfEntries(r));
                        tempStartPosition.set(r.upperEndpoint());
                    } else {
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] deletePosition {} moved ahead without clearing deleteMsgs {} for cursor {}",
                                    ledger.getName(), markDeletePosition, r.lowerEndpoint(), name);
                        }
                    }
                    return true;
                } finally {
                    // Pooled positions from recyclePositionRangeConverter must be returned after use.
                    if (r.lowerEndpoint() instanceof PositionImplRecyclable) {
                        ((PositionImplRecyclable) r.lowerEndpoint()).recycle();
                        ((PositionImplRecyclable) r.upperEndpoint()).recycle();
                    }
                }
            }, recyclePositionRangeConverter);
        } finally {
            lock.readLock().unlock();
        }

        return tempDeletedMessages.get();
    }
boolean hasMoreEntries(PositionImpl position) {
PositionImpl lastPositionInLedger = ledger.getLastPosition();
if (position.compareTo(lastPositionInLedger) <= 0) {
return getNumberOfEntries(Range.closed(position, lastPositionInLedger)) > 0;
}
return false;
}
void initializeCursorPosition(Pair<PositionImpl, Long> lastPositionCounter) {
readPosition = ledger.getNextValidPosition(lastPositionCounter.getLeft());
markDeletePosition = lastPositionCounter.getLeft();
// Initialize the counter such that the difference between the messages written on the ML and the
// messagesConsumed is 0, to ensure the initial backlog count is 0.
messagesConsumedCounter = lastPositionCounter.getRight();
}
    /**
     * Advances the acknowledged (mark-delete) position, additionally consuming any individually-deleted
     * ranges that become contiguous with it.
     *
     * @param newMarkDeletePosition
     *            the new acknowledged position
     * @return the mark-delete position actually applied, which may be further ahead than the requested
     *         position if adjacent entries were already individually deleted
     */
    // NOTE(review): callers in this file invoke this under lock.writeLock(); confirm any other call
    // sites hold the write lock as well before relying on it.
    PositionImpl setAcknowledgedPosition(PositionImpl newMarkDeletePosition) {
        if (newMarkDeletePosition.compareTo(markDeletePosition) < 0) {
            throw new IllegalArgumentException("Mark deleting an already mark-deleted position");
        }
        PositionImpl oldMarkDeletePosition = markDeletePosition;

        if (!newMarkDeletePosition.equals(oldMarkDeletePosition)) {
            // Count how many previously-unacked entries this move consumes, to keep the
            // consumed-messages counter accurate.
            long skippedEntries = 0;
            if (newMarkDeletePosition.getLedgerId() == oldMarkDeletePosition.getLedgerId()
                    && newMarkDeletePosition.getEntryId() == oldMarkDeletePosition.getEntryId() + 1) {
                // Mark-deleting the position next to current one
                skippedEntries = individualDeletedMessages.contains(newMarkDeletePosition.getLedgerId(),
                        newMarkDeletePosition.getEntryId()) ? 0 : 1;
            } else {
                skippedEntries = getNumberOfEntries(Range.openClosed(oldMarkDeletePosition, newMarkDeletePosition));
            }
            PositionImpl positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
            // sometime ranges are connected but belongs to different ledgers so, they are placed sequentially
            // eg: (2:10..3:15] can be returned as (2:10..2:15],[3:0..3:15]. So, try to iterate over connected range and
            // found the last non-connected range which gives new markDeletePosition
            while (positionAfterNewMarkDelete.compareTo(ledger.lastConfirmedEntry) <= 0) {
                if (individualDeletedMessages.contains(positionAfterNewMarkDelete.getLedgerId(),
                        positionAfterNewMarkDelete.getEntryId())) {
                    Range<PositionImpl> rangeToBeMarkDeleted = individualDeletedMessages.rangeContaining(
                            positionAfterNewMarkDelete.getLedgerId(), positionAfterNewMarkDelete.getEntryId());
                    newMarkDeletePosition = rangeToBeMarkDeleted.upperEndpoint();
                    positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
                    // check if next valid position is also deleted and part of the deleted-range
                    continue;
                }
                break;
            }

            if (log.isDebugEnabled()) {
                log.debug("[{}] Moved ack position from: {} to: {} -- skipped: {}", ledger.getName(),
                        oldMarkDeletePosition, newMarkDeletePosition, skippedEntries);
            }
            MSG_CONSUMED_COUNTER_UPDATER.addAndGet(this, skippedEntries);
        }

        // markDelete-position and clear out deletedMsgSet
        markDeletePosition = newMarkDeletePosition;
        individualDeletedMessages.removeAtMost(markDeletePosition.getLedgerId(), markDeletePosition.getEntryId());

        if (readPosition.compareTo(newMarkDeletePosition) <= 0) {
            // If the position that is mark-deleted is past the read position, it
            // means that the client has skipped some entries. We need to move
            // read position forward
            PositionImpl oldReadPosition = readPosition;
            readPosition = ledger.getNextValidPosition(newMarkDeletePosition);

            if (log.isDebugEnabled()) {
                log.debug("[{}] Moved read position from: {} to: {}, and new mark-delete position {}", ledger.getName(),
                        oldReadPosition, readPosition, markDeletePosition);
            }
        }

        return newMarkDeletePosition;
    }
    /**
     * Asynchronously mark-deletes up to {@code position} with no associated properties.
     */
    @Override
    public void asyncMarkDelete(final Position position, final MarkDeleteCallback callback, final Object ctx) {
        asyncMarkDelete(position, Collections.emptyMap(), callback, ctx);
    }
@Override
public void asyncMarkDelete(final Position position, Map<String, Long> properties,
final MarkDeleteCallback callback, final Object ctx) {
checkNotNull(position);
checkArgument(position instanceof PositionImpl);
if (isClosed()) {
callback.markDeleteFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
return;
}
if (RESET_CURSOR_IN_PROGRESS_UPDATER.get(this) == TRUE) {
if (log.isDebugEnabled()) {
log.debug("[{}] cursor reset in progress - ignoring mark delete on position [{}] for cursor [{}]",
ledger.getName(), position, name);
}
callback.markDeleteFailed(
new ManagedLedgerException("Reset cursor in progress - unable to mark delete position "
+ position.toString()),
ctx);
}
if (log.isDebugEnabled()) {
log.debug("[{}] Mark delete cursor {} up to position: {}", ledger.getName(), name, position);
}
PositionImpl newPosition = (PositionImpl) position;
if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
if (newPosition.ackSet != null) {
batchDeletedIndexes.put(newPosition, BitSetRecyclable.create().resetWords(newPosition.ackSet));
newPosition = ledger.getPreviousPosition(newPosition);
}
Map<PositionImpl, BitSetRecyclable> subMap = batchDeletedIndexes.subMap(PositionImpl.earliest, newPosition);
subMap.values().forEach(BitSetRecyclable::recycle);
subMap.clear();
} else if (newPosition.ackSet != null) {
newPosition = ledger.getPreviousPosition(newPosition);
newPosition.ackSet = null;
}
if (((PositionImpl) ledger.getLastConfirmedEntry()).compareTo(newPosition) < 0) {
if (log.isDebugEnabled()) {
log.debug(
"[{}] Failed mark delete due to invalid markDelete {} is ahead of last-confirmed-entry {} for cursor [{}]",
ledger.getName(), position, ledger.getLastConfirmedEntry(), name);
}
callback.markDeleteFailed(new ManagedLedgerException("Invalid mark deleted position"), ctx);
return;
}
lock.writeLock().lock();
try {
newPosition = setAcknowledgedPosition(newPosition);
} catch (IllegalArgumentException e) {
callback.markDeleteFailed(getManagedLedgerException(e), ctx);
return;
} finally {
lock.writeLock().unlock();
}
// Apply rate limiting to mark-delete operations
if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) {
lastMarkDeleteEntry = new MarkDeleteEntry(newPosition, properties, null, null);
callback.markDeleteComplete(ctx);
return;
}
internalAsyncMarkDelete(newPosition, properties, callback, ctx);
}
    /**
     * Queues or immediately executes a mark-delete, depending on the cursor's metadata-ledger state:
     * while closed it fails fast; while a metadata ledger is being created or switched the entry is
     * queued; when open it runs immediately unless read operations are pending.
     */
    protected void internalAsyncMarkDelete(final PositionImpl newPosition, Map<String, Long> properties,
            final MarkDeleteCallback callback, final Object ctx) {
        ledger.mbean.addMarkDeleteOp();

        MarkDeleteEntry mdEntry = new MarkDeleteEntry(newPosition, properties, callback, ctx);

        // We cannot write to the ledger during the switch, need to wait until the new metadata ledger is available
        synchronized (pendingMarkDeleteOps) {
            // The state might have changed while we were waiting on the queue mutex
            switch (STATE_UPDATER.get(this)) {
            case Closed:
                callback.markDeleteFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
                return;

            case NoLedger:
                // We need to create a new ledger to write into
                startCreatingNewMetadataLedger();
                // fall through
            case SwitchingLedger:
                pendingMarkDeleteOps.add(mdEntry);
                break;

            case Open:
                if (PENDING_READ_OPS_UPDATER.get(this) > 0) {
                    // Wait until no read operation are pending
                    pendingMarkDeleteOps.add(mdEntry);
                } else {
                    // Execute the mark delete immediately
                    internalMarkDelete(mdEntry);
                }
                break;

            default:
                log.error("[{}][{}] Invalid cursor state: {}", ledger.getName(), name, state);
                callback.markDeleteFailed(new ManagedLedgerException("Cursor was in invalid state: " + state), ctx);
                break;
            }
        }
    }
    /**
     * Persists the given mark-delete entry to the cursor ledger and, on success, prunes the
     * individually-deleted and batch-index state made obsolete by the new mark-delete position.
     * Completion callbacks (for the entry itself or its whole grouped batch) fire after the
     * ledger/cursor bookkeeping is updated.
     */
    void internalMarkDelete(final MarkDeleteEntry mdEntry) {
        // The counter is used to mark all the pending mark-delete request that were submitted to BK and that are not
        // yet finished. While we have outstanding requests we cannot close the current ledger, so the switch to new
        // ledger is postponed to when the counter goes to 0.
        PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.incrementAndGet(this);

        lastMarkDeleteEntry = mdEntry;

        persistPositionToLedger(cursorLedger, mdEntry, new VoidCallback() {
            @Override
            public void operationComplete() {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Mark delete cursor {} to position {} succeeded", ledger.getName(), name,
                            mdEntry.newPosition);
                }

                // Remove from the individual deleted messages all the entries before the new mark delete
                // point.
                lock.writeLock().lock();
                try {
                    individualDeletedMessages.removeAtMost(mdEntry.newPosition.getLedgerId(),
                            mdEntry.newPosition.getEntryId());
                    if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
                        // Recycle and drop batch-index bit sets up to (and including) the new mark-delete position.
                        Map<PositionImpl, BitSetRecyclable> subMap = batchDeletedIndexes.subMap(PositionImpl.earliest, false, PositionImpl.get(mdEntry.newPosition.getLedgerId(), mdEntry.newPosition.getEntryId()), true);
                        subMap.values().forEach(BitSetRecyclable::recycle);
                        subMap.clear();
                    }
                } finally {
                    lock.writeLock().unlock();
                }

                ledger.updateCursor(ManagedCursorImpl.this, mdEntry.newPosition);

                decrementPendingMarkDeleteCount();

                // Trigger the final callback after having (eventually) triggered the switchin-ledger operation. This
                // will ensure that no race condition will happen between the next mark-delete and the switching
                // operation.
                if (mdEntry.callbackGroup != null) {
                    // Trigger the callback for every request in the group
                    for (MarkDeleteEntry e : mdEntry.callbackGroup) {
                        e.callback.markDeleteComplete(e.ctx);
                    }
                } else {
                    // Only trigger the callback for the current request
                    mdEntry.callback.markDeleteComplete(mdEntry.ctx);
                }
            }

            @Override
            public void operationFailed(ManagedLedgerException exception) {
                log.warn("[{}] Failed to mark delete position for cursor={} position={}", ledger.getName(),
                        ManagedCursorImpl.this, mdEntry.newPosition);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Consumer {} cursor mark delete failed with counters: consumed {} mdPos {} rdPos {}",
                            ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
                }

                decrementPendingMarkDeleteCount();

                if (mdEntry.callbackGroup != null) {
                    for (MarkDeleteEntry e : mdEntry.callbackGroup) {
                        e.callback.markDeleteFailed(exception, e.ctx);
                    }
                } else {
                    mdEntry.callback.markDeleteFailed(exception, mdEntry.ctx);
                }
            }
        });
    }
    /**
     * Synchronously deletes a single individual position.
     */
    @Override
    public void delete(final Position position) throws InterruptedException, ManagedLedgerException {
        delete(Collections.singletonList(position));
    }
    /**
     * Asynchronously deletes a single individual position.
     */
    @Override
    public void asyncDelete(Position pos, final AsyncCallbacks.DeleteCallback callback, Object ctx) {
        asyncDelete(Collections.singletonList(pos), callback, ctx);
    }
@Override
public void delete(Iterable<Position> positions) throws InterruptedException, ManagedLedgerException {
checkNotNull(positions);
class Result {
ManagedLedgerException exception = null;
}
final Result result = new Result();
final CountDownLatch counter = new CountDownLatch(1);
final AtomicBoolean timeout = new AtomicBoolean(false);
asyncDelete(positions, new AsyncCallbacks.DeleteCallback() {
@Override
public void deleteComplete(Object ctx) {
if (timeout.get()) {
log.warn("[{}] [{}] Delete operation timeout. Callback deleteComplete at position {}",
ledger.getName(), name, positions);
}
counter.countDown();
}
@Override
public void deleteFailed(ManagedLedgerException exception, Object ctx) {
result.exception = exception;
if (timeout.get()) {
log.warn("[{}] [{}] Delete operation timeout. Callback deleteFailed at position {}",
ledger.getName(), name, positions);
}
counter.countDown();
}
}, null);
if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
timeout.set(true);
log.warn("[{}] [{}] Delete operation timeout. No callback was triggered at position {}", ledger.getName(),
name, positions);
throw new ManagedLedgerException("Timeout during delete operation");
}
if (result.exception != null) {
throw result.exception;
}
}
    /**
     * Asynchronously deletes individual positions (out-of-order acknowledgments).
     * <p>
     * Already-acknowledged positions are skipped; batch positions (with an ackSet) update the batch-index
     * bit set and are only added to the individually-deleted set once every index of the batch is acked.
     * When the first individually-deleted range becomes contiguous with the mark-delete position, a new
     * mark-delete is triggered (subject to rate limiting) to persist the advance.
     */
    @Override
    public void asyncDelete(Iterable<Position> positions, AsyncCallbacks.DeleteCallback callback, Object ctx) {
        if (isClosed()) {
            callback.deleteFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
            return;
        }

        PositionImpl newMarkDeletePosition = null;

        lock.writeLock().lock();

        try {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Deleting individual messages at {}. Current status: {} - md-position: {}",
                        ledger.getName(), name, positions, individualDeletedMessages, markDeletePosition);
            }

            for (Position pos : positions) {
                PositionImpl position  = (PositionImpl) checkNotNull(pos);
                if (((PositionImpl) ledger.getLastConfirmedEntry()).compareTo(position) < 0) {
                    if (log.isDebugEnabled()) {
                        log.debug(
                            "[{}] Failed mark delete due to invalid markDelete {} is ahead of last-confirmed-entry {} for cursor [{}]",
                            ledger.getName(), position, ledger.getLastConfirmedEntry(), name);
                    }
                    callback.deleteFailed(new ManagedLedgerException("Invalid mark deleted position"), ctx);
                    return;
                }

                if (individualDeletedMessages.contains(position.getLedgerId(), position.getEntryId())
                    || position.compareTo(markDeletePosition) <= 0) {
                    // Position is already covered: just release any leftover batch-index state.
                    if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
                        BitSetRecyclable bitSetRecyclable = batchDeletedIndexes.remove(position);
                        if (bitSetRecyclable != null) {
                            bitSetRecyclable.recycle();
                        }
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Position was already deleted {}", ledger.getName(), name, position);
                    }
                    continue;
                }
                if (position.ackSet == null) {
                    if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
                        BitSetRecyclable bitSetRecyclable = batchDeletedIndexes.remove(position);
                        if (bitSetRecyclable != null) {
                            bitSetRecyclable.recycle();
                        }
                    }
                    // Add a range (prev, pos] to the set. Adding the previous entry as an open limit to the range will make
                    // the RangeSet recognize the "continuity" between adjacent Positions
                    PositionImpl previousPosition = ledger.getPreviousPosition(position);
                    individualDeletedMessages.addOpenClosed(previousPosition.getLedgerId(), previousPosition.getEntryId(),
                        position.getLedgerId(), position.getEntryId());
                    MSG_CONSUMED_COUNTER_UPDATER.incrementAndGet(this);

                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Individually deleted messages: {}", ledger.getName(), name,
                            individualDeletedMessages);
                    }
                } else if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
                    // Batch position: AND the stored bit set with the newly-acked bits; an empty bit set
                    // means the whole batch is acknowledged.
                    BitSetRecyclable bitSet = batchDeletedIndexes.computeIfAbsent(position, (v) -> BitSetRecyclable.create().resetWords(position.ackSet));
                    BitSetRecyclable givenBitSet = BitSetRecyclable.create().resetWords(position.ackSet);
                    bitSet.and(givenBitSet);
                    givenBitSet.recycle();
                    if (bitSet.isEmpty()) {
                        PositionImpl previousPosition = ledger.getPreviousPosition(position);
                        individualDeletedMessages.addOpenClosed(previousPosition.getLedgerId(), previousPosition.getEntryId(),
                            position.getLedgerId(), position.getEntryId());
                        ++messagesConsumedCounter;
                        BitSetRecyclable bitSetRecyclable = batchDeletedIndexes.remove(position);
                        if (bitSetRecyclable != null) {
                            bitSetRecyclable.recycle();
                        }
                    }
                }
            }

            if (individualDeletedMessages.isEmpty()) {
                // No changes to individually deleted messages, so nothing to do at this point
                callback.deleteComplete(ctx);
                return;
            }

            // If the lower bound of the range set is the current mark delete position, then we can trigger a new
            // mark-delete to the upper bound of the first range segment
            Range<PositionImpl> range = individualDeletedMessages.firstRange();

            // If the lowerBound is ahead of MarkDelete, verify if there are any entries in-between
            if (range.lowerEndpoint().compareTo(markDeletePosition) <= 0 || ledger
                .getNumberOfEntries(Range.openClosed(markDeletePosition, range.lowerEndpoint())) <= 0) {

                if (log.isDebugEnabled()) {
                    log.debug("[{}] Found a position range to mark delete for cursor {}: {} ", ledger.getName(),
                        name, range);
                }

                newMarkDeletePosition = range.upperEndpoint();
            }

            if (newMarkDeletePosition != null) {
                newMarkDeletePosition = setAcknowledgedPosition(newMarkDeletePosition);
            } else {
                newMarkDeletePosition = markDeletePosition;
            }
        } catch (Exception e) {
            log.warn("[{}] [{}] Error while updating individualDeletedMessages [{}]", ledger.getName(), name,
                e.getMessage(), e);
            callback.deleteFailed(getManagedLedgerException(e), ctx);
            return;
        } finally {
            lock.writeLock().unlock();
        }

        // Apply rate limiting to mark-delete operations
        if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) {
            PositionImpl finalNewMarkDeletePosition = newMarkDeletePosition;
            LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this,
                    last -> new MarkDeleteEntry(finalNewMarkDeletePosition, last.properties, null, null));
            callback.deleteComplete(ctx);
            return;
        }

        try {
            Map<String, Long> properties = lastMarkDeleteEntry != null ? lastMarkDeleteEntry.properties
                    : Collections.emptyMap();

            internalAsyncMarkDelete(newMarkDeletePosition, properties, new MarkDeleteCallback() {
                @Override
                public void markDeleteComplete(Object ctx) {
                    callback.deleteComplete(ctx);
                }

                @Override
                public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
                    callback.deleteFailed(exception, ctx);
                }

            }, ctx);
        } catch (Exception e) {
            log.warn("[{}] [{}] Error doing asyncDelete [{}]", ledger.getName(), name, e.getMessage(), e);
            if (log.isDebugEnabled()) {
                log.debug("[{}] Consumer {} cursor asyncDelete error, counters: consumed {} mdPos {} rdPos {}",
                        ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
            }
            callback.deleteFailed(new ManagedLedgerException(e), ctx);
        }
    }
/**
* Given a list of entries, filter out the entries that have already been individually deleted.
*
* @param entries
* a list of entries
* @return a list of entries not containing deleted messages
*/
List<Entry> filterReadEntries(List<Entry> entries) {
lock.readLock().lock();
try {
Range<PositionImpl> entriesRange = Range.closed((PositionImpl) entries.get(0).getPosition(),
(PositionImpl) entries.get(entries.size() - 1).getPosition());
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Filtering entries {} - alreadyDeleted: {}", ledger.getName(), name, entriesRange,
individualDeletedMessages);
}
if (individualDeletedMessages.isEmpty() || !entriesRange.isConnected(individualDeletedMessages.span())) {
// There are no individually deleted messages in this entry list, no need to perform filtering
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] No filtering needed for entries {}", ledger.getName(), name, entriesRange);
}
return entries;
} else {
// Remove from the entry list all the entries that were already marked for deletion
return Lists.newArrayList(Collections2.filter(entries, entry -> {
boolean includeEntry = !individualDeletedMessages.contains(
((PositionImpl) entry.getPosition()).getLedgerId(),
((PositionImpl) entry.getPosition()).getEntryId());
if (!includeEntry) {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Filtering entry at {} - already deleted", ledger.getName(), name,
entry.getPosition());
}
entry.release();
}
return includeEntry;
}));
}
} finally {
lock.readLock().unlock();
}
}
    /**
     * Human-readable summary of the cursor: ledger name, cursor name, ack and read positions.
     */
    @Override
    public synchronized String toString() {
        return MoreObjects.toStringHelper(this).add("ledger", ledger.getName()).add("name", name)
                .add("ackPos", markDeletePosition).add("readPos", readPosition).toString();
    }
    /** Returns the cursor's name. */
    @Override
    public String getName() {
        return name;
    }

    /** Returns the timestamp (epoch millis) of the last recorded cursor activity. */
    @Override
    public long getLastActive() {
        return lastActive;
    }

    /** Records the current time as the cursor's last-active timestamp. */
    @Override
    public void updateLastActive() {
        lastActive = System.currentTimeMillis();
    }

    /** This implementation is a durable cursor: its position survives broker restarts. */
    @Override
    public boolean isDurable() {
        return true;
    }

    /** Returns the position of the next entry to be read. */
    @Override
    public Position getReadPosition() {
        return readPosition;
    }

    /** Returns the highest contiguously-acknowledged (mark-delete) position. */
    @Override
    public Position getMarkDeletedPosition() {
        return markDeletePosition;
    }
@Override
public void rewind() {
lock.writeLock().lock();
try {
PositionImpl newReadPosition = ledger.getNextValidPosition(markDeletePosition);
PositionImpl oldReadPosition = readPosition;
log.info("[{}-{}] Rewind from {} to {}", ledger.getName(), name, oldReadPosition, newReadPosition);
readPosition = newReadPosition;
} finally {
lock.writeLock().unlock();
}
}
@Override
public void seek(Position newReadPositionInt) {
checkArgument(newReadPositionInt instanceof PositionImpl);
PositionImpl newReadPosition = (PositionImpl) newReadPositionInt;
lock.writeLock().lock();
try {
if (newReadPosition.compareTo(markDeletePosition) <= 0) {
// Make sure the newReadPosition comes after the mark delete position
newReadPosition = ledger.getNextValidPosition(markDeletePosition);
}
PositionImpl oldReadPosition = readPosition;
readPosition = newReadPosition;
} finally {
lock.writeLock().unlock();
}
}
    /**
     * Synchronously closes the cursor by bridging {@link #asyncClose} with a latch.
     *
     * @throws InterruptedException if interrupted while waiting for the async close
     * @throws ManagedLedgerException if the close fails or times out
     */
    @Override
    public void close() throws InterruptedException, ManagedLedgerException {
        // Mutable holder so the callback can hand a failure back to this thread
        class Result {
            ManagedLedgerException exception = null;
        }
        final Result result = new Result();

        final CountDownLatch latch = new CountDownLatch(1);
        asyncClose(new AsyncCallbacks.CloseCallback() {
            @Override
            public void closeComplete(Object ctx) {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Successfully closed ledger for cursor {}", ledger.getName(), name);
                }
                latch.countDown();
            }

            @Override
            public void closeFailed(ManagedLedgerException exception, Object ctx) {
                log.warn("[{}] Closing ledger failed for cursor {}", ledger.getName(), name, exception);
                result.exception = exception;
                latch.countDown();
            }
        }, null);

        if (!latch.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
            throw new ManagedLedgerException("Timeout during close operation");
        }

        if (result.exception != null) {
            throw result.exception;
        }
    }
    /**
     * Persists the given mark-delete position on close, choosing the store by the number of
     * unacked ranges: when the range count exceeds the z-node threshold the position (and
     * ranges) are written into the cursor ledger, otherwise into the metadata store, after
     * which the now-unneeded cursor ledger is deleted.
     *
     * @param position mark-delete position to persist
     * @param properties user properties attached to the mark-delete entry
     * @param callback completion callback
     * @param ctx opaque callback context
     */
    void persistPositionWhenClosing(PositionImpl position, Map<String, Long> properties,
            final AsyncCallbacks.CloseCallback callback, final Object ctx) {

        if (shouldPersistUnackRangesToLedger()) {
            // Too many unacked ranges for the z-node: write position + ranges into the cursor ledger
            persistPositionToLedger(cursorLedger, new MarkDeleteEntry(position, properties, null, null),
                    new VoidCallback() {
                        @Override
                        public void operationComplete() {
                            log.info("[{}][{}] Updated md-position={} into cursor-ledger {}", ledger.getName(), name,
                                    markDeletePosition, cursorLedger.getId());
                            cursorLedger.asyncClose((rc, lh, ctx1) -> {
                                // The overall close has already succeeded; the ledger-close result is only logged
                                callback.closeComplete(ctx);
                                if (rc == BKException.Code.OK) {
                                    log.info("[{}][{}] Closed cursor-ledger {}", ledger.getName(), name,
                                            cursorLedger.getId());
                                } else {
                                    log.warn("[{}][{}] Failed to close cursor-ledger {}: {}", ledger.getName(), name,
                                            cursorLedger.getId(), BKException.getMessage(rc));
                                }
                            }, ctx);
                        }

                        @Override
                        public void operationFailed(ManagedLedgerException e) {
                            log.warn("[{}][{}] Failed to persist mark-delete position into cursor-ledger{}: {}",
                                    ledger.getName(), name, cursorLedger.getId(), e.getMessage());
                            callback.closeFailed(e, ctx);
                        }
                    });
        } else {
            // Few enough ranges: store the position directly in the metadata store (ledger id -1)
            persistPositionMetaStore(-1, position, properties, new MetaStoreCallback<Void>() {
                @Override
                public void operationComplete(Void result, Stat stat) {
                    log.info("[{}][{}] Closed cursor at md-position={}", ledger.getName(), name, markDeletePosition);
                    // At this point the position had already been safely stored in the cursor z-node
                    callback.closeComplete(ctx);
                    asyncDeleteLedger(cursorLedger);
                }

                @Override
                public void operationFailed(MetaStoreException e) {
                    log.warn("[{}][{}] Failed to update cursor info when closing: {}", ledger.getName(), name,
                            e.getMessage());
                    callback.closeFailed(e, ctx);
                }
            }, true);
        }
    }
private boolean shouldPersistUnackRangesToLedger() {
return cursorLedger != null
&& !isCursorLedgerReadOnly
&& config.getMaxUnackedRangesToPersist() > 0
&& individualDeletedMessages.size() > config.getMaxUnackedRangesToPersistInZk();
}
    /**
     * Persists cursor metadata (mark-delete position, properties and optionally the
     * individually-/batch-deleted ranges) into the metadata store. A {@code cursorsLedgerId}
     * of -1 means the position lives in the z-node itself and no cursor ledger is needed.
     * Fails immediately if the cursor is already closed.
     */
    private void persistPositionMetaStore(long cursorsLedgerId, PositionImpl position, Map<String, Long> properties,
            MetaStoreCallback<Void> callback, boolean persistIndividualDeletedMessageRanges) {
        if (state == State.Closed) {
            // Never write metadata on behalf of a closed cursor
            ledger.getExecutor().execute(safeRun(() -> callback.operationFailed(new MetaStoreException(
                    new CursorAlreadyClosedException(name + " cursor already closed")))));
            return;
        }

        // When closing we store the last mark-delete position in the z-node itself, so we won't need the cursor ledger,
        // hence we write it as -1. The cursor ledger is deleted once the z-node write is confirmed.
        ManagedCursorInfo.Builder info = ManagedCursorInfo.newBuilder() //
                .setCursorsLedgerId(cursorsLedgerId) //
                .setMarkDeleteLedgerId(position.getLedgerId()) //
                .setMarkDeleteEntryId(position.getEntryId()) //
                .setLastActive(lastActive); //

        info.addAllProperties(buildPropertiesMap(properties));
        if (persistIndividualDeletedMessageRanges) {
            info.addAllIndividualDeletedMessages(buildIndividualDeletedMessageRanges());
            if (config.isDeletionAtBatchIndexLevelEnabled()) {
                info.addAllBatchedEntryDeletionIndexInfo(buildBatchEntryDeletionIndexInfoList());
            }
        }

        if (log.isDebugEnabled()) {
            log.debug("[{}][{}] Closing cursor at md-position: {}", ledger.getName(), name, position);
        }

        ledger.getStore().asyncUpdateCursorInfo(ledger.getName(), name, info.build(), cursorLedgerStat,
                new MetaStoreCallback<Void>() {
                    @Override
                    public void operationComplete(Void result, Stat stat) {
                        // Remember the new z-node version for subsequent CAS updates
                        cursorLedgerStat = stat;
                        callback.operationComplete(result, stat);
                    }

                    @Override
                    public void operationFailed(MetaStoreException e) {
                        if (e instanceof MetaStoreException.BadVersionException) {
                            log.warn("[{}] Failed to update cursor metadata for {} due to version conflict {}",
                                    ledger.name, name, e.getMessage());
                            // it means previous owner of the ml might have updated the version incorrectly. So, check
                            // the ownership and refresh the version again.
                            if (ledger.mlOwnershipChecker != null && ledger.mlOwnershipChecker.get()) {
                                ledger.getStore().asyncGetCursorInfo(ledger.getName(), name,
                                        new MetaStoreCallback<ManagedCursorInfo>() {
                                            @Override
                                            public void operationComplete(ManagedCursorInfo info, Stat stat) {
                                                cursorLedgerStat = stat;
                                            }

                                            @Override
                                            public void operationFailed(MetaStoreException e) {
                                                if (log.isDebugEnabled()) {
                                                    log.debug(
                                                            "[{}] Failed to refresh cursor metadata-version for {} due to {}",
                                                            ledger.name, name, e.getMessage());
                                                }
                                            }
                                        });
                            }
                        }
                        callback.operationFailed(e);
                    }
                });
    }
    /**
     * Asynchronously closes the cursor: transitions to Closing, persists the last mark-delete
     * entry (cursor ledger or metadata store), then marks the state Closed. Idempotent: a
     * second call while Closing/Closed completes immediately.
     */
    @Override
    public void asyncClose(final AsyncCallbacks.CloseCallback callback, final Object ctx) {
        State oldState = STATE_UPDATER.getAndSet(this, State.Closing);
        if (oldState == State.Closed || oldState == State.Closing) {
            log.info("[{}] [{}] State is already closed", ledger.getName(), name);
            callback.closeComplete(ctx);
            return;
        }
        persistPositionWhenClosing(lastMarkDeleteEntry.newPosition, lastMarkDeleteEntry.properties, callback, ctx);
        // NOTE(review): the state flips to Closed before the async persist completes —
        // presumably intentional to fail-fast later metastore writes; confirm the ordering.
        STATE_UPDATER.set(this, State.Closed);
    }
    /**
     * Internal version of seek that doesn't do the validation check: the read position is
     * updated only when it lands strictly after the mark-delete position (or when no
     * mark-delete position exists yet).
     *
     * @param newReadPositionInt the new read position (must be a {@link PositionImpl})
     */
    void setReadPosition(Position newReadPositionInt) {
        checkArgument(newReadPositionInt instanceof PositionImpl);
        if (this.markDeletePosition == null
                || ((PositionImpl) newReadPositionInt).compareTo(this.markDeletePosition) > 0) {
            this.readPosition = (PositionImpl) newReadPositionInt;
        }
    }
// //////////////////////////////////////////////////
    /**
     * Begins rolling over to a fresh cursor-metadata ledger. Moves the state to
     * SwitchingLedger so new mark-delete ops queue up; the ledger is created immediately only
     * when no mark-delete writes are in flight (otherwise the last completing write triggers
     * it — see decrementPendingMarkDeleteCount()).
     */
    void startCreatingNewMetadataLedger() {
        // Change the state so that new mark-delete ops will be queued and not immediately submitted
        State oldState = STATE_UPDATER.getAndSet(this, State.SwitchingLedger);
        if (oldState == State.SwitchingLedger) {
            // Ignore double request
            return;
        }

        // Check if we can immediately switch to a new metadata ledger
        if (PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.get(this) == 0) {
            createNewMetadataLedger();
        }
    }
    /**
     * Creates a new cursor-metadata ledger; on success flushes the queued mark-delete ops and
     * reopens the cursor, on failure fails all queued ops and moves the state to NoLedger.
     */
    void createNewMetadataLedger() {
        createNewMetadataLedger(new VoidCallback() {
            @Override
            public void operationComplete() {
                // We now have a new ledger where we can write
                synchronized (pendingMarkDeleteOps) {
                    flushPendingMarkDeletes();

                    // Resume normal mark-delete operations
                    STATE_UPDATER.set(ManagedCursorImpl.this, State.Open);
                }
            }

            @Override
            public void operationFailed(ManagedLedgerException exception) {
                log.error("[{}][{}] Metadata ledger creation failed", ledger.getName(), name, exception);

                synchronized (pendingMarkDeleteOps) {
                    // Fail every queued mark-delete — there is no ledger to write them into
                    while (!pendingMarkDeleteOps.isEmpty()) {
                        MarkDeleteEntry entry = pendingMarkDeleteOps.poll();
                        entry.callback.markDeleteFailed(exception, entry.ctx);
                    }

                    // At this point we don't have a ledger ready
                    STATE_UPDATER.set(ManagedCursorImpl.this, State.NoLedger);
                }
            }
        });
    }
    /** Flushes queued mark-delete ops, if any. Callers synchronize on pendingMarkDeleteOps. */
    private void flushPendingMarkDeletes() {
        if (!pendingMarkDeleteOps.isEmpty()) {
            internalFlushPendingMarkDeletes();
        }
    }

    /**
     * Collapses the queued mark-delete ops into a single write: only the last entry is
     * persisted, carrying the whole batch as its callback group.
     */
    void internalFlushPendingMarkDeletes() {
        MarkDeleteEntry lastEntry = pendingMarkDeleteOps.getLast();
        lastEntry.callbackGroup = Lists.newArrayList(pendingMarkDeleteOps);
        pendingMarkDeleteOps.clear();

        internalMarkDelete(lastEntry);
    }
    /**
     * Creates a fresh cursor-metadata ledger, persists the current mark-delete entry into it,
     * then switches the cursor metadata to point at it. Any failure after creation deletes
     * the orphan ledger before reporting the error to {@code callback}.
     */
    void createNewMetadataLedger(final VoidCallback callback) {
        ledger.mbean.startCursorLedgerCreateOp();

        ledger.asyncCreateLedger(bookkeeper, config, digestType, (rc, lh, ctx) -> {

            if (ledger.checkAndCompleteLedgerOpTask(rc, lh, ctx)) {
                return;
            }

            ledger.getExecutor().execute(safeRun(() -> {
                ledger.mbean.endCursorLedgerCreateOp();
                if (rc != BKException.Code.OK) {
                    log.warn("[{}] Error creating ledger for cursor {}: {}", ledger.getName(), name,
                            BKException.getMessage(rc));
                    callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc)));
                    return;
                }

                if (log.isDebugEnabled()) {
                    log.debug("[{}] Created ledger {} for cursor {}", ledger.getName(), lh.getId(), name);
                }
                // Created the ledger, now write the last position
                // content
                MarkDeleteEntry mdEntry = lastMarkDeleteEntry;
                persistPositionToLedger(lh, mdEntry, new VoidCallback() {
                    @Override
                    public void operationComplete() {
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Persisted position {} for cursor {}", ledger.getName(),
                                    mdEntry.newPosition, name);
                        }
                        switchToNewLedger(lh, new VoidCallback() {
                            @Override
                            public void operationComplete() {
                                callback.operationComplete();
                            }

                            @Override
                            public void operationFailed(ManagedLedgerException exception) {
                                // it means it failed to switch the newly created ledger so, it should be
                                // deleted to prevent leak
                                bookkeeper.asyncDeleteLedger(lh.getId(), (int rc, Object ctx) -> {
                                    if (rc != BKException.Code.OK) {
                                        log.warn("[{}] Failed to delete orphan ledger {}", ledger.getName(),
                                                lh.getId());
                                    }
                                }, null);
                                callback.operationFailed(exception);
                            }
                        });
                    }

                    @Override
                    public void operationFailed(ManagedLedgerException exception) {
                        log.warn("[{}] Failed to persist position {} for cursor {}", ledger.getName(),
                                mdEntry.newPosition, name);

                        // Delete the ledger we just created; it never became the active one
                        ledger.mbean.startCursorLedgerDeleteOp();
                        bookkeeper.asyncDeleteLedger(lh.getId(), new DeleteCallback() {
                            @Override
                            public void deleteComplete(int rc, Object ctx) {
                                ledger.mbean.endCursorLedgerDeleteOp();
                            }
                        }, null);
                        callback.operationFailed(exception);
                    }
                });
            }));
        }, LedgerMetadataUtils.buildAdditionalMetadataForCursor(name));
    }
private List<LongProperty> buildPropertiesMap(Map<String, Long> properties) {
if (properties.isEmpty()) {
return Collections.emptyList();
}
List<LongProperty> longProperties = Lists.newArrayList();
properties.forEach((name, value) -> {
LongProperty lp = LongProperty.newBuilder().setName(name).setValue(value).build();
longProperties.add(lp);
});
return longProperties;
}
    /**
     * Serializes the individually-deleted position ranges into their protobuf form for
     * persistence, capped by maxUnackedRangesToPersist.
     */
    private List<MLDataFormats.MessageRange> buildIndividualDeletedMessageRanges() {
        lock.readLock().lock();
        try {
            if (individualDeletedMessages.isEmpty()) {
                return Collections.emptyList();
            }

            // Builders are reused across iterations; build() snapshots their current state
            MLDataFormats.NestedPositionInfo.Builder nestedPositionBuilder = MLDataFormats.NestedPositionInfo
                    .newBuilder();
            MLDataFormats.MessageRange.Builder messageRangeBuilder = MLDataFormats.MessageRange.newBuilder();
            List<MessageRange> rangeList = Lists.newArrayList();
            individualDeletedMessages.forEach((positionRange) -> {
                PositionImpl p = positionRange.lowerEndpoint();
                nestedPositionBuilder.setLedgerId(p.getLedgerId());
                nestedPositionBuilder.setEntryId(p.getEntryId());
                messageRangeBuilder.setLowerEndpoint(nestedPositionBuilder.build());
                p = positionRange.upperEndpoint();
                nestedPositionBuilder.setLedgerId(p.getLedgerId());
                nestedPositionBuilder.setEntryId(p.getEntryId());
                messageRangeBuilder.setUpperEndpoint(nestedPositionBuilder.build());
                rangeList.add(messageRangeBuilder.build());
                // Presumably the range set stops iterating when this returns false — confirm
                // the forEach contract. NOTE(review): the check runs after the add, so up to
                // cap+1 ranges can be emitted; verify whether that off-by-one is intended.
                return rangeList.size() <= config.getMaxUnackedRangesToPersist();
            });
            return rangeList;
        } finally {
            lock.readLock().unlock();
        }
    }
    /**
     * Serializes the batch-deleted index bit sets into their protobuf form, capped by
     * maxBatchDeletedIndexToPersist. Empty when batch-index deletion is disabled.
     */
    private List<MLDataFormats.BatchedEntryDeletionIndexInfo> buildBatchEntryDeletionIndexInfoList() {
        if (!config.isDeletionAtBatchIndexLevelEnabled() || batchDeletedIndexes == null || batchDeletedIndexes.isEmpty()) {
            return Collections.emptyList();
        }

        // Builders are reused per iteration; build() snapshots their current state
        MLDataFormats.NestedPositionInfo.Builder nestedPositionBuilder = MLDataFormats.NestedPositionInfo
                .newBuilder();
        MLDataFormats.BatchedEntryDeletionIndexInfo.Builder batchDeletedIndexInfoBuilder = MLDataFormats.BatchedEntryDeletionIndexInfo
                .newBuilder();
        List<MLDataFormats.BatchedEntryDeletionIndexInfo> result = Lists.newArrayList();
        Iterator<Map.Entry<PositionImpl, BitSetRecyclable>> iterator = batchDeletedIndexes.entrySet().iterator();
        while (iterator.hasNext() && result.size() < config.getMaxBatchDeletedIndexToPersist()) {
            Map.Entry<PositionImpl, BitSetRecyclable> entry = iterator.next();
            nestedPositionBuilder.setLedgerId(entry.getKey().getLedgerId());
            nestedPositionBuilder.setEntryId(entry.getKey().getEntryId());
            batchDeletedIndexInfoBuilder.setPosition(nestedPositionBuilder.build());
            // Copy the bit set as a boxed long list, the shape the protobuf field expects
            long[] array = entry.getValue().toLongArray();
            List<Long> deleteSet = new ArrayList<>(array.length);
            for (long l : array) {
                deleteSet.add(l);
            }
            batchDeletedIndexInfoBuilder.addAllDeleteSet(deleteSet);
            result.add(batchDeletedIndexInfoBuilder.build());
        }
        return result;
    }
    /**
     * Appends the mark-delete entry (position, deleted ranges, batch indexes, properties) to
     * the given cursor ledger. On success a metadata-ledger rollover may be triggered; on a
     * write failure the cursor flips to NoLedger and the position is persisted to the
     * metadata store as a fallback.
     */
    void persistPositionToLedger(final LedgerHandle lh, MarkDeleteEntry mdEntry, final VoidCallback callback) {
        PositionImpl position = mdEntry.newPosition;
        PositionInfo pi = PositionInfo.newBuilder().setLedgerId(position.getLedgerId())
                .setEntryId(position.getEntryId())
                .addAllIndividualDeletedMessages(buildIndividualDeletedMessageRanges())
                .addAllBatchedEntryDeletionIndexInfo(buildBatchEntryDeletionIndexInfoList())
                .addAllProperties(buildPropertiesMap(mdEntry.properties)).build();

        if (log.isDebugEnabled()) {
            log.debug("[{}] Cursor {} Appending to ledger={} position={}", ledger.getName(), name, lh.getId(),
                    position);
        }

        checkNotNull(lh);
        lh.asyncAddEntry(pi.toByteArray(), (rc, lh1, entryId, ctx) -> {
            if (rc == BKException.Code.OK) {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Updated cursor {} position {} in meta-ledger {}", ledger.getName(), name, position,
                            lh1.getId());
                }

                if (shouldCloseLedger(lh1)) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Need to create new metadata ledger for consumer {}", ledger.getName(), name);
                    }
                    startCreatingNewMetadataLedger();
                }
                callback.operationComplete();
            } else {
                log.warn("[{}] Error updating cursor {} position {} in meta-ledger {}: {}", ledger.getName(), name,
                        position, lh1.getId(), BKException.getMessage(rc));
                // If we've had a write error, the ledger will be automatically closed, we need to create a new one,
                // in the meantime the mark-delete will be queued.
                STATE_UPDATER.compareAndSet(ManagedCursorImpl.this, State.Open, State.NoLedger);

                // Before giving up, try to persist the position in the metadata store
                persistPositionMetaStore(-1, position, mdEntry.properties, new MetaStoreCallback<Void>() {
                    @Override
                    public void operationComplete(Void result, Stat stat) {
                        if (log.isDebugEnabled()) {
                            log.debug(
                                    "[{}][{}] Updated cursor in meta store after previous failure in ledger at position {}",
                                    ledger.getName(), name, position);
                        }
                        callback.operationComplete();
                    }

                    @Override
                    public void operationFailed(MetaStoreException e) {
                        log.warn("[{}][{}] Failed to update cursor in meta store after previous failure in ledger: {}",
                                ledger.getName(), name, e.getMessage());
                        // Report the original BK failure, not the metastore one
                        callback.operationFailed(createManagedLedgerException(rc));
                    }
                }, true);
            }
        }, null);
    }
    /**
     * Decides whether the current cursor-metadata ledger should be rolled over: true when it
     * reached the max entry count or the rollover timeout elapsed, and the cursor is not
     * closing/closed. Stamps the switch timestamp when returning true.
     */
    boolean shouldCloseLedger(LedgerHandle lh) {
        long now = clock.millis();
        if ((lh.getLastAddConfirmed() >= config.getMetadataMaxEntriesPerLedger()
                || lastLedgerSwitchTimestamp < (now - config.getLedgerRolloverTimeout() * 1000))
                && (STATE_UPDATER.get(this) != State.Closed && STATE_UPDATER.get(this) != State.Closing)) {
            // It's safe to modify the timestamp since this method will be only called from a callback, implying that
            // calls will be serialized on one single thread
            lastLedgerSwitchTimestamp = now;
            return true;
        } else {
            return false;
        }
    }
    /**
     * Points the cursor's metadata-store record at the newly created ledger {@code lh}; on
     * success adopts it as the current cursor ledger and deletes the previous one.
     */
    void switchToNewLedger(final LedgerHandle lh, final VoidCallback callback) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Switching cursor {} to ledger {}", ledger.getName(), name, lh.getId());
        }
        persistPositionMetaStore(lh.getId(), lastMarkDeleteEntry.newPosition, lastMarkDeleteEntry.properties,
                new MetaStoreCallback<Void>() {
                    @Override
                    public void operationComplete(Void result, Stat stat) {
                        log.info("[{}] Updated cursor {} with ledger id {} md-position={} rd-position={}", ledger.getName(),
                                name, lh.getId(), markDeletePosition, readPosition);
                        final LedgerHandle oldLedger = cursorLedger;
                        cursorLedger = lh;
                        isCursorLedgerReadOnly = false;
                        cursorLedgerStat = stat;

                        // At this point the position had already been safely markdeleted
                        callback.operationComplete();

                        // The old ledger's content is superseded; delete it in the background
                        asyncDeleteLedger(oldLedger);
                    }

                    @Override
                    public void operationFailed(MetaStoreException e) {
                        log.warn("[{}] Failed to update consumer {}", ledger.getName(), name, e);
                        callback.operationFailed(e);
                    }
                }, false);
    }
    /**
     * Called when the managed ledger has persisted new entries. Resumes the parked read
     * operation (if any) from the current read position; otherwise the notification is a
     * no-op. (The previous javadoc advertised a boolean return, but the method is void.)
     */
    void notifyEntriesAvailable() {
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] Received ml notification", ledger.getName(), name);
        }
        // Atomically claim the waiting read op so only one notifier resumes it
        OpReadEntry opReadEntry = WAITING_READ_OP_UPDATER.getAndSet(this, null);

        if (opReadEntry != null) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Received notification of new messages persisted, reading at {} -- last: {}",
                        ledger.getName(), name, opReadEntry.readPosition, ledger.lastConfirmedEntry);
                log.debug("[{}] Consumer {} cursor notification: other counters: consumed {} mdPos {} rdPos {}",
                        ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
            }

            PENDING_READ_OPS_UPDATER.incrementAndGet(this);
            opReadEntry.readPosition = (PositionImpl) getReadPosition();
            ledger.asyncReadEntries(opReadEntry);
        } else {
            // No one is waiting to be notified. Ignore
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Received notification but had no pending read operation", ledger.getName(), name);
            }
        }
    }
    /**
     * Asynchronously closes the current cursor-metadata ledger and maps the BK result code to
     * the callback. NOTE(review): dereferences cursorLedger without a null check — callers
     * presumably guarantee a ledger exists; confirm.
     */
    void asyncCloseCursorLedger(final AsyncCallbacks.CloseCallback callback, final Object ctx) {
        LedgerHandle lh = cursorLedger;
        ledger.mbean.startCursorLedgerCloseOp();
        log.info("[{}] [{}] Closing metadata ledger {}", ledger.getName(), name, lh.getId());
        lh.asyncClose(new CloseCallback() {
            @Override
            public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
                ledger.mbean.endCursorLedgerCloseOp();
                if (rc == BKException.Code.OK) {
                    callback.closeComplete(ctx);
                } else {
                    callback.closeFailed(createManagedLedgerException(rc), ctx);
                }
            }
        }, ctx);
    }
    /**
     * Marks one in-flight mark-delete write as finished; when the count reaches zero while a
     * ledger switch is pending, the new metadata ledger is created now.
     */
    void decrementPendingMarkDeleteCount() {
        if (PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.decrementAndGet(this) == 0) {
            final State state = STATE_UPDATER.get(this);
            if (state == State.SwitchingLedger) {
                // A metadata ledger switch was pending and now we can do it since we don't have any more
                // outstanding mark-delete requests
                createNewMetadataLedger();
            }
        }
    }
    /**
     * Marks one in-flight read as finished; once no reads remain, flushes queued mark-deletes
     * (only while the cursor is Open) or logs that a close is still pending.
     */
    void readOperationCompleted() {
        if (PENDING_READ_OPS_UPDATER.decrementAndGet(this) == 0) {
            synchronized (pendingMarkDeleteOps) {
                if (STATE_UPDATER.get(this) == State.Open) {
                    // Flush the pending writes only if the state is open.
                    flushPendingMarkDeletes();
                } else if (PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.get(this) != 0) {
                    log.info(
                            "[{}] read operation completed and cursor was closed. need to call any queued cursor close",
                            name);
                }
            }
        }
    }
    /** Deletes the given ledger with the default retry budget. */
    void asyncDeleteLedger(final LedgerHandle lh) {
        asyncDeleteLedger(lh, DEFAULT_LEDGER_DELETE_RETRIES);
    }
private void asyncDeleteLedger(final LedgerHandle lh, int retry) {
if (lh == null || retry <= 0) {
if (lh != null) {
log.warn("[{}-{}] Failed to delete ledger after retries {}", ledger.getName(), name, lh.getId());
}
return;
}
ledger.mbean.startCursorLedgerDeleteOp();
bookkeeper.asyncDeleteLedger(lh.getId(), (rc, ctx) -> {
ledger.mbean.endCursorLedgerDeleteOp();
if (rc != BKException.Code.OK) {
log.warn("[{}] Failed to delete ledger {}: {}", ledger.getName(), lh.getId(),
BKException.getMessage(rc));
if (!isNoSuchLedgerExistsException(rc)) {
ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteLedger(lh, retry - 1)),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
}
return;
} else {
log.info("[{}][{}] Successfully closed & deleted ledger {} in cursor", ledger.getName(), name,
lh.getId());
}
}, null);
}
    /** Deletes this cursor's metadata ledger with the default retry budget (forces state Closed). */
    void asyncDeleteCursorLedger() {
        asyncDeleteCursorLedger(DEFAULT_LEDGER_DELETE_RETRIES);
    }
    /**
     * Forces the cursor state to Closed, then deletes the cursor-metadata ledger, retrying
     * transient failures with a back-off until the retry budget is exhausted.
     */
    private void asyncDeleteCursorLedger(int retry) {
        STATE_UPDATER.set(this, State.Closed);

        if (cursorLedger == null || retry <= 0) {
            if (cursorLedger != null) {
                log.warn("[{}-{}] Failed to delete ledger after retries {}", ledger.getName(), name,
                        cursorLedger.getId());
            }
            return;
        }

        ledger.mbean.startCursorLedgerDeleteOp();
        bookkeeper.asyncDeleteLedger(cursorLedger.getId(), (rc, ctx) -> {
            ledger.mbean.endCursorLedgerDeleteOp();
            if (rc == BKException.Code.OK) {
                log.info("[{}][{}] Deleted cursor ledger {}", ledger.getName(), name, cursorLedger.getId());
            } else {
                log.warn("[{}][{}] Failed to delete ledger {}: {}", ledger.getName(), name, cursorLedger.getId(),
                        BKException.getMessage(rc));
                // A missing ledger is final; anything else gets a delayed retry with one fewer attempt
                if (!isNoSuchLedgerExistsException(rc)) {
                    ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteCursorLedger(retry - 1)),
                            DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
                }
            }
        }, null);
    }
/**
* return BK error codes that are considered not likely to be recoverable.
*/
public static boolean isBkErrorNotRecoverable(int rc) {
switch (rc) {
case Code.NoSuchLedgerExistsException:
case Code.NoSuchLedgerExistsOnMetadataServerException:
case Code.ReadException:
case Code.LedgerRecoveryException:
case Code.NoSuchEntryException:
return true;
default:
return false;
}
}
    /**
     * Computes a safe position to roll the cursor back to when its ledger cannot be
     * recovered, so the managed ledger can still be opened: the snapshotted mark-delete
     * position from the cursor metadata, clamped to the managed ledger's first position when
     * the snapshot points into an already-deleted ledger.
     *
     * @param info the cursor metadata recovered from the metadata store
     */
    private PositionImpl getRollbackPosition(ManagedCursorInfo info) {
        PositionImpl firstPosition = ledger.getFirstPosition();
        PositionImpl snapshottedPosition = new PositionImpl(info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId());
        if (firstPosition == null) {
            // There are no ledgers in the ML, any position is good
            return snapshottedPosition;
        } else if (snapshottedPosition.compareTo(firstPosition) < 0) {
            // The snapshotted position might be pointing to a ledger that was already deleted
            return firstPosition;
        } else {
            return snapshottedPosition;
        }
    }
// / Expose internal values for debugging purpose
    /** @return number of in-flight read operations (debugging hook). */
    public int getPendingReadOpsCount() {
        return PENDING_READ_OPS_UPDATER.get(this);
    }

    /** @return count of messages consumed through this cursor (debugging hook). */
    public long getMessagesConsumedCounter() {
        return messagesConsumedCounter;
    }

    /** @return id of the current cursor-metadata ledger, or -1 if none. */
    public long getCursorLedger() {
        LedgerHandle lh = cursorLedger;
        return lh != null ? lh.getId() : -1;
    }

    /** @return last confirmed entry id of the cursor-metadata ledger, or -1 if none. */
    public long getCursorLedgerLastEntry() {
        LedgerHandle lh = cursorLedger;
        return lh != null ? lh.getLastAddConfirmed() : -1;
    }

    /** @return string form of the individually-deleted ranges (read-locked snapshot). */
    public String getIndividuallyDeletedMessages() {
        lock.readLock().lock();
        try {
            return individualDeletedMessages.toString();
        } finally {
            lock.readLock().unlock();
        }
    }
    /** Test hook exposing the live individually-deleted range set (not a copy). */
    @VisibleForTesting
    public LongPairRangeSet<PositionImpl> getIndividuallyDeletedMessagesSet() {
        return individualDeletedMessages;
    }
public boolean isMessageDeleted(Position position) {
checkArgument(position instanceof PositionImpl);
return individualDeletedMessages.contains(((PositionImpl) position).getLedgerId(),
((PositionImpl) position).getEntryId()) || ((PositionImpl) position).compareTo(markDeletePosition) <= 0 ;
}
    /**
     * Checks given position is part of deleted-range and returns next position of upper-end as all the messages are
     * deleted up to that point.
     *
     * @param position
     * @return next available position
     */
    public PositionImpl getNextAvailablePosition(PositionImpl position) {
        Range<PositionImpl> range = individualDeletedMessages.rangeContaining(position.getLedgerId(), position.getEntryId());
        if (range != null) {
            // Jump past the whole deleted range; fall back to position.getNext() if the
            // range's successor does not actually advance
            PositionImpl nextPosition = range.upperEndpoint().getNext();
            return (nextPosition != null && nextPosition.compareTo(position) > 0) ? nextPosition : position.getNext();
        }
        return position.getNext();
    }
    /** @return first position of the next existing ledger after {@code currentLedgerId}, or null if none. */
    public Position getNextLedgerPosition(long currentLedgerId) {
        Long nextExistingLedger = ledger.getNextValidLedger(currentLedgerId);
        return nextExistingLedger != null ? PositionImpl.get(nextExistingLedger, 0) : null;
    }

    /** @return true when there are no individually-deleted ranges (read-locked check). */
    public boolean isIndividuallyDeletedEntriesEmpty() {
        lock.readLock().lock();
        try {
            return individualDeletedMessages.isEmpty();
        } finally {
            lock.readLock().unlock();
        }
    }

    /** @return epoch-millis timestamp of the last metadata-ledger rollover. */
    public long getLastLedgerSwitchTimestamp() {
        return lastLedgerSwitchTimestamp;
    }

    /** @return the cursor state name (e.g. Open, Closing, Closed, NoLedger, SwitchingLedger). */
    public String getState() {
        return STATE_UPDATER.get(this).toString();
    }
@Override
public double getThrottleMarkDelete() {
return this.markDeleteLimiter.getRate();
}
    /**
     * Sets the mark-delete rate limit (ops/sec). A non-positive value disables throttling by
     * dropping the limiter (it becomes null), so readers of markDeleteLimiter must cope with
     * that.
     */
    @Override
    public void setThrottleMarkDelete(double throttleMarkDelete) {
        if (throttleMarkDelete > 0.0) {
            if (markDeleteLimiter == null) {
                markDeleteLimiter = RateLimiter.create(throttleMarkDelete);
            } else {
                // Reuse the existing limiter, just adjust its rate
                this.markDeleteLimiter.setRate(throttleMarkDelete);
            }
        } else {
            // Disable mark-delete rate limiter
            markDeleteLimiter = null;
        }
    }
    /** @return the managed ledger this cursor belongs to. */
    @Override
    public ManagedLedger getManagedLedger() {
        return this.ledger;
    }

    /** @return the last (highest) individually-deleted range; presumably null when empty — confirm. */
    @Override
    public Range<PositionImpl> getLastIndividualDeletedRange() {
        return individualDeletedMessages.lastRange();
    }
@Override
public void trimDeletedEntries(List<Entry> entries) {
entries.removeIf(entry -> ((PositionImpl) entry.getPosition()).compareTo(markDeletePosition) <= 0
|| individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId()));
}
    /** Convenience self-reference for inner classes and lambdas. */
    private ManagedCursorImpl cursorImpl() {
        return this;
    }

    /**
     * @return the deleted batch-index bit set for the given position as a long array, or null
     *         when batch-index deletion is disabled or no indexes are recorded for it.
     */
    @Override
    public long[] getDeletedBatchIndexesAsLongArray(PositionImpl position) {
        if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) {
            BitSetRecyclable bitSet = batchDeletedIndexes.get(position);
            return bitSet == null ? null : bitSet.toLongArray();
        } else {
            return null;
        }
    }

    /** Accumulates per-cursor read statistics used for the average-entry-size estimate. */
    void updateReadStats(int readEntriesCount, long readEntriesSize) {
        this.entriesReadCount += readEntriesCount;
        this.entriesReadSize += readEntriesSize;
    }
    /**
     * Caps a max-entries read request by a max-bytes budget, estimating the entry count from
     * the topic's average entry size (falling back to this cursor's own read stats). Always
     * allows at least one entry.
     */
    private int applyMaxSizeCap(int maxEntries, long maxSizeBytes) {
        if (maxSizeBytes == NO_MAX_SIZE_LIMIT) {
            return maxEntries;
        }

        double avgEntrySize = ledger.getStats().getEntrySizeAverage();
        if (!Double.isFinite(avgEntrySize)) {
            // We don't have yet any stats on the topic entries. Let's try to use the cursor avg size stats
            // (0/0 yields NaN when this cursor has read nothing yet, caught by the next check)
            avgEntrySize = (double) entriesReadSize / (double) entriesReadCount;
        }

        if (!Double.isFinite(avgEntrySize)) {
            // If we still don't have any information, it means this is the first time we attempt reading
            // and there are no writes. Let's start with 1 to avoid any overflow and start the avg stats
            return 1;
        }

        int maxEntriesBasedOnSize = (int)(maxSizeBytes / avgEntrySize);
        if (maxEntriesBasedOnSize < 1) {
            // We need to read at least one entry
            return 1;
        }

        return Math.min(maxEntriesBasedOnSize, maxEntries);
    }

    // Class-wide SLF4J logger
    private static final Logger log = LoggerFactory.getLogger(ManagedCursorImpl.class);
}
|
package model;
/**
 * Model object representing an ONG (non-governmental organisation) record, carrying its
 * identifier, legal and trade names, and bank/contact numbers.
 *
 * NOTE(review): conta, agencia and telefone are modelled as Double — numeric identifiers
 * usually belong in String/long fields (leading zeros, no arithmetic); confirm the intent.
 */
public class ongs {

    private int id;              // primary key
    private String nome;         // legal name
    private String nomeFantasia; // trade ("fantasy") name
    private Double conta;        // bank account number
    private Double agencia;      // bank branch number
    private Double telefone;     // phone number

    /** Builds a fully-populated ONG record. */
    public ongs(int id, String nome, String nomeFantasia, Double conta, Double agencia, Double telefone) {
        this.id = id;
        this.nome = nome;
        this.nomeFantasia = nomeFantasia;
        this.conta = conta;
        this.agencia = agencia;
        this.telefone = telefone;
    }

    public int getId() {
        return this.id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getNome() {
        return this.nome;
    }

    public void setNome(String nome) {
        this.nome = nome;
    }

    public String getNomeFantasia() {
        return this.nomeFantasia;
    }

    public void setNomeFantasia(String nomeFantasia) {
        this.nomeFantasia = nomeFantasia;
    }

    public Double getConta() {
        return this.conta;
    }

    public void setConta(Double conta) {
        this.conta = conta;
    }

    public Double getAgencia() {
        return this.agencia;
    }

    public void setAgencia(Double agencia) {
        this.agencia = agencia;
    }

    public Double getTelefone() {
        return this.telefone;
    }

    public void setTelefone(Double telefone) {
        this.telefone = telefone;
    }
}
|
package dev.davivieira.framework.adapters.output.h2.data;
import jakarta.persistence.*;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import org.eclipse.persistence.annotations.Convert;
import org.eclipse.persistence.annotations.Converter;
import java.io.Serializable;
import java.util.UUID;
/**
 * Persistence model for a router, stored in the "routers" table with its attached switch
 * linked through the "switches" secondary table. UUID keys pass through the EclipseLink
 * "uuidConverter".
 *
 * NOTE(review): the class carries both @Entity and @MappedSuperclass — in JPA these are
 * mutually exclusive mappings; confirm which is intended and drop the other.
 * NOTE(review): routerType combines @Embedded with @Enumerated; for an enum-valued column
 * @Enumerated alone is the conventional mapping — verify how RouterTypeData is declared.
 */
@Getter
@AllArgsConstructor
@NoArgsConstructor
@Entity
@Table(name = "routers")
@SecondaryTable(name = "switches")
@MappedSuperclass
@Converter(name="uuidConverter", converterClass= UUIDTypeConverter.class)
public class RouterData implements Serializable {

    // Primary key, stored as a uuid column and converted via the EclipseLink converter above
    @Id
    @Column(name="router_id",
            columnDefinition = "uuid",
            updatable = false )
    @Convert("uuidConverter")
    private UUID routerId;

    // Router type persisted as the enum constant's name (EnumType.STRING)
    @Embedded
    @Enumerated(EnumType.STRING)
    @Column(name="router_type")
    private RouterTypeData routerType;

    // One-to-one link to the switch row in the secondary "switches" table, sharing router_id
    @OneToOne(cascade = CascadeType.ALL)
    @JoinColumn(table = "switches",
            name = "router_id",
            referencedColumnName = "router_id")
    private SwitchData networkSwitch;
}
|
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.testsuite.arquillian;
import org.apache.commons.io.filefilter.WildcardFileFilter;
import org.apache.commons.lang.StringUtils;
import org.jboss.arquillian.container.spi.ContainerRegistry;
import org.jboss.arquillian.container.spi.client.container.LifecycleException;
import org.jboss.arquillian.container.spi.client.container.DeploymentException;
import org.jboss.arquillian.container.spi.event.StartContainer;
import org.jboss.arquillian.container.spi.event.StartSuiteContainers;
import org.jboss.arquillian.container.spi.event.StopContainer;
import org.jboss.arquillian.container.spi.event.container.AfterStart;
import org.jboss.arquillian.container.spi.event.container.BeforeStop;
import org.jboss.arquillian.container.test.api.ContainerController;
import org.jboss.arquillian.core.api.Event;
import org.jboss.arquillian.core.api.Instance;
import org.jboss.arquillian.core.api.InstanceProducer;
import org.jboss.arquillian.core.api.annotation.ApplicationScoped;
import org.jboss.arquillian.core.api.annotation.Inject;
import org.jboss.arquillian.core.api.annotation.Observes;
import org.jboss.arquillian.test.spi.annotation.ClassScoped;
import org.jboss.arquillian.test.spi.annotation.SuiteScoped;
import org.jboss.arquillian.test.spi.event.suite.AfterClass;
import org.jboss.arquillian.test.spi.event.suite.AfterSuite;
import org.jboss.arquillian.test.spi.event.suite.BeforeClass;
import org.jboss.arquillian.test.spi.event.suite.BeforeSuite;
import org.jboss.logging.Logger;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.keycloak.admin.client.Keycloak;
import org.keycloak.common.util.StringPropertyReplacer;
import org.keycloak.representations.idm.RealmRepresentation;
import org.keycloak.services.error.KeycloakErrorHandler;
import org.keycloak.testsuite.arquillian.annotation.SetDefaultProvider;
import org.keycloak.testsuite.arquillian.annotation.UncaughtServerErrorExpected;
import org.keycloak.testsuite.arquillian.annotation.EnableVault;
import org.keycloak.testsuite.client.KeycloakTestingClient;
import org.keycloak.testsuite.util.LogChecker;
import org.keycloak.testsuite.util.OAuthClient;
import org.keycloak.testsuite.util.SpiProvidersSwitchingUtils;
import org.keycloak.testsuite.util.SqlUtils;
import org.keycloak.testsuite.util.SystemInfoHelper;
import org.keycloak.testsuite.util.VaultUtils;
import org.keycloak.testsuite.util.ServerURLs;
import org.keycloak.testsuite.util.TextFileChecker;
import org.wildfly.extras.creaper.core.ManagementClient;
import org.wildfly.extras.creaper.core.online.OnlineManagementClient;
import org.wildfly.extras.creaper.core.online.OnlineOptions;
import org.wildfly.extras.creaper.core.online.operations.Address;
import org.wildfly.extras.creaper.core.online.operations.Operations;
import org.wildfly.extras.creaper.core.online.operations.admin.Administration;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.Provider;
import java.security.Security;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.ws.rs.NotFoundException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.jboss.arquillian.test.spi.event.suite.After;
import org.jboss.arquillian.test.spi.event.suite.Before;
import org.jboss.shrinkwrap.api.importer.ZipImporter;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.resolver.api.maven.Maven;
import org.junit.Assert;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import static org.keycloak.testsuite.arquillian.ServerTestEnricherUtil.addHttpsListener;
import static org.keycloak.testsuite.arquillian.ServerTestEnricherUtil.reloadOrRestartTimeoutClient;
import static org.keycloak.testsuite.arquillian.ServerTestEnricherUtil.removeHttpsListener;
import static org.keycloak.testsuite.util.ServerURLs.getAuthServerContextRoot;
import static org.keycloak.testsuite.util.ServerURLs.removeDefaultPorts;
/**
*
* @author tkyjovsk
* @author vramik
*/
public class AuthServerTestEnricher {
protected static final Logger log = Logger.getLogger(AuthServerTestEnricher.class);
@Inject
private Instance<ContainerController> containerConroller;
@Inject
private Instance<ContainerRegistry> containerRegistry;
@Inject
private Event<StartContainer> startContainerEvent;
@Inject
private Event<StopContainer> stopContainerEvent;
private JavaArchive testsuiteProvidersArchive;
private String currentContainerName;
public static final String AUTH_SERVER_CONTAINER_DEFAULT = "auth-server-undertow";
public static final String AUTH_SERVER_CONTAINER_PROPERTY = "auth.server.container";
public static final String AUTH_SERVER_CONTAINER = System.getProperty(AUTH_SERVER_CONTAINER_PROPERTY, AUTH_SERVER_CONTAINER_DEFAULT);
public static final String AUTH_SERVER_BACKEND_DEFAULT = AUTH_SERVER_CONTAINER + "-backend";
public static final String AUTH_SERVER_BACKEND_PROPERTY = "auth.server.backend";
public static final String AUTH_SERVER_BACKEND = System.getProperty(AUTH_SERVER_BACKEND_PROPERTY, AUTH_SERVER_BACKEND_DEFAULT);
public static final String AUTH_SERVER_LEGACY = "auth-server-legacy";
public static final String AUTH_SERVER_BALANCER_DEFAULT = "auth-server-balancer";
public static final String AUTH_SERVER_BALANCER_PROPERTY = "auth.server.balancer";
public static final String AUTH_SERVER_BALANCER = System.getProperty(AUTH_SERVER_BALANCER_PROPERTY, AUTH_SERVER_BALANCER_DEFAULT);
public static final String AUTH_SERVER_CLUSTER_PROPERTY = "auth.server.cluster";
public static final boolean AUTH_SERVER_CLUSTER = Boolean.parseBoolean(System.getProperty(AUTH_SERVER_CLUSTER_PROPERTY, "false"));
public static final String AUTH_SERVER_CROSS_DC_PROPERTY = "auth.server.crossdc";
public static final boolean AUTH_SERVER_CROSS_DC = Boolean.parseBoolean(System.getProperty(AUTH_SERVER_CROSS_DC_PROPERTY, "false"));
public static final String AUTH_SERVER_HOME_PROPERTY = "auth.server.home";
public static final String CACHE_SERVER_LIFECYCLE_SKIP_PROPERTY = "cache.server.lifecycle.skip";
public static final boolean CACHE_SERVER_LIFECYCLE_SKIP = Boolean.parseBoolean(System.getProperty(CACHE_SERVER_LIFECYCLE_SKIP_PROPERTY, "false"));
private static final String MIGRATION_MODE_PROPERTY = "migration.mode";
private static final String MIGRATION_MODE_AUTO = "auto";
private static final String MIGRATION_MODE_MANUAL = "manual";
public static final Boolean START_MIGRATION_CONTAINER = MIGRATION_MODE_AUTO.equals(System.getProperty(MIGRATION_MODE_PROPERTY)) ||
MIGRATION_MODE_MANUAL.equals(System.getProperty(MIGRATION_MODE_PROPERTY));
@Inject
@SuiteScoped
private InstanceProducer<SuiteContext> suiteContextProducer;
private SuiteContext suiteContext;
@Inject
@ApplicationScoped // needed in AdapterTestExecutionDecider
private InstanceProducer<TestContext> testContextProducer;
@Inject
@ClassScoped
private InstanceProducer<OAuthClient> oAuthClientProducer;
/** @return {@code true} when the configured auth-server container is the remote one. */
public static boolean isAuthServerRemote() {
    return "auth-server-remote".equals(AUTH_SERVER_CONTAINER);
}
/** @return {@code true} when the configured auth-server container is the Quarkus distribution. */
public static boolean isAuthServerQuarkus() {
    return "auth-server-quarkus".equals(AUTH_SERVER_CONTAINER);
}
/**
 * Builds the plain-HTTP context root of the auth server from the {@code auth.server.host}
 * (default {@code localhost}) and mandatory {@code auth.server.http.port} system properties.
 */
public static String getHttpAuthServerContextRoot() {
    final String host = System.getProperty("auth.server.host", "localhost");
    // property must be set
    final int port = Integer.parseInt(System.getProperty("auth.server.http.port"));
    return removeDefaultPorts("http://" + host + ":" + port);
}
/**
 * Builds the HTTPS context root of the auth server from the {@code auth.server.host}
 * (default {@code localhost}) and mandatory {@code auth.server.https.port} system properties.
 */
public static String getHttpsAuthServerContextRoot() {
    final String host = System.getProperty("auth.server.host", "localhost");
    // renamed from copy-pasted 'httpPort' — this is the HTTPS port; property must be set
    final int httpsPort = Integer.parseInt(System.getProperty("auth.server.https.port"));
    return removeDefaultPorts("https://" + host + ":" + httpsPort);
}
/** Browser-facing context root derived from the effective auth-server context root. */
public static String getAuthServerBrowserContextRoot() throws MalformedURLException {
    final URL contextRoot = new URL(getAuthServerContextRoot());
    return getAuthServerBrowserContextRoot(contextRoot);
}
/**
 * Browser-facing context root for the given server URL. The host may be overridden via the
 * {@code auth.server.browserHost} system property; default ports are omitted from the result.
 */
public static String getAuthServerBrowserContextRoot(URL contextRoot) {
    String browserHost = System.getProperty("auth.server.browserHost");
    if (browserHost == null || browserHost.isEmpty()) {
        browserHost = contextRoot.getHost();
    }
    final int port = contextRoot.getPort();
    final String portSuffix = (port == -1 || port == contextRoot.getDefaultPort())
            ? ""
            : ":" + port;
    return String.format("%s://%s%s", contextRoot.getProtocol(), browserHost, portSuffix);
}
/**
 * Opens a Creaper management client against the auth server's management interface, using the
 * {@code auth.server.management.host}/{@code auth.server.management.port} system properties
 * (defaults {@code localhost:10090}) and admin/admin credentials.
 */
public static OnlineManagementClient getManagementClient() {
    final String host = System.getProperty("auth.server.management.host", "localhost");
    final int port = Integer.parseInt(System.getProperty("auth.server.management.port", "10090"));
    try {
        return ManagementClient.online(OnlineOptions.standalone()
                .hostAndPort(host, port)
                .auth("admin", "admin")
                .build());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/** Logs a separator banner for the starting container and remembers its name. */
public void distinguishContainersInConsoleOutput(@Observes(precedence = 5) StartContainer event) {
    currentContainerName = event.getContainer().getName();
    log.info("************************" + currentContainerName
            + "*****************************************************************************");
}
/**
 * Populates the suite-scoped {@link SuiteContext} from the Arquillian container registry
 * before the suite starts. Containers are classified into frontend / backend / cache /
 * migration roles depending on the active mode (cross-DC, cluster, or plain single-server),
 * the resulting setup is validated, and the context is published for the other observers.
 * Finally, leftover {@code map-*.json} storage files from previous runs are removed.
 */
public void initializeSuiteContext(@Observes(precedence = 2) BeforeSuite event) {
    Set<ContainerInfo> containers = containerRegistry.get().getContainers().stream()
            .map(ContainerInfo::new)
            .collect(Collectors.toSet());
    suiteContext = new SuiteContext(containers);
    if (AUTH_SERVER_CROSS_DC) {
        // if cross-dc mode enabled, load-balancer is the frontend of datacenter cluster
        containers.stream()
                .filter(c -> c.getQualifier().startsWith(AUTH_SERVER_BALANCER + "-cross-dc"))
                .forEach(c -> {
                    String portOffsetString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("bindHttpPortOffset", "0");
                    String dcString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("dataCenter", "0");
                    updateWithAuthServerInfo(c, Integer.valueOf(portOffsetString));
                    suiteContext.addAuthServerInfo(Integer.valueOf(dcString), c);
                });
        if (suiteContext.getDcAuthServerInfo().isEmpty()) {
            throw new IllegalStateException("Not found frontend container (load balancer): " + AUTH_SERVER_BALANCER);
        }
        if (suiteContext.getDcAuthServerInfo().stream().anyMatch(Objects::isNull)) {
            throw new IllegalStateException("Frontend container (load balancer) misconfiguration");
        }
        // Backend containers for the current node, sorted by qualifier for deterministic order.
        containers.stream()
                .filter(c -> c.getQualifier().startsWith("auth-server-" + System.getProperty("node.name") + "-"))
                .sorted((a, b) -> a.getQualifier().compareTo(b.getQualifier()))
                .forEach(c -> {
                    String portOffsetString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("bindHttpPortOffset", "0");
                    updateWithAuthServerInfo(c, Integer.valueOf(portOffsetString));
                    String dcString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("dataCenter", "0");
                    suiteContext.addAuthServerBackendsInfo(Integer.valueOf(dcString), c);
                });
        // Cache servers: the trailing "-N" of the qualifier identifies the (1-based) data center.
        containers.stream()
                .filter(c -> c.getQualifier().startsWith("cache-server-"))
                .sorted((a, b) -> a.getQualifier().compareTo(b.getQualifier()))
                .forEach(containerInfo -> {
                    log.info(String.format("cache container: %s", containerInfo.getQualifier()));
                    int prefixSize = containerInfo.getQualifier().lastIndexOf("-") + 1;
                    int dcIndex = Integer.parseInt(containerInfo.getQualifier().substring(prefixSize)) - 1;
                    suiteContext.addCacheServerInfo(dcIndex, containerInfo);
                });
        if (suiteContext.getDcAuthServerInfo().isEmpty()) {
            throw new RuntimeException(String.format("No auth server container matching '%s' found in arquillian.xml.", AUTH_SERVER_BACKEND));
        }
        if (suiteContext.getDcAuthServerBackendsInfo().stream().anyMatch(Objects::isNull)) {
            throw new IllegalStateException("Frontend container (load balancer) misconfiguration");
        }
        if (suiteContext.getDcAuthServerBackendsInfo().stream().anyMatch(List::isEmpty)) {
            throw new RuntimeException(String.format("Some data center has no auth server container matching '%s' defined in arquillian.xml.", AUTH_SERVER_BACKEND));
        }
        if (suiteContext.getCacheServersInfo().isEmpty() && !CACHE_SERVER_LIFECYCLE_SKIP) {
            throw new IllegalStateException("Cache containers misconfiguration");
        }
        log.info("Using frontend containers: " + this.suiteContext.getDcAuthServerInfo().stream()
                .map(ContainerInfo::getQualifier)
                .collect(Collectors.joining(", ")));
    } else if (AUTH_SERVER_CLUSTER) {
        // if cluster mode enabled, load-balancer is the frontend
        ContainerInfo container = containers.stream()
                .filter(c -> c.getQualifier().startsWith(AUTH_SERVER_BALANCER))
                .findAny()
                .orElseThrow(() -> new IllegalStateException("Not found frontend container: " + AUTH_SERVER_BALANCER));
        updateWithAuthServerInfo(container);
        suiteContext.setAuthServerInfo(container);
        containers.stream()
                .filter(c -> c.getQualifier().startsWith(AUTH_SERVER_BACKEND))
                .sorted((a, b) -> a.getQualifier().compareTo(b.getQualifier())) // ordering is expected by the cluster tests
                .forEach(c -> {
                    // port offset is encoded as the numeric suffix of the backend qualifier
                    int portOffset = Integer.parseInt(c.getQualifier().substring(AUTH_SERVER_BACKEND.length()));
                    updateWithAuthServerInfo(c, portOffset);
                    suiteContext.addAuthServerBackendsInfo(0, c);
                });
        if (Boolean.parseBoolean(System.getProperty("auth.server.jboss.legacy"))) {
            ContainerInfo legacy = containers.stream()
                    .filter(c -> c.getQualifier().startsWith(AUTH_SERVER_LEGACY))
                    .findAny()
                    .orElseThrow(() -> new IllegalStateException("Not found legacy container: " + AUTH_SERVER_LEGACY));
            updateWithAuthServerInfo(legacy, 500);
            suiteContext.setLegacyAuthServerInfo(legacy);
        }
        if (suiteContext.getAuthServerBackendsInfo().isEmpty()) {
            throw new RuntimeException(String.format("No auth server container matching '%s' found in arquillian.xml.", AUTH_SERVER_BACKEND));
        }
        log.info("Using frontend container: " + container.getQualifier());
    } else {
        // frontend-only
        ContainerInfo container = containers.stream()
                .filter(c -> c.getQualifier().startsWith(AUTH_SERVER_CONTAINER))
                .findAny()
                .orElseThrow(() -> new IllegalStateException("Not found frontend container: " + AUTH_SERVER_CONTAINER));
        updateWithAuthServerInfo(container);
        suiteContext.setAuthServerInfo(container);
    }
    if (START_MIGRATION_CONTAINER) {
        // init migratedAuthServerInfo
        for (ContainerInfo container : suiteContext.getContainers()) {
            // migrated auth server
            if (container.getQualifier().equals("auth-server-jboss-migration")) {
                updateWithAuthServerInfo(container);
                suiteContext.setMigratedAuthServerInfo(container);
            }
        }
        // validate setup
        if (suiteContext.getMigratedAuthServerInfo() == null) {
            throw new RuntimeException(String.format("Migration test was enabled but no auth server from which to migrate was activated. "
                    + "A container matching auth-server-jboss-migration needs to be enabled in arquillian.xml."));
        }
    }
    suiteContextProducer.set(suiteContext);
    CrossDCTestEnricher.initializeSuiteContext(suiteContext);
    log.info("\n\n" + suiteContext);
    log.info("\n\n" + SystemInfoHelper.getSystemInfo());
    // Remove all map storages present in target directory
    // This is useful for example in intellij where target directory is not removed between test runs
    // NOTE(review): WildcardFileFilter comes from commons-io; it is not among the imports
    // visible in this chunk — confirm the import exists at the top of the file.
    File dir = new File(System.getProperty("project.build.directory", "target"));
    FileFilter fileFilter = new WildcardFileFilter("map-*.json");
    File[] files = dir.listFiles(fileFilter);
    if (files != null) {
        for (File f : files) {
            f.delete();
        }
    }
}
/**
 * Executes the given JBoss CLI commands against the auth server's management interface,
 * asserting that each one succeeds, then reloads the server.
 *
 * @param commands CLI command strings, executed in order
 * @throws Exception if a command fails, the reload times out, or the client cannot connect
 */
public static void executeCli(String... commands) throws Exception {
    // try-with-resources: the original leaked the management client whenever a
    // command (or the reload) threw before reaching client.close().
    try (OnlineManagementClient client = AuthServerTestEnricher.getManagementClient()) {
        Administration administration = new Administration(client);
        for (String c : commands) {
            client.execute(c).assertSuccess();
        }
        administration.reload();
    }
}
// Convenience overload: populate the container's URLs with no cluster port offset.
private ContainerInfo updateWithAuthServerInfo(ContainerInfo authServerInfo) {
    return updateWithAuthServerInfo(authServerInfo, 0);
}
/**
 * Fills in the container's context root and browser-facing context root, shifting the port
 * by {@code clusterPortOffset}. Returns the same instance for call chaining.
 *
 * @throws IllegalArgumentException if the computed URLs are malformed
 */
private ContainerInfo updateWithAuthServerInfo(ContainerInfo authServerInfo, int clusterPortOffset) {
    try {
        final URL contextRoot = new URL(getAuthServerContextRoot(clusterPortOffset));
        authServerInfo.setContextRoot(contextRoot);
        authServerInfo.setBrowserContextRoot(new URL(getAuthServerBrowserContextRoot(contextRoot)));
        return authServerInfo;
    } catch (MalformedURLException ex) {
        throw new IllegalArgumentException(ex);
    }
}
/**
 * When migration testing is enabled, starts the "migrated-from" (old-version) Keycloak
 * container ahead of the regular suite containers and sets up TLS on it.
 */
public void startMigratedContainer(@Observes(precedence = 3) StartSuiteContainers event) {
    if (suiteContext.isAuthServerMigrationEnabled()) {
        log.info("\n\n### Starting keycloak " + System.getProperty("migrated.auth.server.version", "- previous") + " ###\n\n");
        startContainerEvent.fire(new StartContainer(suiteContext.getMigratedAuthServerInfo().getArquillianContainer()));
        initializeTLS(suiteContext.getMigratedAuthServerInfo());
    }
}
/**
 * For a remote auth server, resolves the testsuite-providers artifact via the Maven resolver,
 * repackages it as {@code testsuiteProviders.jar} with a {@code jboss-deployment-structure.xml}
 * manifest resource, and deploys it right after the container starts. The archive reference is
 * kept so {@code unDeployProviders} can remove it before the container stops.
 *
 * @throws DeploymentException if the deployment fails
 */
public void deployProviders(@Observes(precedence = -1) AfterStart event) throws DeploymentException {
    if (isAuthServerRemote() && currentContainerName.contains("auth-server")) {
        this.testsuiteProvidersArchive = ShrinkWrap.create(ZipImporter.class, "testsuiteProviders.jar")
                .importFrom(Maven.configureResolverViaPlugin()
                        .resolve("org.keycloak.testsuite:integration-arquillian-testsuite-providers")
                        .withoutTransitivity()
                        .asSingleFile()
                ).as(JavaArchive.class)
                .addAsManifestResource("jboss-deployment-structure.xml");
        event.getDeployableContainer().deploy(testsuiteProvidersArchive);
    }
}
/**
 * Undeploys the testsuite-providers archive (if one was deployed by {@code deployProviders})
 * before the container stops.
 *
 * @throws DeploymentException if the undeployment fails
 */
public void unDeployProviders(@Observes(precedence = 20) BeforeStop event) throws DeploymentException {
    if (testsuiteProvidersArchive != null) {
        event.getDeployableContainer().undeploy(testsuiteProvidersArchive);
    }
}
/**
 * When migration testing is enabled, runs the migration context's pre-migration task
 * against the old-version server before the suite containers start.
 */
public void runPreMigrationTask(@Observes(precedence = 2) StartSuiteContainers event) throws Exception {
    if (suiteContext.isAuthServerMigrationEnabled()) {
        log.info("\n\n### Run preMigration task on keycloak " + System.getProperty("migrated.auth.server.version", "- previous") + " ###\n\n");
        suiteContext.getMigrationContext().runPreMigrationTask();
    }
}
/**
 * Stops the old-version ("migrated-from") container after the pre-migration task has run,
 * freeing its ports before the new-version server starts.
 */
public void stopMigratedContainer(@Observes(precedence = 1) StartSuiteContainers event) {
    if (suiteContext.isAuthServerMigrationEnabled()) {
        log.info("## STOP old container: " + suiteContext.getMigratedAuthServerInfo().getQualifier());
        stopContainerEvent.fire(new StopContainer(suiteContext.getMigratedAuthServerInfo().getArquillianContainer()));
    }
}
/**
 * Starts the (frontend) auth server container before the suite. In manual-DB-migration mode
 * the first start is expected to fail; in that case the pending SQL migration script is run
 * via {@code handleManualMigration()} and the start is retried once. Any other startup
 * failure is rethrown.
 */
public void startAuthContainer(@Observes(precedence = 0) StartSuiteContainers event) {
    //frontend-only (either load-balancer or auth-server)
    log.debug("Starting auth server before suite");
    try {
        startContainerEvent.fire(new StartContainer(suiteContext.getAuthServerInfo().getArquillianContainer()));
    } catch (Exception e) {
        // It is expected that server startup fails with migration-mode-manual
        if (e instanceof LifecycleException && handleManualMigration()) {
            log.info("Set log file checker to end of file.");
            try {
                // this will mitigate possible issues in manual server update tests
                // when the auth server started with not updated DB
                // e.g. Caused by: org.keycloak.ServerStartupError: Database not up-to-date, please migrate database with
                if (suiteContext.getServerLogChecker() == null) {
                    setServerLogChecker();
                }
                suiteContext.getServerLogChecker()
                        .updateLastCheckedPositionsOfAllFilesToEndOfFile();
            } catch (IOException ioe) {
                log.warn("Server log checker failed to update position:", ioe);
            }
            log.info("Starting server again after manual DB migration was finished");
            startContainerEvent.fire(new StartContainer(suiteContext.getAuthServerInfo().getArquillianContainer()));
            return;
        }
        // Just re-throw the exception
        throw e;
    }
}
/**
 * Returns true if we are in manual DB migration test and if the previously created SQL script was successfully executed.
 * Returns false if we are not in manual DB migration test or SQL script couldn't be executed for any reason.
 * @return see method description
 */
private boolean handleManualMigration() {
    // It is expected that server startup fails with migration-mode-manual
    if (!MIGRATION_MODE_MANUAL.equals(System.getProperty(MIGRATION_MODE_PROPERTY))) {
        return false;
    }
    String authServerHome = System.getProperty(AUTH_SERVER_HOME_PROPERTY);
    if (authServerHome == null) {
        log.warnf("Property '%s' was missing during manual mode migration test", AUTH_SERVER_HOME_PROPERTY);
        return false;
    }
    String sqlScriptPath = authServerHome + File.separator + "keycloak-database-update.sql";
    if (!new File(sqlScriptPath).exists()) {
        log.warnf("File '%s' didn't exist during manual mode migration test", sqlScriptPath);
        return false;
    }
    // Run the liquibase-generated SQL script manually.
    // Fix: the original format string had no placeholder, so sqlScriptPath was silently dropped.
    log.infof("Running SQL script '%s' created by liquibase during manual migration flow", sqlScriptPath);
    String prefix = "keycloak.connectionsJpa.";
    String jdbcDriver = System.getProperty(prefix + "driver");
    String dbUrl = StringPropertyReplacer.replaceProperties(System.getProperty(prefix + "url"));
    String dbUser = System.getProperty(prefix + "user");
    String dbPassword = System.getProperty(prefix + "password");
    SqlUtils.runSqlScript(sqlScriptPath, jdbcDriver, dbUrl, dbUser, dbPassword);
    return true;
}
private static final Pattern RECOGNIZED_ERRORS = Pattern.compile("ERROR \\[|SEVERE \\[|Exception ");
private static final Pattern IGNORED = Pattern.compile("Jetty ALPN support not found|org.keycloak.events");
/**
 * Returns true when the given log line matches a recognized error pattern and is not on the
 * ignore list. Note the expected string "Exception" legitimately appears in the server log
 * when the ModelExceptionMapper provider singleton is registered — hence the IGNORED filter.
 */
// Fix: removed the redundant 'final' modifier — static methods cannot be overridden.
private static boolean isRecognizedErrorLog(String logText) {
    return RECOGNIZED_ERRORS.matcher(logText).find() && !IGNORED.matcher(logText).find();
}
/**
 * Scans the given log lines and throws a {@link RuntimeException} quoting the first line
 * that {@code isRecognizedErrorLog} classifies as an error.
 */
// Fix: removed the redundant 'final' modifier — static methods cannot be overridden.
private static void failOnRecognizedErrorInLog(Stream<String> logStream) {
    logStream.filter(AuthServerTestEnricher::isRecognizedErrorLog)
            .findAny()
            .ifPresent(err -> {
                throw new RuntimeException(String.format("Server log file contains ERROR: '%s'", err));
            });
}
/**
 * Installs a log checker watching the JBoss server logs of the frontend auth server,
 * using the container's {@code jbossHome} property to locate them.
 */
private void setServerLogChecker() throws IOException {
    String jbossHomePath = suiteContext.getAuthServerInfo().getProperties().get("jbossHome");
    suiteContext.setServerLogChecker(LogChecker.getJBossServerLogsChecker(jbossHomePath));
}
/**
 * Before the suite, installs a server-log checker and — unless disabled via the
 * {@code auth.server.log.check} system property (default {@code true}) — scans the logs,
 * failing on any recognized error. Non-JBoss servers get a no-op checker.
 */
public void checkServerLogs(@Observes(precedence = -1) BeforeSuite event) throws IOException, InterruptedException {
    if (! suiteContext.getAuthServerInfo().isJBossBased()) {
        suiteContext.setServerLogChecker(new TextFileChecker()); // checks nothing
        return;
    }
    if (suiteContext.getServerLogChecker() == null) {
        setServerLogChecker();
    }
    boolean checkLog = Boolean.parseBoolean(System.getProperty("auth.server.log.check", "true"));
    if (checkLog) {
        suiteContext.getServerLogChecker()
                .checkFiles(true, AuthServerTestEnricher::failOnRecognizedErrorInLog);
    }
}
/**
 * Restarts the auth server: a remote server is reloaded through its management interface
 * (honoring the {@code auth.server.jboss.startup.timeout} system property, default 300 s);
 * a managed container is stopped and started through Arquillian events.
 *
 * @throws Exception if the reload or the container restart fails
 */
public void restartAuthServer() throws Exception {
    if (isAuthServerRemote()) {
        try (OnlineManagementClient client = getManagementClient()) {
            // Fix: Integer.getInteger(name, def) expects a system-property NAME. The original
            // passed System.getProperty(...) — i.e. the property's VALUE (or null) — so the
            // configured timeout was always ignored and the 300 s default silently used.
            int timeoutInSec = Integer.getInteger("auth.server.jboss.startup.timeout", 300);
            Administration administration = new Administration(client, timeoutInSec);
            administration.reload();
        }
    } else {
        stopContainerEvent.fire(new StopContainer(suiteContext.getAuthServerInfo().getArquillianContainer()));
        startContainerEvent.fire(new StartContainer(suiteContext.getAuthServerInfo().getArquillianContainer()));
    }
}
/**
 * Creates and publishes the per-class {@link TestContext}. For non-remote servers it also
 * applies class-level configuration annotations ({@code @SetDefaultProvider} with
 * {@code beforeEnableFeature}, {@code @EnableVault}) and, when anything changed, restarts
 * the server and reconnects the admin client so the new configuration takes effect.
 */
public void initializeTestContext(@Observes(precedence = 2) BeforeClass event) throws Exception {
    TestContext testContext = new TestContext(suiteContext, event.getTestClass().getJavaClass());
    testContextProducer.set(testContext);
    if (!isAuthServerRemote()) {
        boolean wasUpdated = false;
        if (event.getTestClass().isAnnotationPresent(SetDefaultProvider.class)) {
            SetDefaultProvider defaultProvider = event.getTestClass().getAnnotation(SetDefaultProvider.class);
            if (defaultProvider.beforeEnableFeature()) {
                SpiProvidersSwitchingUtils.addProviderDefaultValue(suiteContext, defaultProvider);
                wasUpdated = true;
            }
        }
        if (event.getTestClass().isAnnotationPresent(EnableVault.class)) {
            VaultUtils.enableVault(suiteContext, event.getTestClass().getAnnotation(EnableVault.class).providerId());
            wasUpdated = true;
        }
        if (wasUpdated) {
            // restart so the SPI/vault configuration changes take effect
            restartAuthServer();
            testContext.reconnectAdminClient();
        }
    }
}
/**
 * Sets up TLS on the frontend auth server before each test class.
 * Skipped in cross-DC and cluster modes.
 */
public void initializeTLS(@Observes(precedence = 3) BeforeClass event) throws Exception {
    // TLS for Undertow is configured in KeycloakOnUndertow since it requires
    // SSLContext while initializing HTTPS handlers
    if (!suiteContext.isAuthServerCrossDc() && !suiteContext.isAuthServerCluster()) {
        initializeTLS(suiteContext.getAuthServerInfo());
    }
}
/**
 * Enables TLS on the given JBoss-based container via its management interface when SSL is
 * required. Failures are logged (not rethrown) so a test may still configure TLS manually.
 */
public static void initializeTLS(ContainerInfo containerInfo) {
    if (ServerURLs.AUTH_SERVER_SSL_REQUIRED && containerInfo.isJBossBased()) {
        log.infof("\n\n### Setting up TLS for %s ##\n\n", containerInfo);
        try (OnlineManagementClient client = getManagementClient(containerInfo)) {
            AuthServerTestEnricher.enableTLS(client);
        } catch (Exception e) {
            log.warn("Failed to set up TLS for container '" + containerInfo.getQualifier() + "'. This may lead to unexpected behavior unless the test" +
                    " sets it up manually", e);
        }
    }
}
/** KEYCLOAK-15692 Work-around the OpenJSSE TlsMasterSecretGenerator error:
*
* https://github.com/openjsse/openjsse/issues/11
*
* To prevent above TLS handshake error when initiating a TLS connection
* ensure:
* * Either both server and client endpoints of the future TLS connection
* simultaneously utilize a JSSE security provider using the OpenJSSE
* extension,
*
* * Or both server and client endpoints simultaneously use a JSSE
* security provider, which doesn't depend on the OpenJSSE extension.
*
* Do this by performing the following:
* * On platforms where implementation of the SunJSSE provider depends on
* OpenJSSE extension ensure only SunJSSE provider is used to define the
* SSL context of the Elytron client used for outbound SSL connections.
*
* * On other platforms, use any suitable JSSE provider by querying all
* the platform providers for respective property.
*
*/
public static void setJsseSecurityProviderForOutboundSslConnectionsOfElytronClient(@Observes(precedence = 100) StartSuiteContainers event) {
    log.info(
            "Determining the JSSE security provider to use for outbound " +
            "SSL/TLS connections of the Elytron client"
    );
    // Use path to wildfly-config.xml directly if specified
    String wildflyConfigXmlPath = System.getProperty("wildfly-client.config.path");
    // Otherwise scan the classpath to determine its location
    if (wildflyConfigXmlPath == null) {
        log.debug("Scanning classpath to locate wildfly-config.xml");
        final String javaClassPath = System.getProperty("java.class.path");
        for (String dir : javaClassPath.split(File.pathSeparator)) {
            if (!dir.isEmpty()) {
                String candidatePath = dir + File.separator + "wildfly-config.xml";
                if (new File(candidatePath).exists()) {
                    wildflyConfigXmlPath = candidatePath;
                    log.debugf("Found wildfly-config.xml at '%s' location", wildflyConfigXmlPath);
                    break;
                }
            }
        }
    }
    final File wildflyConfigXml = (wildflyConfigXmlPath != null) ? new File(wildflyConfigXmlPath) : null;
    // Throw an error if wildfly-config.xml path specified directly via the
    // 'wildfly-client.config.path' property doesn't represent a regular file
    // on the file system, or if it wasn't found by scanning the classpath
    if (wildflyConfigXml == null || !wildflyConfigXml.exists()) {
        throw new RuntimeException(
                "Failed to locate the wildfly-config.xml to use for " +
                "the configuration of Elytron client"
        );
    } else {
        log.debugf("Using wildfly-config.xml from '%s' location", wildflyConfigXmlPath);
    }
    /* Determine the name of the system property from wildfly-config.xml holding the name of
     * the security provider which is used by Elytron client to define its SSL context for
     * outbound SSL connections.
     */
    String jsseSecurityProviderSystemProperty = null;
    try {
        DocumentBuilder documentBuilder = DocumentBuilderFactory
                .newInstance().newDocumentBuilder();
        Document xmlDoc = documentBuilder.parse(wildflyConfigXml);
        NodeList nodeList = xmlDoc.getElementsByTagName("provider-name");
        // Sanity check
        if (nodeList.getLength() != 1) {
            throw new RuntimeException(
                    "Failed to locate the 'provider-name' element " +
                    "in wildfly-config.xml XML file"
            );
        }
        String providerNameElement = nodeList.item(0).getAttributes()
                .getNamedItem("name").getNodeValue();
        // Drop Wildfly's ${...} expression notation from the attribute's value.
        // Fix: "" replaces the obsolete 'new String()' idiom.
        jsseSecurityProviderSystemProperty = providerNameElement
                .replaceAll("(\\$|\\{|\\}|(:.*$))", "");
    } catch (IOException e) {
        // Fix: pass 'e' as the cause — the original discarded the underlying exception.
        throw new RuntimeException(String.format(
                "Error reading the '%s' file. Please make sure the provided " +
                "path is correct and retry",
                wildflyConfigXml.getAbsolutePath()
        ), e);
    } catch (ParserConfigurationException | SAXException e) {
        // Fix: pass 'e' as the cause — the original discarded the underlying exception.
        throw new RuntimeException(String.format(
                "Failed to parse the '%s' XML file",
                wildflyConfigXml.getAbsolutePath()
        ), e);
    }
    boolean determineJsseSecurityProviderName = false;
    if (jsseSecurityProviderSystemProperty != null) {
        // Does JSSE security provider system property already exist? If not, determine it.
        if (System.getProperty(jsseSecurityProviderSystemProperty) == null) {
            determineJsseSecurityProviderName = true;
        }
    } else {
        throw new RuntimeException(
                "Failed to determine the name of system property " +
                "holding JSSE security provider's name for Elytron client"
        );
    }
    if (determineJsseSecurityProviderName) {
        /* Detect if OpenJSSE extension is present on the platform.
         *
         * Since internal 'com.sun.net.ssl.*' classes of the SunJSSE provider have identical
         * names regardless if the OpenJSSE extension is used or not:
         *
         *   https://github.com/openjsse/openjsse/blob/master/pom.xml#L125
         *
         * detect the presence of the OpenJSSE extension by checking the presence of the
         * 'openjsse.jar' file within the JRE extensions directory.
         */
        final String openJsseJarPath = System.getProperty("java.home") +
                File.separator + "lib" + File.separator + "ext" +
                File.separator + "openjsse.jar";
        boolean openJsseExtensionPresent = new File(openJsseJarPath).exists();
        // Fix: guard against a null/empty provider array before indexing. The original
        // dereferenced [0] immediately, so its later 'platformJsseProvider != null' branch
        // could never be reached — a missing provider surfaced as an NPE instead of the
        // intended error message.
        Provider[] tlsCapableProviders = Security.getProviders("SSLContext.TLSv1.2");
        Provider platformJsseProvider =
                (tlsCapableProviders != null && tlsCapableProviders.length > 0)
                        ? tlsCapableProviders[0]
                        : null;
        if (platformJsseProvider != null) {
            // If OpenJSSE extension is present
            if (openJsseExtensionPresent) {
                // Sanity check - confirm SunJSSE provider is present on the platform
                // (if OpenJSSE extension is present, it shouldn't ever happen SunJSSE
                // won't be, but double-check for any case).
                // Fix: findFirst().orElse(null) instead of collect(...).get(0) — the original
                // would throw IndexOutOfBoundsException when SunJSSE was absent, making the
                // explicit "SunJSSE provider is not present" error below unreachable.
                Provider sunJsseProvider = Stream.of(Security.getProviders())
                        .filter(p -> p.getName().equals("SunJSSE"))
                        .findFirst()
                        .orElse(null);
                // Use it or throw an error if absent
                if (sunJsseProvider != null) {
                    platformJsseProvider = sunJsseProvider;
                } else {
                    throw new RuntimeException(
                            "The SunJSSE provider is not present " +
                            "on the platform"
                    );
                }
            }
            // Propagate the final provider name to system property used by
            // wildfly-config.xml to configure the JSSE provider name
            System.setProperty(
                    jsseSecurityProviderSystemProperty,
                    platformJsseProvider.getName()
            );
        } else {
            throw new RuntimeException(
                    "Cannot identify a security provider for Elytron client " +
                    "offering the TLSv1.2 capability"
            );
        }
        log.infof("Using the '%s' JSSE provider", platformJsseProvider.getName());
    }
}
/**
 * Opens a Creaper management client for the given container, connecting to localhost on the
 * port declared by the container's {@code managementPort} property.
 */
private static OnlineManagementClient getManagementClient(ContainerInfo containerInfo) {
    final int managementPort = Integer.parseInt(containerInfo.getProperties().get("managementPort"));
    try {
        return ManagementClient.online(OnlineOptions.standalone()
                .hostAndPort("localhost", managementPort)
                .build());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Configures the {@code UndertowRealm} security realm with the keycloak.jks keystore and
 * keycloak.truststore, swaps the HTTPS listener over to it, and reloads the server.
 * No-op when the realm already exists (TLS already configured).
 *
 * @throws Exception if a CLI operation or the reload fails
 */
private static void enableTLS(OnlineManagementClient client) throws Exception {
    Administration administration = new Administration(client);
    Operations operations = new Operations(client);
    if (!operations.exists(Address.coreService("management").and("security-realm", "UndertowRealm"))) {
        client.execute("/core-service=management/security-realm=UndertowRealm:add()");
        // Fix: both :add(...) operations below were missing their closing parenthesis,
        // producing malformed CLI commands.
        client.execute("/core-service=management/security-realm=UndertowRealm/server-identity=ssl:add(keystore-relative-to=jboss.server.config.dir,keystore-password=secret,keystore-path=keycloak.jks)");
        client.execute("/core-service=management/security-realm=UndertowRealm/authentication=truststore:add(keystore-relative-to=jboss.server.config.dir,keystore-password=secret,keystore-path=keycloak.truststore)");
        removeHttpsListener(client, administration);
        addHttpsListener(client);
        reloadOrRestartTimeoutClient(administration);
    } else {
        log.info("## The Auth Server has already configured TLS. Skipping ##");
    }
}
/** @return {@code true} when any registered Arquillian container is JBoss-based. */
protected boolean isAuthServerJBossBased() {
    return containerRegistry.get().getContainers().stream()
            .anyMatch(container -> new ContainerInfo(container).isJBossBased());
}
/**
 * Creates the class-scoped {@link OAuthClient}, pointing its URLs at the frontend
 * auth server's context root.
 */
public void initializeOAuthClient(@Observes(precedence = 4) BeforeClass event) {
    // TODO workaround. Check if can be removed
    OAuthClient.updateURLs(suiteContext.getAuthServerInfo().getContextRoot().toString());
    OAuthClient oAuthClient = new OAuthClient();
    oAuthClientProducer.set(oAuthClient);
}
/**
 * Fast-forwards the server-log checker to the end of all log files before each test,
 * so the post-test scan only inspects lines produced by that test.
 */
public void beforeTest(@Observes(precedence = 100) Before event) throws IOException {
    suiteContext.getServerLogChecker().updateLastCheckedPositionsOfAllFilesToEndOfFile();
}
/**
 * Starts the {@link TestClassProvider} for the suite and registers it on the suite context
 * so {@code stopTestClassProvider} can shut it down after the suite.
 */
public void startTestClassProvider(@Observes(precedence = 1) BeforeSuite beforeSuite) {
    TestClassProvider testClassProvider = new TestClassProvider();
    testClassProvider.start();
    suiteContext.setTestClassProvider(testClassProvider);
}
/** Stops the suite's {@link TestClassProvider} after the suite finishes. */
public void stopTestClassProvider(@Observes(precedence = -1) AfterSuite afterSuite) {
    suiteContext.getTestClassProvider().stop();
}
private static final Pattern UNEXPECTED_UNCAUGHT_ERROR = Pattern.compile(
KeycloakErrorHandler.class.getSimpleName()
+ ".*"
+ Pattern.quote(KeycloakErrorHandler.UNCAUGHT_SERVER_ERROR_TEXT)
+ "[\\s:]*(.*)$"
);
/**
 * Fails the test when any log line matches the uncaught-server-error pattern,
 * quoting the captured error detail in the failure message.
 */
private void checkForNoUnexpectedUncaughtError(Stream<String> logStream) {
    logStream.map(UNEXPECTED_UNCAUGHT_ERROR::matcher)
            .filter(Matcher::find)
            .findAny()
            .ifPresent(match -> Assert.fail("Uncaught server error detected: " + match.group(1)));
}
/**
 * After each test, scans the new server-log lines for uncaught server errors — unless the
 * test method is annotated {@code @UncaughtServerErrorExpected}, in which case such errors
 * are deliberate and the check is skipped.
 */
public void afterTest(@Observes(precedence = -1) After event) throws IOException {
    if (event.getTestMethod().getAnnotation(UncaughtServerErrorExpected.class) == null) {
        suiteContext.getServerLogChecker().checkFiles(false, this::checkForNoUnexpectedUncaughtError);
    }
}
/**
 * Per-class teardown: restarts the auth server if a test left it stopped, removes the test
 * realms, reverts class-level {@code @SetDefaultProvider}/{@code @EnableVault} configuration
 * (restarting the server when needed), and closes the admin and testing clients.
 *
 * NOTE(review): the injected field name 'containerConroller' (declared elsewhere in this
 * class) is misspelled — should read 'containerController'.
 */
public void afterClass(@Observes(precedence = 1) AfterClass event) throws Exception {
    //check if a test accidentally left the auth-server not running
    ContainerController controller = containerConroller.get();
    if (!controller.isStarted(suiteContext.getAuthServerInfo().getQualifier())) {
        log.warn("Auth server wasn't running. Starting " + suiteContext.getAuthServerInfo().getQualifier());
        controller.start(suiteContext.getAuthServerInfo().getQualifier());
    }
    TestContext testContext = testContextProducer.get();
    Keycloak adminClient = testContext.getAdminClient();
    KeycloakTestingClient testingClient = testContext.getTestingClient();
    removeTestRealms(testContext, adminClient);
    if (!isAuthServerRemote()) {
        boolean wasUpdated = false;
        if (event.getTestClass().isAnnotationPresent(SetDefaultProvider.class)) {
            SpiProvidersSwitchingUtils.resetProvider(suiteContext, event.getTestClass().getAnnotation(SetDefaultProvider.class));
            wasUpdated = true;
        }
        if (event.getTestClass().isAnnotationPresent(EnableVault.class) && !isAuthServerQuarkus()) {
            VaultUtils.disableVault(suiteContext, event.getTestClass().getAnnotation(EnableVault.class).providerId());
            wasUpdated = true;
        }
        if (wasUpdated) {
            // restart so the reverted SPI/vault configuration takes effect
            restartAuthServer();
            testContext.reconnectAdminClient();
        }
    }
    if (adminClient != null) {
        adminClient.close();
    }
    if (testingClient != null) {
        testingClient.close();
    }
}
/**
 * Deletes every test realm recorded in the test context via the admin client.
 * Realms that were already removed (404) are silently skipped.
 *
 * @param testContext context holding the realm representations created by the test class
 * @param adminClient admin client used to perform the deletions
 */
public static void removeTestRealms(TestContext testContext, Keycloak adminClient) {
    List<RealmRepresentation> reps = testContext.getTestRealmReps();
    if (reps == null || reps.isEmpty()) {
        return;
    }

    log.info("removing test realms after test class");
    StringBuilder removed = new StringBuilder();
    for (RealmRepresentation rep : reps) {
        try {
            adminClient.realms().realm(rep.getRealm()).remove();
            removed.append(rep.getRealm()).append(", ");
        } catch (NotFoundException e) {
            // realm already gone; nothing to clean up
        }
    }
    log.info("removed realms: " + removed);
}
}
|
/*
* Copyright © 2014 - 2020 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradoop.temporal.model.impl.operators.tostring.functions;
import org.gradoop.flink.model.impl.operators.tostring.functions.ElementToDataString;
import org.gradoop.temporal.model.impl.pojo.TemporalElement;
/**
 * Superclass of data-based string representations of temporal elements,
 * i.e. such including label, properties and valid time.
 *
 * @param <EL> temporal element type
 */
public class TemporalElementToDataString<EL extends TemporalElement> extends ElementToDataString<EL> {

  /**
   * Represents the valid-time interval of the input element as a string of the form
   * {@code (from,to)}. A default time value produces an empty field, so an element with
   * default times renders as {@code (,)}.
   * NOTE(review): only valid time is written here; transaction time is not included,
   * although earlier wording suggested it was — confirm this is intended.
   *
   * @param element temporal element
   * @return valid-time interval of the element as string
   */
  protected String time(EL element) {
    StringBuilder builder = new StringBuilder();
    builder.append('(');
    Long time = element.getValidFrom();
    // suppress the default "from" timestamp so untimed elements stay compact
    if (!time.equals(TemporalElement.DEFAULT_TIME_FROM)) {
      builder.append(time);
    }
    builder.append(',');
    time = element.getValidTo();
    // likewise suppress the default "to" timestamp
    if (!time.equals(TemporalElement.DEFAULT_TIME_TO)) {
      builder.append(time);
    }
    builder.append(')');
    return builder.toString();
  }
}
|
/*
* RTaskType.java
*
* Copyright (C) 2010-2016, Microsoft Corporation
*
* This program is licensed to you under the terms of Version 2.0 of the
* Apache License. This program is distributed WITHOUT
* ANY EXPRESS OR IMPLIED WARRANTY, INCLUDING THOSE OF NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Please refer to the
* Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0) for more details.
*
*/
package com.revo.deployr.client.broker;
/**
 * <p>
 * Defines the currently supported set of
 * {@link com.revo.deployr.client.broker.RTask}.
 * </p>
 * Each {@link com.revo.deployr.client.broker.RTaskResult}
 * identifies its type on
 * {@link com.revo.deployr.client.broker.RTaskResult#getType}.
 */
public enum RTaskType {

    /**
     * Discrete task.
     */
    DISCRETE,

    /**
     * Pooled task.
     */
    POOLED,

    /**
     * Background task.
     */
    BACKGROUND
}
|
package com.example.android.todolist.database;
import android.arch.persistence.room.Database;
import android.arch.persistence.room.Room;
import android.arch.persistence.room.RoomDatabase;
import android.arch.persistence.room.TypeConverters;
import android.content.Context;
import android.util.Log;
@Database(entities = {TaskEntry.class}, version = 1, exportSchema = false)
@TypeConverters(DateConverter.class)
public abstract class AppDatabase extends RoomDatabase {

    private static final String LOG_TAG = AppDatabase.class.getSimpleName();
    private static final Object LOCK = new Object();
    private static final String DATABASE_NAME = "todolist";
    // volatile is required for safe double-checked locking: it guarantees that a
    // fully constructed instance is visible to other threads after publication.
    private static volatile AppDatabase sInstance;

    /**
     * Returns the process-wide singleton database, creating it lazily on first use.
     * Thread-safe via double-checked locking.
     *
     * @param context any context; the application context is used so no activity leaks
     * @return the shared {@link AppDatabase} instance
     */
    public static AppDatabase getInstance(Context context) {
        if (sInstance == null) {
            synchronized (LOCK) {
                // Re-check inside the lock: the original single check let two threads
                // race past it and build two separate database instances.
                if (sInstance == null) {
                    Log.d(LOG_TAG, "Creating new database instance");
                    sInstance = Room.databaseBuilder(context.getApplicationContext(),
                            AppDatabase.class, AppDatabase.DATABASE_NAME)
                            // COMPLETED (2) call allowMainThreadQueries before building the instance
                            // Queries should be done in a separate thread to avoid locking the UI
                            // We will allow this only TEMPORALLY to see that our DB is working
                            .allowMainThreadQueries()
                            .build();
                }
            }
        }
        Log.d(LOG_TAG, "Getting the database instance");
        return sInstance;
    }

    /** Room-generated DAO for task rows. */
    public abstract TaskDao taskDao();
}
|
package com.redhat.fuse.boosters.rest.http;
import org.apache.camel.Header;
import org.springframework.stereotype.Service;
//import com.coh.greetinglibrary2.GreetingStarter;
@Service("greetingsService")
public class GreetingsServiceImpl implements GreetingsService {

    /** Greeting prefix; already ends with the ", " separator. */
    private static final String THE_GREETINGS = "Hello, ";

    /**
     * Builds a greeting for the given name.
     *
     * @param name name taken from the Camel "name" header
     * @return a {@link Greetings} wrapping "Hello, <name>"
     */
    @Override
    public Greetings getGreetings(@Header("name") String name) {
        // The constant already contains ", " — the original appended another ", ",
        // producing "Hello, , <name>". Append the name directly.
        return new Greetings(THE_GREETINGS + name);
    }
}
|
package com.orhanobut.wasp;
import java.lang.reflect.Array;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.GenericDeclaration;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
import java.util.Arrays;
import java.util.NoSuchElementException;
/**
 * This class is copied from retrofit
 */
final class RetroTypes {
  private static final Type[] EMPTY_TYPE_ARRAY = new Type[0];

  private RetroTypes() {
    // No instances.
  }

  /**
   * Returns the raw (erased) class for {@code type}: the class itself for a plain class,
   * the raw type of a parameterized type, the array class for a generic array type,
   * {@code Object} for a type variable, and the raw type of the first upper bound for a
   * wildcard.
   *
   * @throws IllegalArgumentException if {@code type} is null or of an unsupported kind
   */
  public static Class<?> getRawType(Type type) {
    if (type instanceof Class<?>) {
      // Type is a normal class.
      return (Class<?>) type;
    } else if (type instanceof ParameterizedType) {
      ParameterizedType parameterizedType = (ParameterizedType) type;

      // I'm not exactly sure why getRawType() returns Type instead of Class. Neal isn't either but
      // suspects some pathological case related to nested classes exists.
      Type rawType = parameterizedType.getRawType();
      if (!(rawType instanceof Class)) throw new IllegalArgumentException();
      return (Class<?>) rawType;
    } else if (type instanceof GenericArrayType) {
      Type componentType = ((GenericArrayType) type).getGenericComponentType();
      return Array.newInstance(getRawType(componentType), 0).getClass();
    } else if (type instanceof TypeVariable) {
      // We could use the variable's bounds, but that won't work if there are multiple. Having a raw
      // type that's more general than necessary is okay.
      return Object.class;
    } else if (type instanceof WildcardType) {
      return getRawType(((WildcardType) type).getUpperBounds()[0]);
    } else {
      String className = type == null ? "null" : type.getClass().getName();
      throw new IllegalArgumentException("Expected a Class, ParameterizedType, or "
          + "GenericArrayType, but <" + type + "> is of type " + className);
    }
  }

  /**
   * Returns true if {@code a} and {@code b} are equal.
   */
  public static boolean equals(Type a, Type b) {
    if (a == b) {
      return true; // Also handles (a == null && b == null).
    } else if (a instanceof Class) {
      return a.equals(b); // Class already specifies equals().
    } else if (a instanceof ParameterizedType) {
      if (!(b instanceof ParameterizedType)) return false;

      ParameterizedType pa = (ParameterizedType) a;
      ParameterizedType pb = (ParameterizedType) b;
      return equal(pa.getOwnerType(), pb.getOwnerType())
          && pa.getRawType().equals(pb.getRawType())
          && Arrays.equals(pa.getActualTypeArguments(), pb.getActualTypeArguments());
    } else if (a instanceof GenericArrayType) {
      if (!(b instanceof GenericArrayType)) return false;

      GenericArrayType ga = (GenericArrayType) a;
      GenericArrayType gb = (GenericArrayType) b;
      return equals(ga.getGenericComponentType(), gb.getGenericComponentType());
    } else if (a instanceof WildcardType) {
      if (!(b instanceof WildcardType)) return false;

      WildcardType wa = (WildcardType) a;
      WildcardType wb = (WildcardType) b;
      return Arrays.equals(wa.getUpperBounds(), wb.getUpperBounds())
          && Arrays.equals(wa.getLowerBounds(), wb.getLowerBounds());
    } else if (a instanceof TypeVariable) {
      if (!(b instanceof TypeVariable)) return false;
      TypeVariable<?> va = (TypeVariable<?>) a;
      TypeVariable<?> vb = (TypeVariable<?>) b;
      return va.getGenericDeclaration() == vb.getGenericDeclaration()
          && va.getName().equals(vb.getName());
    } else {
      return false; // This isn't a type we support!
    }
  }

  /**
   * Returns the generic supertype for {@code supertype}. For example, given a class {@code
   * IntegerSet}, the result for when supertype is {@code Set.class} is {@code Set<Integer>} and the
   * result when the supertype is {@code Collection.class} is {@code Collection<Integer>}.
   */
  static Type getGenericSupertype(Type context, Class<?> rawType, Class<?> toResolve) {
    if (toResolve == rawType) return context;

    // We skip searching through interfaces if unknown is an interface.
    if (toResolve.isInterface()) {
      Class<?>[] interfaces = rawType.getInterfaces();
      for (int i = 0, length = interfaces.length; i < length; i++) {
        if (interfaces[i] == toResolve) {
          return rawType.getGenericInterfaces()[i];
        } else if (toResolve.isAssignableFrom(interfaces[i])) {
          return getGenericSupertype(rawType.getGenericInterfaces()[i], interfaces[i], toResolve);
        }
      }
    }

    // Check our supertypes.
    if (!rawType.isInterface()) {
      while (rawType != Object.class) {
        Class<?> rawSupertype = rawType.getSuperclass();
        if (rawSupertype == toResolve) {
          return rawType.getGenericSuperclass();
        } else if (toResolve.isAssignableFrom(rawSupertype)) {
          return getGenericSupertype(rawType.getGenericSuperclass(), rawSupertype, toResolve);
        }
        rawType = rawSupertype;
      }
    }

    // We can't resolve this further.
    return toResolve;
  }

  // Returns the index of toFind in array; throws if absent (callers expect presence).
  private static int indexOf(Object[] array, Object toFind) {
    for (int i = 0; i < array.length; i++) {
      if (toFind.equals(array[i])) return i;
    }
    throw new NoSuchElementException();
  }

  // Null-safe equality check.
  private static boolean equal(Object a, Object b) {
    return a == b || (a != null && a.equals(b));
  }

  // Null-safe hash code (null hashes to 0).
  private static int hashCodeOrZero(Object o) {
    return o != null ? o.hashCode() : 0;
  }

  /** Renders a type for messages: class name for classes, toString() otherwise. */
  public static String typeToString(Type type) {
    return type instanceof Class ? ((Class<?>) type).getName() : type.toString();
  }

  /**
   * Returns the generic form of {@code supertype}. For example, if this is {@code
   * ArrayList<String>}, this returns {@code Iterable<String>} given the input {@code
   * Iterable.class}.
   *
   * @param supertype a superclass of, or interface implemented by, this.
   */
  public static Type getSupertype(Type context, Class<?> contextRawType, Class<?> supertype) {
    if (!supertype.isAssignableFrom(contextRawType)) throw new IllegalArgumentException();
    return resolve(context, contextRawType,
        getGenericSupertype(context, contextRawType, supertype));
  }

  /**
   * Substitutes type variables in {@code toResolve} with the concrete arguments supplied
   * by {@code context}/{@code contextRawType}, recursing through arrays, parameterized
   * types and wildcards. Returns the input unchanged when nothing can be resolved.
   */
  public static Type resolve(Type context, Class<?> contextRawType, Type toResolve) {
    // This implementation is made a little more complicated in an attempt to avoid object-creation.
    while (true) {
      if (toResolve instanceof TypeVariable) {
        TypeVariable<?> typeVariable = (TypeVariable<?>) toResolve;
        toResolve = resolveTypeVariable(context, contextRawType, typeVariable);
        if (toResolve == typeVariable) {
          return toResolve;
        }
      } else if (toResolve instanceof Class && ((Class<?>) toResolve).isArray()) {
        Class<?> original = (Class<?>) toResolve;
        Type componentType = original.getComponentType();
        Type newComponentType = resolve(context, contextRawType, componentType);
        return componentType == newComponentType ? original : new GenericArrayTypeImpl(
            newComponentType);
      } else if (toResolve instanceof GenericArrayType) {
        GenericArrayType original = (GenericArrayType) toResolve;
        Type componentType = original.getGenericComponentType();
        Type newComponentType = resolve(context, contextRawType, componentType);
        return componentType == newComponentType ? original : new GenericArrayTypeImpl(
            newComponentType);
      } else if (toResolve instanceof ParameterizedType) {
        ParameterizedType original = (ParameterizedType) toResolve;
        Type ownerType = original.getOwnerType();
        Type newOwnerType = resolve(context, contextRawType, ownerType);
        boolean changed = newOwnerType != ownerType;

        Type[] args = original.getActualTypeArguments();
        for (int t = 0, length = args.length; t < length; t++) {
          Type resolvedTypeArgument = resolve(context, contextRawType, args[t]);
          if (resolvedTypeArgument != args[t]) {
            // Copy-on-write: only clone the argument array once something changed.
            if (!changed) {
              args = args.clone();
              changed = true;
            }
            args[t] = resolvedTypeArgument;
          }
        }

        return changed
            ? new ParameterizedTypeImpl(newOwnerType, original.getRawType(), args)
            : original;
      } else if (toResolve instanceof WildcardType) {
        WildcardType original = (WildcardType) toResolve;
        Type[] originalLowerBound = original.getLowerBounds();
        Type[] originalUpperBound = original.getUpperBounds();

        if (originalLowerBound.length == 1) {
          Type lowerBound = resolve(context, contextRawType, originalLowerBound[0]);
          if (lowerBound != originalLowerBound[0]) {
            return new WildcardTypeImpl(new Type[]{Object.class}, new Type[]{lowerBound});
          }
        } else if (originalUpperBound.length == 1) {
          Type upperBound = resolve(context, contextRawType, originalUpperBound[0]);
          if (upperBound != originalUpperBound[0]) {
            return new WildcardTypeImpl(new Type[]{upperBound}, EMPTY_TYPE_ARRAY);
          }
        }
        return original;
      } else {
        return toResolve;
      }
    }
  }

  /**
   * Resolves a single type variable against the declaring class's parameterization in
   * {@code context}; returns the variable itself when it cannot be reduced further.
   */
  private static Type resolveTypeVariable(
      Type context, Class<?> contextRawType, TypeVariable<?> unknown) {
    Class<?> declaredByRaw = declaringClassOf(unknown);

    // We can't reduce this further.
    if (declaredByRaw == null) return unknown;

    Type declaredBy = getGenericSupertype(context, contextRawType, declaredByRaw);
    if (declaredBy instanceof ParameterizedType) {
      int index = indexOf(declaredByRaw.getTypeParameters(), unknown);
      return ((ParameterizedType) declaredBy).getActualTypeArguments()[index];
    }

    return unknown;
  }

  /**
   * Returns the declaring class of {@code typeVariable}, or {@code null} if it was not declared by
   * a class.
   */
  private static Class<?> declaringClassOf(TypeVariable<?> typeVariable) {
    GenericDeclaration genericDeclaration = typeVariable.getGenericDeclaration();
    return genericDeclaration instanceof Class ? (Class<?>) genericDeclaration : null;
  }

  // Guard: primitives are not valid type arguments or wildcard bounds.
  private static void checkNotPrimitive(Type type) {
    if (type instanceof Class<?> && ((Class<?>) type).isPrimitive()) {
      throw new IllegalArgumentException();
    }
  }

  /** Immutable ParameterizedType implementation used by {@link #resolve}. */
  private static final class ParameterizedTypeImpl implements ParameterizedType {
    private final Type ownerType;
    private final Type rawType;
    private final Type[] typeArguments;

    public ParameterizedTypeImpl(Type ownerType, Type rawType, Type... typeArguments) {
      // Require an owner type if the raw type needs it.
      if (rawType instanceof Class<?>
          && (ownerType == null) != (((Class<?>) rawType).getEnclosingClass() == null)) {
        throw new IllegalArgumentException();
      }

      this.ownerType = ownerType;
      this.rawType = rawType;
      this.typeArguments = typeArguments.clone();
      for (Type typeArgument : this.typeArguments) {
        if (typeArgument == null) throw new NullPointerException();
        checkNotPrimitive(typeArgument);
      }
    }

    @Override
    public Type[] getActualTypeArguments() {
      // Defensive copy: callers must not mutate our internal array.
      return typeArguments.clone();
    }

    @Override
    public Type getRawType() {
      return rawType;
    }

    @Override
    public Type getOwnerType() {
      return ownerType;
    }

    @Override
    public boolean equals(Object other) {
      return other instanceof ParameterizedType
          && RetroTypes.equals(this, (ParameterizedType) other);
    }

    @Override
    public int hashCode() {
      return Arrays.hashCode(typeArguments) ^ rawType.hashCode() ^ hashCodeOrZero(ownerType);
    }

    @Override
    public String toString() {
      StringBuilder result = new StringBuilder(30 * (typeArguments.length + 1));
      result.append(typeToString(rawType));

      if (typeArguments.length == 0) return result.toString();

      result.append("<").append(typeToString(typeArguments[0]));
      for (int i = 1; i < typeArguments.length; i++) {
        result.append(", ").append(typeToString(typeArguments[i]));
      }
      return result.append(">").toString();
    }
  }

  /** Immutable GenericArrayType implementation used by {@link #resolve}. */
  private static final class GenericArrayTypeImpl implements GenericArrayType {
    private final Type componentType;

    public GenericArrayTypeImpl(Type componentType) {
      this.componentType = componentType;
    }

    @Override
    public Type getGenericComponentType() {
      return componentType;
    }

    @Override
    public boolean equals(Object o) {
      return o instanceof GenericArrayType
          && RetroTypes.equals(this, (GenericArrayType) o);
    }

    @Override
    public int hashCode() {
      return componentType.hashCode();
    }

    @Override
    public String toString() {
      return typeToString(componentType) + "[]";
    }
  }

  /**
   * The WildcardType interface supports multiple upper bounds and multiple
   * lower bounds. We only support what the Java 6 language needs - at most one
   * bound. If a lower bound is set, the upper bound must be Object.class.
   */
  private static final class WildcardTypeImpl implements WildcardType {
    private final Type upperBound;
    private final Type lowerBound;

    public WildcardTypeImpl(Type[] upperBounds, Type[] lowerBounds) {
      if (lowerBounds.length > 1) throw new IllegalArgumentException();
      if (upperBounds.length != 1) throw new IllegalArgumentException();

      if (lowerBounds.length == 1) {
        if (lowerBounds[0] == null) throw new NullPointerException();
        checkNotPrimitive(lowerBounds[0]);
        if (upperBounds[0] != Object.class) throw new IllegalArgumentException();
        this.lowerBound = lowerBounds[0];
        this.upperBound = Object.class;
      } else {
        if (upperBounds[0] == null) throw new NullPointerException();
        checkNotPrimitive(upperBounds[0]);
        this.lowerBound = null;
        this.upperBound = upperBounds[0];
      }
    }

    @Override
    public Type[] getUpperBounds() {
      return new Type[]{upperBound};
    }

    @Override
    public Type[] getLowerBounds() {
      return lowerBound != null ? new Type[]{lowerBound} : EMPTY_TYPE_ARRAY;
    }

    @Override
    public boolean equals(Object other) {
      return other instanceof WildcardType && RetroTypes.equals(this, (WildcardType) other);
    }

    @Override
    public int hashCode() {
      // This equals Arrays.hashCode(getLowerBounds()) ^ Arrays.hashCode(getUpperBounds()).
      return (lowerBound != null ? 31 + lowerBound.hashCode() : 1) ^ (31 + upperBound.hashCode());
    }

    @Override
    public String toString() {
      if (lowerBound != null) return "? super " + typeToString(lowerBound);
      if (upperBound == Object.class) return "?";
      return "? extends " + typeToString(upperBound);
    }
  }
}
|
package com.github.lapesd.hdtss;
import com.github.lapesd.hdtss.controller.execution.SparqlExecutor;
import io.micronaut.context.event.ApplicationEventListener;
import io.micronaut.runtime.server.event.ServerStartupEvent;
import jakarta.inject.Singleton;
import lombok.Value;
import org.checkerframework.checker.nullness.qual.NonNull;
/**
 * Eagerly initializes the SPARQL executor's dispatcher when the HTTP server starts,
 * rather than on the first incoming request.
 */
@Singleton
@Value
public class LateInitializer implements ApplicationEventListener<ServerStartupEvent> {
    @NonNull SparqlExecutor sparqlExecutor;

    /** Called by Micronaut once the server is up; triggers dispatcher initialization. */
    @Override
    public void onApplicationEvent(ServerStartupEvent ignored) {
        sparqlExecutor.dispatcher().init();
    }
}
|
package com.logginghub.logging.frontend.charting;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import com.logginghub.logging.LogEvent;
import com.logginghub.logging.LogEventMultiplexer;
import com.logginghub.logging.frontend.analysis.ChunkedResultMultiplexer;
import com.logginghub.logging.frontend.analysis.ResultKeyBuilder;
import com.logginghub.logging.frontend.analysis.SimpleMatcher;
import com.logginghub.logging.frontend.analysis.TimeChunkingGenerator;
import com.logginghub.logging.frontend.charting.model.ChartingModel;
import com.logginghub.logging.frontend.charting.model.ParserModel;
import com.logginghub.logging.frontend.charting.model.TimeChunkerModel;
import com.logginghub.logging.listeners.LogEventListener;
import com.logginghub.logging.messaging.PatternModel;
import com.logginghub.logging.utils.ValueStripper2;
import com.logginghub.utils.observable.ObservableList;
import com.logginghub.utils.observable.ObservableListListener;
/**
 * Manages the relationships between the {@link ChartingModel} and the various helper classes that
 * do all the data processing {@link ValueStripper2}, {@link ResultKeyBuilder} and
 * {@link TimeChunkingGenerator} etc. It also binds the visual charting elements to the
 * {@link ChartingModel} pages and charts.
 *
 * @author James
 *
 */
public class ChartingController implements LogEventListener {

    // Model this controller is bound to for its lifetime.
    private ChartingModel model;

    /**
     * List of generators - this need to receive clear messages
     */
    List<TimeChunkingGenerator> generators = new CopyOnWriteArrayList<TimeChunkingGenerator>();

    /**
     * The entities that listen to the raw logging stream
     */
    private LogEventMultiplexer logEventMultiplexer = new LogEventMultiplexer();

    /**
     * Connection point into ChunkedResultHandlers - ie the charts themselves will plug in here
     */
    private ChunkedResultMultiplexer chunkedResultMultiplexer = new ChunkedResultMultiplexer();

    /**
     * Binds this controller to the model and wires up listeners for the model's time
     * chunkers (and, transitively, their parsers and patterns).
     *
     * @param model the charting model to control
     */
    public ChartingController(ChartingModel model) {
        this.model = model;
        setupInternalStructures(model);
    }

    /** Returns the multiplexer that charts subscribe to for chunked results. */
    public ChunkedResultMultiplexer getChunkedResultMultiplexer() {
        return chunkedResultMultiplexer;
    }

    /**
     * Keeps the set of {@link TimeChunkingGenerator}s in sync with the model's time
     * chunkers: each added chunker gets a generator (stored as its counterpart) feeding
     * the chunked-result multiplexer; removal unhooks and discards the generator.
     */
    private void setupInternalStructures(ChartingModel model) {
        // Build the time chunkers
        ObservableList<TimeChunkerModel> timeChunkers = model.getTimeChunkers();
        timeChunkers.addListenerAndNotifyCurrent(new ObservableListListener<TimeChunkerModel>() {
            @Override public void onAdded(TimeChunkerModel timeChunkerModel) {
                TimeChunkingGenerator timeChunkingGenerator = createTimeChunkingGenerator(timeChunkerModel);
                timeChunkingGenerator.addChunkedResultHandler(chunkedResultMultiplexer);
                // The counterpart link lets onRemoved find the generator again later.
                timeChunkerModel.setCounterpart(timeChunkingGenerator);
                generators.add(timeChunkingGenerator);
            }

            @Override public void onRemoved(TimeChunkerModel timeChunkerModel, int index) {
                TimeChunkingGenerator timeChunkingGenerator = timeChunkerModel.getCounterpart();
                timeChunkingGenerator.removeChunkedResultHandler(chunkedResultMultiplexer);
                generators.remove(timeChunkingGenerator);
            }

            @Override public void onCleared() {}
        });
    }

    /**
     * Builds a {@link TimeChunkingGenerator} for the given chunker model and keeps it in
     * sync with the model's parsers: each parser gets a {@link ResultKeyBuilder} feeding
     * results into the generator.
     *
     * @param timeChunkerModel model describing the chunking interval and parsers
     * @return the wired-up generator
     */
    protected TimeChunkingGenerator createTimeChunkingGenerator(final TimeChunkerModel timeChunkerModel) {
        final TimeChunkingGenerator generator = new TimeChunkingGenerator(timeChunkerModel.getInterval().get());

        timeChunkerModel.getParserModels().addListenerAndNotifyCurrent(new ObservableListListener<ParserModel>() {
            @Override public void onRemoved(ParserModel parserModel, int index) {
                ResultKeyBuilder resultKeyBuilder = parserModel.getCounterpart();
                resultKeyBuilder.removeResultListener(generator);
            }

            @Override public void onCleared() {}

            @Override public void onAdded(ParserModel parserModel) {
                ResultKeyBuilder resultKeyBuilder = createResultKeyBuilder(parserModel);
                resultKeyBuilder.addResultListener(generator);
            }
        });

        return generator;
    }

    /**
     * Builds a {@link ResultKeyBuilder} for the given parser model and keeps it in sync
     * with the model's patterns: each added pattern gets a value stripper (or simple
     * matcher) whose results feed the key builder.
     *
     * @param parserModel model describing the key format and patterns
     * @return the wired-up result key builder
     */
    protected ResultKeyBuilder createResultKeyBuilder(ParserModel parserModel) {
        String format = parserModel.getFormat().get();
        final ResultKeyBuilder resultKeyBuilder = new ResultKeyBuilder(format);
        parserModel.setCounterpart(resultKeyBuilder);

        parserModel.getPatterns().addListenerAndNotifyCurrent(new ObservableListListener<PatternModel>() {
            @Override public void onRemoved(PatternModel t, int index) {
                ValueStripper2 valueStripper = t.getCounterpart();
                valueStripper.removeResultListener(resultKeyBuilder);
            }

            @Override public void onCleared() {}

            @Override public void onAdded(PatternModel t) {
                // NOTE(review): when the pattern has no {labels}, createValueStripper
                // registers a SimpleMatcher as the event listener but still returns the
                // (unregistered) stripper, which becomes the counterpart here — onRemoved
                // would then detach the stripper, not the matcher. Verify this is intended.
                ValueStripper2 valueStripper = createValueStripper(t, resultKeyBuilder);
                t.setCounterpart(valueStripper);
            }
        });
        return resultKeyBuilder;
    }

    /**
     * Builds the value extractor for a single pattern. Patterns containing {labels} get
     * a {@link ValueStripper2}; label-less patterns fall back to a {@link SimpleMatcher}
     * named via the pattern's name attribute, or fail fast if no name is set either.
     *
     * @param patternModel model holding the pattern text and flags
     * @param resultListener sink for extracted results (the parser's key builder)
     * @return the configured stripper (returned even when a SimpleMatcher was used)
     * @throws RuntimeException if the pattern has neither {labels} nor a name attribute
     */
    protected ValueStripper2 createValueStripper(PatternModel patternModel, ValueStripper2.ValueStripper2ResultListener resultListener) {
        String pattern = patternModel.getPattern().get();
        boolean debug = patternModel.getDebug().get();
        // NOTE(review): cleanUp is read from the model but never applied to the stripper — confirm.
        boolean cleanUp = patternModel.getCleanUp().get();

        ValueStripper2 stripper = new ValueStripper2();
        stripper.setPattern(pattern);
        stripper.setDebug(debug);

        if (stripper.getLabels().isEmpty()) {
            // Hmm no labels set, this could be a simple pattern
            String name = patternModel.getName().get();
            if (name != null) {
                SimpleMatcher matcher = new SimpleMatcher(pattern, name);
                matcher.addResultListener(resultListener);
                addEventListener(matcher);
            }
            else {
                // TODO : this is an xml error, might need to reformat it?
                throw new RuntimeException(String.format("Your parsers config looks broken : the pattern was '%s'; it didn't contain any {labels}, and it didn't have a name='' attribute either - you have to have one or more {labels} or a name element to build a matcher",
                                                         pattern));
            }
        }
        else {
            stripper.addResultListener(resultListener);
            addEventListener(stripper);
        }

        return stripper;
    }

    // Subscribes a matcher/stripper to the raw log event stream.
    private void addEventListener(LogEventListener matcher) {
        logEventMultiplexer.addLogEventListener(matcher);
    }

    /** Returns the model this controller was constructed with. */
    public ChartingModel getModel() {
        return model;
    }

    /** Entry point for raw log events; fans them out to all registered matchers. */
    @Override public void onNewLogEvent(LogEvent event) {
        logEventMultiplexer.onNewLogEvent(event);
    }

    /** Flushes every time-chunking generator so pending chunks are emitted. */
    public void flush() {
        for (TimeChunkingGenerator timeChunkingGenerator : generators) {
            timeChunkingGenerator.flush();
        }
    }
}
|
/*
* Copyright (c) 2004-2022, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hisp.dhis.mapgeneration;
import static com.google.common.base.Preconditions.checkNotNull;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.hisp.dhis.analytics.AnalyticsFinancialYearStartKey;
import org.hisp.dhis.analytics.AnalyticsService;
import org.hisp.dhis.common.BaseAnalyticalObject;
import org.hisp.dhis.common.Grid;
import org.hisp.dhis.commons.filter.FilterUtils;
import org.hisp.dhis.i18n.I18nManager;
import org.hisp.dhis.mapping.Map;
import org.hisp.dhis.mapping.MapView;
import org.hisp.dhis.organisationunit.OrganisationUnit;
import org.hisp.dhis.organisationunit.OrganisationUnitService;
import org.hisp.dhis.period.Period;
import org.hisp.dhis.setting.SettingKey;
import org.hisp.dhis.setting.SystemSettingManager;
import org.hisp.dhis.system.filter.OrganisationUnitWithCoordinatesFilter;
import org.hisp.dhis.user.CurrentUserService;
import org.hisp.dhis.user.User;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;
/**
* An implementation of MapGenerationService that uses GeoTools to generate
* maps.
*
* @author Kenneth Solbø Andersen <kennetsa@ifi.uio.no>
* @author Kristin Simonsen <krissimo@ifi.uio.no>
* @author Kjetil Andresen <kjetand@ifi.uio.no>
* @author Olai Solheim <olais@ifi.uio.no>
*/
@Service( "org.hisp.dhis.mapgeneration.MapGenerationService" )
public class GeoToolsMapGenerationService
implements MapGenerationService
{
// -------------------------------------------------------------------------
// Dependencies
// -------------------------------------------------------------------------
private final OrganisationUnitService organisationUnitService;
private final AnalyticsService analyticsService;
private final CurrentUserService currentUserService;
private final SystemSettingManager systemSettingManager;
private final I18nManager i18nManager;
/**
 * Creates the service with its required collaborators; every dependency is mandatory.
 *
 * @throws NullPointerException if any argument is null
 */
public GeoToolsMapGenerationService( OrganisationUnitService organisationUnitService,
    AnalyticsService analyticsService, CurrentUserService currentUserService,
    SystemSettingManager systemSettingManager, I18nManager i18nManager )
{
    // Guava's checkNotNull returns its argument, so validation and assignment
    // are combined into one step per dependency.
    this.organisationUnitService = checkNotNull( organisationUnitService );
    this.analyticsService = checkNotNull( analyticsService );
    this.currentUserService = checkNotNull( currentUserService );
    this.systemSettingManager = checkNotNull( systemSettingManager );
    this.i18nManager = checkNotNull( i18nManager );
}
// -------------------------------------------------------------------------
// MapGenerationService implementation
// -------------------------------------------------------------------------
/**
 * Renders a single map view by wrapping it in a one-view {@link Map} and delegating
 * to {@link #generateMapImage(Map)}.
 */
@Override
public BufferedImage generateMapImage( MapView mapView )
{
    Map map = new Map();

    map.getMapViews().add( mapView );

    return generateMapImage( map );
}
/**
 * Renders the map with defaults: the current date, no organisation unit context,
 * a width of 512 pixels and an unconstrained height.
 */
@Override
public BufferedImage generateMapImage( Map map )
{
    return generateMapImage( map, new Date(), null, 512, null );
}
/**
 * Renders the map for the currently logged-in user by delegating to
 * {@link #generateMapImageForUser(Map, Date, OrganisationUnit, Integer, Integer, User)}.
 */
@Override
public BufferedImage generateMapImage( Map map, Date date, OrganisationUnit unit, Integer width, Integer height )
{
    return generateMapImageForUser( map, date, unit, width, height, currentUserService.getCurrentUser() );
}
/**
 * Renders the given map for a specific user. When both width and height are null, a
 * default width is applied. Returns null when no renderable layer could be built. When
 * a data layer is present, the result is a composite of title, legend and map images.
 *
 * @param map the map to render; must not be null
 * @param date date used to resolve relative periods
 * @param unit organisation unit context (may be null)
 * @param width desired image width in pixels, may be null
 * @param height desired image height in pixels, may be null
 * @param user user whose context is used to build the layers
 * @return the rendered image, or null if the map yields no layers
 */
@Override
public BufferedImage generateMapImageForUser( Map map, Date date, OrganisationUnit unit, Integer width,
    Integer height, User user )
{
    Assert.isTrue( map != null, "Map cannot be null" );

    if ( width == null && height == null )
    {
        width = MapUtils.DEFAULT_MAP_WIDTH;
    }

    InternalMap internalMap = new InternalMap();

    // Reverse the views — presumably so the first view is drawn last, i.e. on top; confirm.
    List<MapView> mapViews = new ArrayList<>( map.getMapViews() );
    Collections.reverse( mapViews );

    for ( MapView mapView : mapViews )
    {
        InternalMapLayer mapLayer = getSingleInternalMapLayer( mapView, user, date );

        if ( mapLayer != null )
        {
            internalMap.getLayers().add( mapLayer );
        }
    }

    // Nothing renderable — signal by returning null rather than an empty image.
    if ( internalMap.getLayers().isEmpty() )
    {
        return null;
    }

    InternalMapLayer dataLayer = internalMap.getFirstDataLayer();

    BufferedImage mapImage = MapUtils.render( internalMap, width, height );

    if ( dataLayer == null )
    {
        mapViews.forEach( BaseAnalyticalObject::clearTransientState );

        return mapImage;
    }
    else
    {
        // A data layer exists: compose title + legend + map into one image.
        LegendSet legendSet = new LegendSet( dataLayer );

        BufferedImage legendImage = legendSet.render( i18nManager.getI18nFormat() );

        BufferedImage titleImage = MapUtils.renderTitle( map.getName(), getImageWidth( legendImage, mapImage ) );

        mapViews.forEach( BaseAnalyticalObject::clearTransientState );

        return combineLegendAndMapImages( titleImage, legendImage, mapImage );
    }
}
// -------------------------------------------------------------------------
// Internal
// -------------------------------------------------------------------------
// Fallback styling used when a map view does not specify its own colors,
// radii or opacity (see getSingleInternalMapLayer).
private static final String DEFAULT_COLOR_HIGH = "#ff0000"; // red
private static final String DEFAULT_COLOR_LOW = "#ffff00"; // yellow
private static final float DEFAULT_OPACITY = 0.75f;
private static final Integer DEFAULT_RADIUS_HIGH = 35;
private static final Integer DEFAULT_RADIUS_LOW = 15;
/**
 * Converts a single map view into an internal map layer.
 * <p>
 * Initializes the view for the given user and date, resolves its
 * organisation units (by level and/or group), filters out units without
 * coordinates, and applies styling (colors, radii, opacity) with defaults
 * where the view leaves them unset. Boundary views get one boundary map
 * object per unit; thematic views get data map objects from aggregated
 * analytics values, distributed into legend or automatic intervals.
 *
 * @param mapView the view to convert; may be null.
 * @param user user for which the view is initialized.
 * @param date reference date used to resolve relative periods.
 * @return the internal layer, or null when the view is null or a thematic
 *         view yields no map values / map objects.
 */
private InternalMapLayer getSingleInternalMapLayer( MapView mapView, User user, Date date )
{
    if ( mapView == null )
    {
        return null;
    }
    List<OrganisationUnit> atLevels = new ArrayList<>();
    List<OrganisationUnit> inGroups = new ArrayList<>();
    if ( mapView.hasOrganisationUnitLevels() )
    {
        atLevels.addAll( organisationUnitService.getOrganisationUnitsAtLevels( mapView.getOrganisationUnitLevels(),
            mapView.getOrganisationUnits() ) );
    }
    if ( mapView.hasItemOrganisationUnitGroups() )
    {
        inGroups.addAll( organisationUnitService.getOrganisationUnits( mapView.getItemOrganisationUnitGroups(),
            mapView.getOrganisationUnits() ) );
    }
    mapView.init( user, date, null, atLevels, inGroups, null );
    List<OrganisationUnit> organisationUnits = mapView.getAllOrganisationUnits();
    // Units without coordinates cannot be drawn, drop them up front.
    FilterUtils.filter( organisationUnits, new OrganisationUnitWithCoordinatesFilter() );
    // Index by UID so analytics values can be matched back to their unit.
    java.util.Map<String, OrganisationUnit> uidOuMap = new HashMap<>();
    for ( OrganisationUnit ou : organisationUnits )
    {
        uidOuMap.put( ou.getUid(), ou );
    }
    String name = mapView.getName();
    Period period = null;
    // Fixed period wins over relative periods. TODO integrate with
    // BaseAnalyticalObject
    if ( !mapView.getPeriods().isEmpty() )
    {
        period = mapView.getPeriods().get( 0 );
    }
    else if ( mapView.getRelatives() != null )
    {
        AnalyticsFinancialYearStartKey financialYearStart = systemSettingManager
            .getSystemSetting( SettingKey.ANALYTICS_FINANCIAL_YEAR_START, AnalyticsFinancialYearStartKey.class );
        period = mapView.getRelatives().getRelativePeriods( date, null, false, financialYearStart ).get( 0 );
    }
    Integer radiusLow = mapView.getRadiusLow() != null ? mapView.getRadiusLow() : DEFAULT_RADIUS_LOW;
    Integer radiusHigh = mapView.getRadiusHigh() != null ? mapView.getRadiusHigh() : DEFAULT_RADIUS_HIGH;
    // Get the low and high colors, typically in hexadecimal form, e.g. #ff3200
    Color colorLow = MapUtils
        .createColorFromString( StringUtils.trimToNull( mapView.getColorLow() ) != null ? mapView.getColorLow()
            : DEFAULT_COLOR_LOW );
    Color colorHigh = MapUtils
        .createColorFromString( StringUtils.trimToNull( mapView.getColorHigh() ) != null ? mapView.getColorHigh()
            : DEFAULT_COLOR_HIGH );
    float opacity = mapView.getOpacity() != null ? mapView.getOpacity().floatValue() : DEFAULT_OPACITY;
    boolean hasLegendSet = mapView.hasLegendSet();
    // Create and setup an internal layer
    InternalMapLayer mapLayer = new InternalMapLayer();
    mapLayer.setName( name );
    mapLayer.setPeriod( period );
    mapLayer.setMethod( mapView.getMethod() );
    mapLayer.setLayer( mapView.getLayer() );
    mapLayer.setRadiusLow( radiusLow );
    mapLayer.setRadiusHigh( radiusHigh );
    mapLayer.setColorLow( colorLow );
    mapLayer.setColorHigh( colorHigh );
    mapLayer.setOpacity( opacity );
    mapLayer.setClasses( mapView.getClasses() );
    if ( !mapView.isDataLayer() ) // Boundary (and facility) layer
    {
        for ( OrganisationUnit unit : organisationUnits )
        {
            mapLayer.addBoundaryMapObject( unit );
        }
    }
    else // Thematic layer
    {
        Collection<MapValue> mapValues = getAggregatedMapValues( mapView );
        if ( mapValues.isEmpty() )
        {
            return null;
        }
        // Build and set the internal GeoTools map objects for the layer.
        // Values whose org unit was filtered out (no coordinates) are skipped.
        for ( MapValue mapValue : mapValues )
        {
            OrganisationUnit orgUnit = uidOuMap.get( mapValue.getOu() );
            if ( orgUnit != null )
            {
                mapLayer.addDataMapObject( mapValue.getValue(), orgUnit );
            }
        }
        if ( !mapLayer.hasMapObjects() )
        {
            return null;
        }
        // Create an interval set for this map layer that distributes its map
        // objects into their respective intervals.
        if ( hasLegendSet )
        {
            mapLayer.setIntervalSetFromLegendSet( mapView.getLegendSet() );
            mapLayer.distributeAndUpdateMapObjectsInIntervalSet();
        }
        else
        {
            mapLayer.setAutomaticIntervalSet( mapLayer.getClasses() );
            mapLayer.distributeAndUpdateMapObjectsInIntervalSet();
        }
        // Update the radius of each map object in this map layer according to
        // its map object's highest and lowest values.
        mapLayer.applyInterpolatedRadii();
    }
    return mapLayer;
}
/**
 * Fetches aggregated analytics values for the given map view and converts
 * the resulting grid into map values. An empty list results when the grid
 * contains no usable rows.
 */
private List<MapValue> getAggregatedMapValues( MapView mapView )
{
    return getMapValues( analyticsService.getAggregatedDataValues( mapView ) );
}
/**
 * Creates a list of aggregated map values from an analytics grid.
 * <p>
 * The organisation unit UID is read from the second-to-last column and the
 * value from the last column of each row. Rows that are null, too short,
 * or hold a null organisation unit or non-numeric value are skipped.
 *
 * @param grid the analytics grid, expected to have at least 3 columns.
 * @return the map values, never null.
 */
private List<MapValue> getMapValues( Grid grid )
{
    List<MapValue> mapValues = new ArrayList<>();
    for ( List<Object> row : grid.getRows() )
    {
        if ( row == null || row.size() < 3 )
        {
            continue;
        }
        String ou = (String) row.get( row.size() - 2 );
        Object value = row.get( row.size() - 1 );
        // Guard against null/non-numeric cells: the previous unconditional
        // ((Number) value).doubleValue() threw a NullPointerException on
        // rows with a null value.
        if ( ou != null && value instanceof Number )
        {
            mapValues.add( new MapValue( ou, ((Number) value).doubleValue() ) );
        }
    }
    return mapValues;
}
/**
 * Combines title, legend and map images into one image: title band across
 * the top, legend bottom-left, map to the right of the legend.
 */
private BufferedImage combineLegendAndMapImages( BufferedImage titleImage, BufferedImage legendImage,
    BufferedImage mapImage )
{
    Assert.notNull( titleImage, "Title image cannot be null" );
    Assert.notNull( legendImage, "Legend image cannot be null" );
    Assert.notNull( mapImage, "Map image cannot be null" );
    // Wide enough for legend + map side by side; tall enough for whichever
    // is higher: title + map, or the legend (image height cannot be less
    // than the legend).
    int canvasWidth = getImageWidth( legendImage, mapImage );
    int canvasHeight = Math.max( titleImage.getHeight() + mapImage.getHeight(), (legendImage.getHeight() + 1) );
    BufferedImage combined = new BufferedImage( canvasWidth, canvasHeight, mapImage.getType() );
    Graphics canvas = combined.getGraphics();
    canvas.drawImage( titleImage, 0, 0, null );
    canvas.drawImage( legendImage, 0, MapUtils.TITLE_HEIGHT, null );
    canvas.drawImage( mapImage, legendImage.getWidth(), MapUtils.TITLE_HEIGHT, null );
    return combined;
}
/**
 * Total width of the legend and map images laid out side by side; a null
 * image contributes zero width.
 */
private int getImageWidth( BufferedImage legendImage, BufferedImage mapImage )
{
    int legendWidth = legendImage == null ? 0 : legendImage.getWidth();
    int mapWidth = mapImage == null ? 0 : mapImage.getWidth();
    return legendWidth + mapWidth;
}
}
|
/**
* Copyright 2020 Alibaba Group Holding Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.maxgraph.groot.coordinator;
import com.alibaba.maxgraph.groot.common.SnapshotListener;
import com.alibaba.maxgraph.common.config.CommonConfig;
import com.alibaba.maxgraph.common.config.Configs;
import com.alibaba.maxgraph.common.config.CoordinatorConfig;
import com.alibaba.maxgraph.compiler.api.exception.MaxGraphException;
import com.alibaba.maxgraph.common.util.ThreadFactoryUtils;
import com.alibaba.maxgraph.groot.common.wal.LogReader;
import com.alibaba.maxgraph.groot.common.wal.LogService;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* SnapshotManager runs on a central node, e.g. Master/Coordinator. It is the core component of the
* snapshot protocol. SnapshotManager is designed for the following purpose:
* - increase writeSnapshotId, and broadcast to all Frontend nodes
* - accumulates ingest progresses from the WriteAgents, and calculate the available querySnapshotId, then
* broadcast querySnapshotId to all Frontend nodes
*
* There are two parameters we need to decide: Frontend nodes count and broadcast interval.
*
* For most cases, Frontend nodes won't be more than 100. I think we can assume the maximum count of
* Frontend nodes is 256, and use 256 Frontend nodes in the stress test.
*
* The broadcast interval affects the time user have to wait for querying the data after
* {@link com.alibaba.maxgraph.frontendservice.realtime.RealtimeWriter#writeOperations(OperationTxn)}
* returns successfully. Shorter broadcast interval means less time to wait, but that leads to
* heavier load of SnapshotManager, since SnapshotManager need to broadcast writeSnapshotId / querySnapshotId
* to all the Frontend nodes at the fixed interval.
*
* The default broadcast interval can be set to 1 second. User can increase the interval to reduce the load
 * of SnapshotManager. If the user wants a shorter interval, we suggest providing a sync write
 * interface rather than decreasing the interval to milliseconds. The general idea is to let SnapshotManager broadcast
* new writeSnapshotId to Frontend nodes on demand for syncWrite, instead of consistently broadcast new
* writeSnapshotId in high frequency. I will briefly introduce the syncWrite implementation below.
*
* When RealtimeWriter receives a syncWrite call, it invokes
* {@link com.alibaba.maxgraph.frontendservice.realtime.RealtimeWriter#writeOperations(OperationTxn)}
* same as asyncWrite and get the returned snapshotId, e.g. s0. Then RealtimeWriter registers a SnapshotListener
* with s0 to the SnapshotManager, and blocking on the listener callback. SnapshotManager will cache the
* listener and immediately broadcast a new writeSnapshotId to the Frontend nodes. When SnapshotManager knows
* the available querySnapshotId is greater than or equal to s0, it first broadcasts new querySnapshotId to
* all Frontend nodes, then triggers the listener callback to complete the syncWrite call.
*
*---------------------------------------------------------------------------------------------------------------
*
* Another important issue is failure recovery.
*
* We assume that there is a distributed reliable KV store, e.g. ZooKeeper, for recovery information persistence.
*
* The information need to persist are:
* - last generated writeSnapshotId
* - last available querySnapshotId
* - consumed snapshotId of each GraphNode // not necessary?
* - consumed offset of each queue on each GraphNode
*
* When and in what frequency should we persist these information:
* - "last generated writeSnapshotId" and "last available querySnapshotId" are the same. They must be persisted
* before they are broadcast to the Frontend nodes. Otherwise, the system might step backward after process
* recovery, which is not acceptable.
*
* - "consumed snapshotId of each GraphNode" and "consumed offset of each queue on each GraphNode" can be
 * persisted asynchronously. We can tolerate losing the latest values of this information. The only drawback is
* that the data replay might process some duplicate data, which is acceptable.
*
* After persist snapshot information as described above, the recovery process will be simply load the persisted
* information from the reliable KV store when initializing the SnapshotManager.
*
*/
public class SnapshotManager {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotManager.class);

    // Meta store paths under which recovery state is persisted.
    public static final String WRITE_SNAPSHOT_ID_PATH = "write_snapshot_id";
    public static final String QUERY_SNAPSHOT_INFO_PATH = "query_snapshot_info";
    public static final String QUEUE_OFFSETS_PATH = "queue_offsets";

    private final MetaStore metaStore;
    private final LogService logService;
    private final WriteSnapshotIdNotifier writeSnapshotIdNotifier;
    private final int storeCount;
    private final int queueCount;
    private final long snapshotIncreaseIntervalMs;
    private final long offsetsPersistIntervalMs;
    // Latest snapshot that is safe to query; volatile so readers see updates
    // without taking querySnapshotLock.
    private volatile SnapshotInfo querySnapshotInfo;
    // Last snapshot id handed out to writers; guarded by writeSnapshotLock for
    // updates, volatile for reads.
    private volatile long writeSnapshotId;
    // Per-store progress reported via commitSnapshotId.
    private final Map<Integer, SnapshotInfo> storeToSnapshotInfo;
    private final Map<Integer, List<Long>> storeToOffsets;
    // Minimum consumed offset per queue across all stores; initialized in recover().
    private AtomicReference<List<Long>> queueOffsetsRef;
    private ScheduledExecutorService increaseWriteSnapshotIdScheduler;
    private ScheduledExecutorService persistOffsetsScheduler;
    // Long-lived listeners notified on every query-snapshot advance.
    private final List<QuerySnapshotListener> listeners = new CopyOnWriteArrayList<>();
    // One-shot listeners keyed by the snapshot id they wait for; guarded by querySnapshotLock.
    private final TreeMap<Long, List<SnapshotListener>> snapshotToListeners = new TreeMap<>();
    private final Object querySnapshotLock = new Object();
    private final Lock writeSnapshotLock = new ReentrantLock();
    private final ObjectMapper objectMapper;

    public SnapshotManager(Configs configs, MetaStore metaStore, LogService logService,
                           WriteSnapshotIdNotifier writeSnapshotIdNotifier) {
        this.metaStore = metaStore;
        this.logService = logService;
        this.writeSnapshotIdNotifier = writeSnapshotIdNotifier;
        this.objectMapper = new ObjectMapper();
        this.queueCount = CommonConfig.INGESTOR_QUEUE_COUNT.get(configs);
        this.storeCount = CommonConfig.STORE_NODE_COUNT.get(configs);
        this.snapshotIncreaseIntervalMs = CoordinatorConfig.SNAPSHOT_INCREASE_INTERVAL_MS.get(configs);
        this.offsetsPersistIntervalMs = CoordinatorConfig.OFFSETS_PERSIST_INTERVAL_MS.get(configs);
        this.storeToSnapshotInfo = new ConcurrentHashMap<>();
        this.storeToOffsets = new ConcurrentHashMap<>();
    }

    /**
     * Recovers persisted state and starts the two background schedulers: one
     * that periodically increases and broadcasts writeSnapshotId, and one that
     * periodically persists the minimum consumed queue offsets.
     */
    public void start() {
        try {
            recover();
        } catch (IOException e) {
            throw new MaxGraphException(e);
        }
        this.increaseWriteSnapshotIdScheduler = Executors.newSingleThreadScheduledExecutor(
                ThreadFactoryUtils.daemonThreadFactoryWithLogExceptionHandler("increase-write-snapshot-scheduler", logger));
        this.increaseWriteSnapshotIdScheduler.scheduleWithFixedDelay(() -> {
            try {
                long snapshotId = increaseWriteSnapshotId();
                logger.debug("writeSnapshotId updated to [" + snapshotId + "]");
            } catch (Exception e) {
                logger.error("error in increaseWriteSnapshotId, ignore", e);
            }
        }, 0L, snapshotIncreaseIntervalMs, TimeUnit.MILLISECONDS);
        this.persistOffsetsScheduler = Executors.newSingleThreadScheduledExecutor(
                ThreadFactoryUtils.daemonThreadFactoryWithLogExceptionHandler("persist-offsets-scheduler", logger));
        this.persistOffsetsScheduler.scheduleWithFixedDelay(() -> {
            try {
                updateQueueOffsets();
            } catch (Exception e) {
                logger.error("error in updateQueueOffsets, ignore", e);
            }
        }, offsetsPersistIntervalMs, offsetsPersistIntervalMs, TimeUnit.MILLISECONDS);
    }

    /**
     * Stops both schedulers, waiting up to 3 seconds for each to terminate.
     */
    public void stop() {
        if (this.persistOffsetsScheduler != null) {
            this.persistOffsetsScheduler.shutdown();
            try {
                this.persistOffsetsScheduler.awaitTermination(3000, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                // Ignore
            }
            this.persistOffsetsScheduler = null;
        }
        if (this.increaseWriteSnapshotIdScheduler != null) {
            this.increaseWriteSnapshotIdScheduler.shutdown();
            try {
                this.increaseWriteSnapshotIdScheduler.awaitTermination(3000, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                // Ignore
            }
            this.increaseWriteSnapshotIdScheduler = null;
        }
    }

    private void checkMetaPath(String path) throws FileNotFoundException {
        if (!this.metaStore.exists(path)) {
            throw new FileNotFoundException(path);
        }
    }

    /**
     * Loads persisted snapshot ids and queue offsets from the meta store and
     * validates them: querySnapshotId must not exceed writeSnapshotId, the
     * offset list must match the configured queue count, and every recovered
     * offset must still be readable from the log service.
     */
    private void recover() throws IOException {
        checkMetaPath(QUERY_SNAPSHOT_INFO_PATH);
        checkMetaPath(WRITE_SNAPSHOT_ID_PATH);
        checkMetaPath(QUEUE_OFFSETS_PATH);
        byte[] querySnapshotInfoBytes = this.metaStore.read(QUERY_SNAPSHOT_INFO_PATH);
        SnapshotInfo recoveredQuerySnapshotInfo =
                this.objectMapper.readValue(querySnapshotInfoBytes, SnapshotInfo.class);
        byte[] writeSnapshotIdBytes = this.metaStore.read(WRITE_SNAPSHOT_ID_PATH);
        long recoveredWriteSnapshotId = this.objectMapper.readValue(writeSnapshotIdBytes, Long.class);
        if (recoveredQuerySnapshotInfo.getSnapshotId() > recoveredWriteSnapshotId) {
            throw new IllegalStateException("recovered querySnapshotInfo [" + recoveredQuerySnapshotInfo +
                    "] > writeSnapshotId [" + recoveredWriteSnapshotId + "]");
        }
        byte[] queueOffsetsBytes = this.metaStore.read(QUEUE_OFFSETS_PATH);
        List<Long> recoveredQueueOffsets = this.objectMapper.readValue(queueOffsetsBytes,
                new TypeReference<List<Long>>() {});
        if (recoveredQueueOffsets.size() != this.queueCount) {
            throw new IllegalStateException("recovered queueCount [" + recoveredQueueOffsets.size() +
                    "], but expect queueCount [" + this.queueCount + "]");
        }
        for (int i = 0; i < this.queueCount; i++) {
            long recoveredOffset = recoveredQueueOffsets.get(i);
            LogReader reader = null;
            try {
                // Probe that the next offset is still readable; the reader is
                // only opened for validation and closed immediately.
                reader = logService.createReader(i, recoveredOffset + 1);
            } catch (Exception e) {
                throw new IOException("recovered queue [" + i + "] offset [" + recoveredOffset + "] is not available",
                        e);
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
        }
        this.querySnapshotInfo = recoveredQuerySnapshotInfo;
        this.writeSnapshotId = recoveredWriteSnapshotId;
        this.queueOffsetsRef = new AtomicReference<>(recoveredQueueOffsets);
    }

    /**
     * WriterAgent use this method to commit snapshot information to SnapshotManager.
     * Progress is monotonic: older snapshotIds / smaller offsets never replace
     * newer ones.
     *
     * @param storeId store node reporting progress
     * @param snapshotId highest snapshot the store has ingested
     * @param ddlSnapshotId highest DDL snapshot the store has ingested
     * @param queueOffsets per-queue consumed offsets of the store
     */
    public synchronized void commitSnapshotId(int storeId, long snapshotId, long ddlSnapshotId, List<Long> queueOffsets) {
        this.storeToSnapshotInfo.compute(storeId, (k, v) ->
                (v != null && v.getSnapshotId() >= snapshotId) ? v : new SnapshotInfo(snapshotId, ddlSnapshotId));
        this.storeToOffsets.compute(storeId, (k, v) -> {
            if (v != null) {
                if (v.size() != queueOffsets.size()) {
                    throw new IllegalArgumentException("current offset size [" + v.size() + "], commit offset size [" +
                            queueOffsets.size() + "]");
                }
                for (int i = 0; i < v.size(); i++) {
                    if (v.get(i) > queueOffsets.get(i)) {
                        // Reject regressions; keep the existing offsets.
                        return v;
                    }
                }
            }
            return queueOffsets;
        });
        maybeUpdateQuerySnapshotId();
    }

    /**
     * Registers a one-shot listener that fires once the query snapshot reaches
     * the given id; fires immediately when it already has.
     */
    public void addSnapshotListener(long snapshotId, SnapshotListener snapshotListener) {
        synchronized (this.querySnapshotLock) {
            if (querySnapshotInfo.getSnapshotId() >= snapshotId) {
                snapshotListener.onSnapshotAvailable();
                return;
            }
            List<SnapshotListener> snapshotListeners =
                    this.snapshotToListeners.computeIfAbsent(snapshotId, k -> new ArrayList<>());
            snapshotListeners.add(snapshotListener);
        }
    }

    /**
     * Registers a long-lived listener and immediately notifies it of the
     * current query snapshot.
     */
    public void addListener(QuerySnapshotListener listener) {
        this.listeners.add(listener);
        SnapshotInfo querySnapshotInfo = this.querySnapshotInfo;
        try {
            listener.snapshotAdvanced(querySnapshotInfo.getSnapshotId(), querySnapshotInfo.getDdlSnapshotId());
        } catch (Exception e) {
            logger.error("error occurred when notify listeners", e);
        }
    }

    public void removeListener(QuerySnapshotListener listener) {
        this.listeners.remove(listener);
    }

    /**
     * Advances querySnapshotInfo to the minimum snapshot reported across all
     * stores, persists it, and notifies one-shot and long-lived listeners.
     * No-op until every store has reported at least once.
     */
    private void maybeUpdateQuerySnapshotId() {
        if (this.storeToSnapshotInfo.size() < this.storeCount) {
            logger.warn("Not all store nodes reported snapshot progress. current storeToSnapshot [" +
                    this.storeToSnapshotInfo + "]");
            return;
        }
        SnapshotInfo minSnapshotInfo = Collections.min(this.storeToSnapshotInfo.values());
        if (minSnapshotInfo.getSnapshotId() > this.querySnapshotInfo.getSnapshotId()) {
            synchronized (this.querySnapshotLock) {
                long snapshotId = minSnapshotInfo.getSnapshotId();
                long ddlSnapshotId = minSnapshotInfo.getDdlSnapshotId();
                long currentSnapshotId = this.querySnapshotInfo.getSnapshotId();
                long currentDdlSnapshotId = this.querySnapshotInfo.getDdlSnapshotId();
                // Re-check under the lock; another thread may have advanced it.
                if (snapshotId > currentSnapshotId) {
                    try {
                        if (ddlSnapshotId < currentDdlSnapshotId) {
                            // During failover, store might send smaller ddlSnapshotId
                            minSnapshotInfo = new SnapshotInfo(snapshotId, currentDdlSnapshotId);
                        }
                        // Persist before publishing so the id never steps
                        // backward after recovery.
                        persistQuerySnapshotId(minSnapshotInfo);
                        this.querySnapshotInfo = minSnapshotInfo;
                        logger.debug("querySnapshotInfo updated to [" + minSnapshotInfo + "]");
                    } catch (IOException e) {
                        logger.error("update querySnapshotInfo failed", e);
                        return;
                    }
                    long newSnapshotId = minSnapshotInfo.getSnapshotId();
                    long newDdlSnapshotId = minSnapshotInfo.getDdlSnapshotId();
                    // Fire and remove all one-shot listeners waiting for ids
                    // up to and including the new snapshot.
                    NavigableMap<Long, List<SnapshotListener>> listenersToTrigger =
                            this.snapshotToListeners.headMap(newSnapshotId, true);
                    for (Map.Entry<Long, List<SnapshotListener>> listenerEntry : listenersToTrigger.entrySet()) {
                        List<SnapshotListener> pendingListeners = listenerEntry.getValue();
                        for (SnapshotListener listener : pendingListeners) {
                            try {
                                listener.onSnapshotAvailable();
                            } catch (Exception e) {
                                // Include the cause; previously the exception was dropped.
                                logger.warn("trigger snapshotListener failed. snapshotId [" + snapshotId + "]", e);
                            }
                        }
                    }
                    listenersToTrigger.clear();
                    for (QuerySnapshotListener listener : this.listeners) {
                        try {
                            listener.snapshotAdvanced(newSnapshotId, newDdlSnapshotId);
                        } catch (Exception e) {
                            logger.error("error occurred when notify normal listeners", e);
                        }
                    }
                }
            }
        }
    }

    private void persistQuerySnapshotId(SnapshotInfo snapshotInfo) throws IOException {
        byte[] b = this.objectMapper.writeValueAsBytes(snapshotInfo);
        this.metaStore.write(QUERY_SNAPSHOT_INFO_PATH, b);
    }

    /**
     * Atomically increases writeSnapshotId by one, persisting it before the
     * new value is published or broadcast to Frontend nodes.
     *
     * @return the new writeSnapshotId
     */
    public long increaseWriteSnapshotId() throws IOException {
        this.writeSnapshotLock.lock();
        try {
            long snapshotId = this.writeSnapshotId + 1;
            persistWriteSnapshotId(snapshotId);
            this.writeSnapshotId = snapshotId;
            this.writeSnapshotIdNotifier.notifyWriteSnapshotIdChanged(this.writeSnapshotId);
            return this.writeSnapshotId;
        } finally {
            this.writeSnapshotLock.unlock();
        }
    }

    public void lockWriteSnapshot() {
        this.writeSnapshotLock.lock();
    }

    public void unlockWriteSnapshot() {
        this.writeSnapshotLock.unlock();
    }

    public long getCurrentWriteSnapshotId() {
        return this.writeSnapshotId;
    }

    private void persistWriteSnapshotId(long snapshotId) throws IOException {
        byte[] b = this.objectMapper.writeValueAsBytes(snapshotId);
        this.metaStore.write(WRITE_SNAPSHOT_ID_PATH, b);
    }

    /**
     * Computes, persists and publishes the new per-queue offsets as the
     * minimum offset across all stores. No-op until every store has reported
     * and nothing advanced.
     */
    private void updateQueueOffsets() throws IOException {
        if (this.storeToOffsets.size() < this.storeCount) {
            logger.warn("Not all store nodes reported queue offsets. current storeToOffsets [" +
                    this.storeToOffsets + "]");
            return;
        }
        List<Long> queueOffsets = this.queueOffsetsRef.get();
        List<Long> newQueueOffsets = new ArrayList<>(queueOffsets);
        boolean changed = false;
        for (int qId = 0; qId < queueOffsets.size(); qId++) {
            long minOffset = Long.MAX_VALUE;
            for (List<Long> storeOffsets : this.storeToOffsets.values()) {
                minOffset = Math.min(storeOffsets.get(qId), minOffset);
            }
            if (minOffset != Long.MAX_VALUE && minOffset > newQueueOffsets.get(qId)) {
                newQueueOffsets.set(qId, minOffset);
                changed = true;
            }
        }
        if (changed) {
            // Persist before publishing so recovery never sees an unpersisted offset.
            persistQueueOffsets(newQueueOffsets);
            this.queueOffsetsRef.set(newQueueOffsets);
        }
    }

    private void persistQueueOffsets(List<Long> queueOffsets) throws IOException {
        byte[] bytes = this.objectMapper.writeValueAsBytes(queueOffsets);
        this.metaStore.write(QUEUE_OFFSETS_PATH, bytes);
    }

    /**
     * Get offset list according to the input queueId list. This is for IngestNode to get the correct
     * start offset for replay.
     *
     * @param queueIdList queue ids to look up
     * @return offsets in the same order as queueIdList
     */
    public List<Long> getTailOffsets(List<Integer> queueIdList) {
        List<Long> tailOffsets = new ArrayList<>(queueIdList.size());
        List<Long> queueOffsets = this.queueOffsetsRef.get();
        for (int queueId : queueIdList) {
            tailOffsets.add(queueOffsets.get(queueId));
        }
        return tailOffsets;
    }

    public List<Long> getQueueOffsets() {
        return this.queueOffsetsRef.get();
    }
}
|
package com.donkeycode.core.response;
import lombok.Getter;
import lombok.Setter;
/**
 * Response wrapper carrying a typed payload plus a result flag, with fluent
 * builder-style mutators for chaining.
 *
 * Created by Ace on 2017/6/11.
 */
@Setter
@Getter
public class ObjectResponse<T> extends BaseResponse {
    // Payload carried by this response.
    private T data;
    // Result flag of the operation.
    private boolean rel;

    /**
     * Fluent setter for the result flag.
     *
     * @return this response, for chaining
     */
    public ObjectResponse<T> rel(boolean rel) {
        this.rel = rel;
        return this;
    }

    /**
     * Fluent setter for the payload.
     *
     * @return this response, for chaining
     */
    public ObjectResponse<T> data(T data) {
        this.data = data;
        return this;
    }
}
|
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.oracle.model;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.model.DBIcon;
import org.jkiss.dbeaver.model.DBPImage;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSObject;
import org.jkiss.dbeaver.model.struct.DBSObjectType;
import java.util.HashMap;
import java.util.Map;
/**
* Object type
*/
public enum OracleObjectType implements DBSObjectType {
CLUSTER("CLUSTER", null, DBSObject.class, null),
CONSTRAINT ("CONSTRAINT", DBIcon.TREE_CONSTRAINT, OracleTableConstraint.class, null), // fake object
CONSUMER_GROUP("CONSUMER GROUP", null, DBSObject.class, null),
CONTEXT("CONTEXT", null, DBSObject.class, null),
DIRECTORY("DIRECTORY", null, DBSObject.class, null),
EVALUATION_CONTEXT("EVALUATION CONTEXT", null, DBSObject.class, null),
FOREIGN_KEY ("FOREIGN KEY", DBIcon.TREE_FOREIGN_KEY, OracleTableForeignKey.class, null), // fake object
FUNCTION("FUNCTION", DBIcon.TREE_PROCEDURE, OracleProcedureStandalone.class, new ObjectFinder() {
@Override
public OracleProcedureStandalone findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.proceduresCache.getObject(monitor, schema, objectName);
}
}),
INDEX("INDEX", DBIcon.TREE_INDEX, OracleTableIndex.class, new ObjectFinder() {
@Override
public OracleTableIndex findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.indexCache.getObject(monitor, schema, objectName);
}
}),
INDEX_PARTITION("INDEX PARTITION", null, DBSObject.class, null),
INDEXTYPE("INDEXTYPE", null, DBSObject.class, null),
JAVA_CLASS("JAVA CLASS", DBIcon.TREE_JAVA_CLASS, OracleJavaClass.class, new ObjectFinder() {
@Override
public OracleJavaClass findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.javaCache.getObject(monitor, schema, objectName);
}
}),
JAVA_DATA("JAVA DATA", null, DBSObject.class, null),
JAVA_RESOURCE("JAVA RESOURCE", null, DBSObject.class, null),
JOB("JOB", null, DBSObject.class, null),
JOB_CLASS("JOB CLASS", null, DBSObject.class, null),
LIBRARY("LIBRARY", null, DBSObject.class, null),
LOB("CONTENT", null, DBSObject.class, null),
MATERIALIZED_VIEW("MATERIALIZED VIEW", null, DBSObject.class, null),
OPERATOR("OPERATOR", null, DBSObject.class, null),
PACKAGE("PACKAGE", DBIcon.TREE_PACKAGE, OraclePackage.class, new ObjectFinder() {
@Override
public OraclePackage findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.packageCache.getObject(monitor, schema, objectName);
}
}),
PACKAGE_BODY("PACKAGE BODY", DBIcon.TREE_PACKAGE, OraclePackage.class, new ObjectFinder() {
@Override
public OraclePackage findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.packageCache.getObject(monitor, schema, objectName);
}
}),
PROCEDURE("PROCEDURE", DBIcon.TREE_PROCEDURE, OracleProcedureStandalone.class, new ObjectFinder() {
@Override
public OracleProcedureStandalone findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.proceduresCache.getObject(monitor, schema, objectName);
}
}),
PROGRAM("PROGRAM", null, DBSObject.class, null),
QUEUE("QUEUE", null, DBSObject.class, null),
RULE("RULE", null, DBSObject.class, null),
RULE_SET("RULE SET", null, DBSObject.class, null),
SCHEDULE("SCHEDULE", null, DBSObject.class, null),
SEQUENCE("SEQUENCE", DBIcon.TREE_SEQUENCE, OracleSequence.class, new ObjectFinder() {
@Override
public OracleSequence findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.sequenceCache.getObject(monitor, schema, objectName);
}
}),
SYNONYM("SYNONYM", DBIcon.TREE_SYNONYM, OracleSynonym.class, new ObjectFinder() {
@Override
public OracleSynonym findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.synonymCache.getObject(monitor, schema, objectName);
}
}),
TABLE("TABLE", DBIcon.TREE_TABLE, OracleTable.class, new ObjectFinder() {
@Override
public OracleTableBase findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.tableCache.getObject(monitor, schema, objectName);
}
}),
TABLE_PARTITION("TABLE PARTITION", null, DBSObject.class, null),
TRIGGER("TRIGGER", DBIcon.TREE_TRIGGER, OracleTrigger.class, new ObjectFinder() {
@Override
public OracleTrigger findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.triggerCache.getObject(monitor, schema, objectName);
}
}),
TYPE("TYPE", DBIcon.TREE_DATA_TYPE, OracleDataType.class, new ObjectFinder() {
@Override
public OracleDataType findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.dataTypeCache.getObject(monitor, schema, objectName);
}
}),
TYPE_BODY("TYPE BODY", DBIcon.TREE_DATA_TYPE, OracleDataType.class, new ObjectFinder() {
@Override
public OracleDataType findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.dataTypeCache.getObject(monitor, schema, objectName);
}
}),
VIEW("VIEW", DBIcon.TREE_VIEW, OracleView.class, new ObjectFinder() {
@Override
public OracleView findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
return schema.tableCache.getObject(monitor, schema, objectName, OracleView.class);
}
}),
WINDOW("WINDOW", null, DBSObject.class, null),
WINDOW_GROUP("WINDOW GROUP", null, DBSObject.class, null),
XML_SCHEMA("XML SCHEMA", null, DBSObject.class, null);
private static final Log log = Log.getLog(OracleObjectType.class);
private static Map<String, OracleObjectType> typeMap = new HashMap<>();
static {
for (OracleObjectType type : values()) {
typeMap.put(type.getTypeName(), type);
}
}
public static OracleObjectType getByType(String typeName)
{
return typeMap.get(typeName);
}
private static interface ObjectFinder {
DBSObject findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException;
}
private final String objectType;
private final DBPImage image;
private final Class<? extends DBSObject> typeClass;
private final ObjectFinder finder;
<OBJECT_TYPE extends DBSObject> OracleObjectType(String objectType, DBPImage image, Class<OBJECT_TYPE> typeClass, ObjectFinder finder)
{
this.objectType = objectType;
this.image = image;
this.typeClass = typeClass;
this.finder = finder;
}
public boolean isBrowsable()
{
return finder != null;
}
@Override
public String getTypeName()
{
return objectType;
}
@Override
public String getDescription()
{
return null;
}
@Override
public DBPImage getImage()
{
return image;
}
@Override
public Class<? extends DBSObject> getTypeClass()
{
return typeClass;
}
public DBSObject findObject(DBRProgressMonitor monitor, OracleSchema schema, String objectName) throws DBException
{
if (finder != null) {
return finder.findObject(monitor, schema, objectName);
} else {
return null;
}
}
    /**
     * Resolves a database object reference to its model object.
     * Falls back to returning the plain object name whenever resolution is not
     * possible (remote db-link reference, unknown or non-browsable type, missing
     * schema, or missing object), logging the reason at debug level.
     *
     * @return the resolved {@code DBSObject}, or the raw object name when resolution fails
     */
    public static Object resolveObject(
        DBRProgressMonitor monitor,
        OracleDataSource dataSource,
        String dbLink,
        String objectTypeName,
        String objectOwner,
        String objectName) throws DBException
    {
        if (dbLink != null) {
            // Objects behind a database link are not browsable locally; keep the bare name.
            return objectName;
        }
        OracleObjectType objectType = OracleObjectType.getByType(objectTypeName);
        if (objectType == null) {
            log.debug("Unrecognized Oracle object type: " + objectTypeName);
            return objectName;
        }
        if (!objectType.isBrowsable()) {
            log.debug("Unsupported Oracle object type: " + objectTypeName);
            return objectName;
        }
        final OracleSchema schema = dataSource.getSchema(monitor, objectOwner);
        if (schema == null) {
            log.debug("Schema '" + objectOwner + "' not found");
            return objectName;
        }
        final DBSObject object = objectType.findObject(monitor, schema, objectName);
        if (object == null) {
            log.debug(objectTypeName + " '" + objectName + "' not found in '" + schema.getName() + "'");
            return objectName;
        }
        return object;
    }
    /** @return the Oracle type name (same value as {@link #getTypeName()}) */
    @Override
    public String toString()
    {
        return objectType;
    }
}
|
package net.snowflake.client.jdbc;
import static junit.framework.TestCase.assertEquals;
import static junit.framework.TestCase.fail;
import static net.snowflake.client.AbstractDriverIT.getFullPathFileInResource;
import static net.snowflake.client.jdbc.SnowflakeDriverIT.findFile;
import static net.snowflake.client.jdbc.SnowflakeUtil.systemGetProperty;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.sql.*;
import java.util.Properties;
import net.snowflake.client.category.TestCategoryOthers;
import net.snowflake.client.core.HttpUtil;
import net.snowflake.common.core.SqlState;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TemporaryFolder;
// To run these tests, you must:
// 1.) Start up a proxy connection. The simplest ways are via Squid or BurpSuite. Confluence doc on
// setup here:
// https://snowflakecomputing.atlassian.net/wiki/spaces/EN/pages/65438343/How+to+setup+Proxy+Server+for+Client+tests
// 2.) Enter your own username and password for the account you're connecting to
// 3.) Adjust parameters like role, database, schema, etc to match with account accordingly
@Category(TestCategoryOthers.class)
public class CustomProxyLatestIT {
@Rule public TemporaryFolder tmpFolder = new TemporaryFolder();
/**
* Before running this test, change the user and password to appropriate values. Set up 2
* different proxy ports that can run simultaneously. This is easy with Burpsuite.
*
* <p>This tests that separate, successful connections can be made with 2 separate proxies at the
* same time.
*
* @throws SQLException
*/
@Test
@Ignore
public void test2ProxiesWithSameJVM() throws SQLException {
Properties props = new Properties();
props.put("user", "USER");
props.put("password", "PASSWORD");
props.put("useProxy", true);
props.put("proxyHost", "localhost");
props.put("proxyPort", "8080");
// Set up the first connection and proxy
Connection con1 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
Statement stmt = con1.createStatement();
ResultSet rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
// Change the proxy settings for the 2nd connection, but all other properties can be re-used.
// Set up the second connection.
props.put("proxyPort", "8081");
Connection con2 =
DriverManager.getConnection(
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com", props);
rs = con2.createStatement().executeQuery("select 2");
rs.next();
assertEquals(2, rs.getInt(1));
// To ensure that the http client map is functioning properly, make a third connection with the
// same properties and proxy as the first connection.
props.put("proxyPort", "8080");
Connection con3 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
stmt = con3.createStatement();
rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
// Assert that although there are 3 connections, 2 of them (1st and 3rd) use the same httpclient
// object in the map. The total map size should be 2 for the 3 connections.
assertEquals(2, HttpUtil.httpClient.size());
con2.close();
con1.close();
con3.close();
}
/**
* This requires a TLS proxy connection. This can be done by configuring the squid.conf file (with
* squid proxy) and adding certs to the keystore. For info on setup, see
* https://snowflakecomputing.atlassian.net/wiki/spaces/EN/pages/65438343/How+to+setup+Proxy+Server+for+Client+tests.
*
* @throws SQLException
*/
@Test
@Ignore
public void testTLSIssue() throws SQLException {
Properties props = new Properties();
props.put("user", "USER");
props.put("password", "PASSWORD");
props.put("tracing", "ALL");
props.put("useProxy", true);
props.put("proxyHost", "localhost");
props.put("proxyPort", "3128");
// protocol must be specified for https (default is http)
props.put("proxyProtocol", "https");
Connection con1 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
Statement stmt = con1.createStatement();
ResultSet rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
// Test with jvm properties instead
props.put("useProxy", false);
System.setProperty("http.useProxy", "true");
System.setProperty("http.proxyHost", "localhost");
System.setProperty("http.proxyPort", "3128");
con1 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
stmt = con1.createStatement();
rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
}
/**
* Test nonProxyHosts is honored with JVM proxy parameters. Recommended test: 1. Test that proxy
* is not used with current settings since nonProxyHosts = * 2. Change nonProxyHosts value to
* nonsense value to ensure it can be set and proxy still works 3. Run same 2 above tests but with
* http instead of https proxy parameters for non-TLS proxy
*/
@Test
@Ignore
public void testJVMParamsWithNonProxyHostsHonored() throws SQLException {
Properties props = new Properties();
props.put("user", "USER");
props.put("password", "PASSWORD");
props.put("tracing", "ALL");
// Set JVM properties. Test once with TLS proxy, and edit to test with non-TLS proxy
System.setProperty("http.useProxy", "true");
System.setProperty("https.proxyHost", "localhost");
System.setProperty("https.proxyPort", "3128");
System.setProperty("http.nonProxyHosts", "*");
Connection con =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccoutn.us-east-1.snowflakecomputing.com", props);
Statement stmt = con.createStatement();
ResultSet rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
con.close();
}
/** Test TLS issue against S3 client to ensure proxy works with PUT/GET statements */
@Test
@Ignore
public void testTLSIssueWithConnectionStringAgainstS3()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=3128&useProxy=true&proxyProtocol=https";
// should finish correctly
runProxyConnection(connectionUrl);
}
/**
* Before running this test, change the user and password to appropriate values. Set up a proxy
* with Burpsuite so you can see what POST and GET requests are going through the proxy.
*
* <p>This tests that the NonProxyHosts field is sucessfully updated for the same HttpClient
* object.
*
* @throws SQLException
*/
@Test
@Ignore
public void testNonProxyHostAltering() throws SQLException {
Properties props = new Properties();
props.put("user", "USER");
props.put("password", "PASSWORD");
props.put("useProxy", true);
props.put("proxyHost", "localhost");
props.put("proxyPort", "8080");
props.put("nonProxyHosts", ".foo.com|.baz.com");
// Set up the first connection and proxy
Connection con1 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
Statement stmt = con1.createStatement();
ResultSet rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
// Assert that nonProxyHosts string is correct for initial value
HttpUtil.httpClient
.entrySet()
.forEach((entry) -> assertEquals(".foo.com|.baz.com", entry.getKey().getNonProxyHosts()));
// Now make 2nd connection with all the same settings except different nonProxyHosts field
props.put("nonProxyHosts", "*.snowflakecomputing.com");
// Manually check here that nonProxyHost setting works by checking that nothing else goes
// through proxy from this point onward. *.snowflakecomputing.com should ensure that proxy is
// skipped.
Connection con2 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
rs = stmt.executeQuery("select 2");
rs.next();
assertEquals(2, rs.getInt(1));
assertEquals(1, HttpUtil.httpClient.size());
// Assert that the entry contains the correct updated value for nonProxyHosts string
HttpUtil.httpClient
.entrySet()
.forEach(
(entry) -> assertEquals("*.snowflakecomputing.com", entry.getKey().getNonProxyHosts()));
con1.close();
con2.close();
}
/**
* This tests that the HttpClient object is re-used when no proxies are present.
*
* @throws SQLException
*/
@Test
@Ignore
public void testSizeOfHttpClientNoProxies() throws SQLException {
Properties props = new Properties();
props.put("user", "USER");
props.put("password", "PASSWORD");
// Set up the first connection and proxy
Connection con1 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
Statement stmt = con1.createStatement();
ResultSet rs = stmt.executeQuery("select 1");
rs.next();
assertEquals(1, rs.getInt(1));
// put in some fake properties that won't get picked up because useProxy=false
props.put("useProxy", false);
props.put("proxyHost", "localhost");
props.put("proxyPort", "8080");
Connection con2 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
// Assert that the HttpClient table has only 1 entry for both non-proxy entries
assertEquals(1, HttpUtil.httpClient.size());
props.put("ocspFailOpen", "false");
Connection con3 =
DriverManager.getConnection(
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com", props);
// Table should grow in size by 1 when OCSP mode changes
assertEquals(2, HttpUtil.httpClient.size());
con1.close();
con2.close();
con3.close();
}
@Test
@Ignore
public void testCorrectProxySettingFromConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=8080"
+ "&useProxy=true";
// should finish correctly
runProxyConnection(connectionUrl);
connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=8080"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
// should finish correctly
runProxyConnection(connectionUrl);
}
@Test
@Ignore
public void testWrongProxyPortSettingFromConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=31281"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&nonProxyHosts=*.foo.com|localhost&useProxy=true";
// should show warning for null response for the requests
runProxyConnection(connectionUrl);
}
@Test
@Ignore
public void testWrongProxyPasswordSettingFromConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=3128"
+ "&proxyUser=testuser2&proxyPassword=test111"
+ "&nonProxyHosts=*.foo.com|localhost&useProxy=true";
// should show warning for null response for the requests
try {
runProxyConnection(connectionUrl);
} catch (SQLException e) {
assertThat(
"JDBC driver encountered communication error",
e.getErrorCode(),
equalTo(ErrorCode.NETWORK_ERROR.getMessageCode()));
}
}
@Test
@Ignore
public void testInvalidProxyPortFromConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&nonProxyHosts=*.foo.com|localhost&useProxy=true";
// should throw SnowflakeSQLException: 200051
try {
runProxyConnection(connectionUrl);
} catch (SQLException e) {
assertThat(
"invalid proxy error",
e.getErrorCode(),
equalTo(ErrorCode.INVALID_PROXY_PROPERTIES.getMessageCode()));
}
}
@Test
@Ignore
public void testNonProxyHostsFromConnectionString() throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=31281"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&nonProxyHosts=*.snowflakecomputing.com|localhost&useProxy=true";
// should finish correctly
runProxyConnection(connectionUrl);
}
@Test
@Ignore
public void testWrongNonProxyHostsFromConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://s3testaccount.us-east-1.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=31281"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&nonProxyHosts=*.foo.com|localhost&useProxy=true";
// should fail to connect
runProxyConnection(connectionUrl);
}
public void runProxyConnection(String connectionUrl) throws ClassNotFoundException, SQLException {
Authenticator.setDefault(
new Authenticator() {
@Override
public PasswordAuthentication getPasswordAuthentication() {
System.out.println("RequestorType: " + getRequestorType());
System.out.println("Protocol: " + getRequestingProtocol().toLowerCase());
return new PasswordAuthentication(
systemGetProperty("http.proxyUser"),
systemGetProperty("http.proxyPassword").toCharArray());
}
});
System.setProperty("http.useProxy", "true");
System.setProperty("https.proxyHost", "localhost");
System.setProperty("https.proxyPort", "3128");
// SET USER AND PASSWORD FIRST
String user = "USER";
String passwd = "PASSWORD";
Properties _connectionProperties = new Properties();
_connectionProperties.put("user", user);
_connectionProperties.put("password", passwd);
_connectionProperties.put("role", "accountadmin");
_connectionProperties.put("database", "SNOWHOUSE_IMPORT");
_connectionProperties.put("schema", "DEV");
Class.forName("net.snowflake.client.jdbc.SnowflakeDriver");
long counter = 0;
while (true) {
Connection con = DriverManager.getConnection(connectionUrl, _connectionProperties);
Statement stmt = con.createStatement();
stmt.execute("use warehouse TINY_WAREHOUSE");
stmt.execute("CREATE OR REPLACE STAGE testPutGet_stage");
assertTrue(
"Failed to put a file",
stmt.execute(
"PUT file://" + getFullPathFileInResource("orders_100.csv") + " @testPutGet_stage"));
String sql = "select $1 from values(1),(3),(5),(7)";
ResultSet res = stmt.executeQuery(sql);
while (res.next()) {
System.out.println("value: " + res.getInt(1));
}
System.out.println("OK - " + counter);
con.close();
counter++;
break;
}
}
@Test
@Ignore
public void testProxyConnectionWithAzure() throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL";
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */ true, /* usesIncorrectJVMParameters */ true);
}
@Test
@Ignore
public void testProxyConnectionWithAzureWithConnectionString()
throws ClassNotFoundException, SQLException {
String connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=8080"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */ false, /* usesIncorrectJVMParameters */ true);
}
@Test
@Ignore
public void testProxyConnectionWithoutProxyPortOrHost()
throws ClassNotFoundException, SQLException {
// proxyPort is empty
String connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort="
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
try {
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
true);
fail();
} catch (SQLException e) {
assertEquals(SqlState.CONNECTION_EXCEPTION, e.getSQLState());
}
// proxyPort is non-integer value
connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=cheese"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
try {
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
true);
fail();
} catch (SQLException e) {
assertEquals(SqlState.CONNECTION_EXCEPTION, e.getSQLState());
}
// proxyHost is empty, proxyPort is valid
connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=&proxyPort=3128"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
try {
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
true);
fail();
} catch (SQLException e) {
assertEquals(SqlState.CONNECTION_EXCEPTION, e.getSQLState());
}
// proxyPort and proxyHost are empty, but username and password are specified
connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&useProxy=true";
try {
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
true);
fail();
} catch (SQLException e) {
assertEquals(SqlState.CONNECTION_EXCEPTION, e.getSQLState());
}
}
/**
* Before running this test, change the user and password in runAzureProxyConnection() to
* appropriate values. Set up a proxy with Burpsuite so you can see what POST and GET requests are
* going through the proxy.
*
* <p>This tests that the NonProxyHosts field is sucessfully updated for the same HttpClient
* object.
*
* @throws SQLException
*/
@Test
@Ignore
public void testProxyConnectionWithJVMParameters() throws SQLException, ClassNotFoundException {
String connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL";
// Set valid JVM system properties
System.setProperty("http.useProxy", "true");
System.setProperty("http.proxyHost", "localhost");
System.setProperty("http.proxyPort", "8080");
System.setProperty("http.nonProxyHosts", "*.snowflakecomputing.com");
SnowflakeUtil.systemSetEnv("NO_PROXY", "*.google.com");
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
false);
SnowflakeUtil.systemUnsetEnv("NO_PROXY");
}
@Test
@Ignore
public void testProxyConnectionWithAzureWithWrongConnectionString()
throws ClassNotFoundException {
String connectionUrl =
"jdbc:snowflake://aztestaccount.east-us-2.azure.snowflakecomputing.com/?tracing=ALL"
+ "&proxyHost=localhost&proxyPort=31281"
+ "&proxyUser=testuser1&proxyPassword=test"
+ "&nonProxyHosts=*.foo.com%7Clocalhost&useProxy=true";
try {
runAzureProxyConnection(
connectionUrl, /* usesConnectionProperties */
false, /* usesIncorrectJVMParameters */
true);
} catch (SQLException e) {
assertThat(
"JDBC driver encountered communication error",
e.getErrorCode(),
equalTo(ErrorCode.NETWORK_ERROR.getMessageCode()));
}
}
public void runAzureProxyConnection(
String connectionUrl, boolean usesProperties, boolean usesIncorrectJVMProperties)
throws ClassNotFoundException, SQLException {
Authenticator.setDefault(
new Authenticator() {
@Override
public PasswordAuthentication getPasswordAuthentication() {
System.out.println("RequestorType: " + getRequestorType());
System.out.println("Protocol: " + getRequestingProtocol().toLowerCase());
return new PasswordAuthentication(
systemGetProperty("http.proxyUser"),
systemGetProperty("http.proxyPassword").toCharArray());
}
});
// Enable these parameters to use JVM proxy parameters instead of connection string proxy
// parameters.
// Connection parameters override JVM proxy params, so these incorrect params won't cause
// failures IF connection proxy params are enabled and working.
if (usesIncorrectJVMProperties) {
System.setProperty("http.useProxy", "true");
System.setProperty("http.proxyHost", "fakehost");
System.setProperty("http.proxyPort", "8081");
System.setProperty("https.proxyHost", "fakehost");
System.setProperty("https.proxyPort", "8081");
}
// SET USER AND PASSWORD FIRST
String user = "USER";
String passwd = "PASSWORD";
Properties _connectionProperties = new Properties();
_connectionProperties.put("user", user);
_connectionProperties.put("password", passwd);
_connectionProperties.put("role", "SYSADMIN");
_connectionProperties.put("tracing", "ALL");
if (usesProperties) {
_connectionProperties.put("useProxy", true);
_connectionProperties.put("proxyHost", "localhost");
_connectionProperties.put("proxyPort", "8080");
_connectionProperties.put("proxyUser", "testuser1");
_connectionProperties.put("proxyPassword", "test");
}
Class.forName("net.snowflake.client.jdbc.SnowflakeDriver");
String fileName = "test_copy.csv";
Connection con = DriverManager.getConnection(connectionUrl, _connectionProperties);
Statement stmt = con.createStatement();
stmt.execute("create or replace warehouse MEGTEST");
stmt.execute("use database MEGDB");
stmt.execute("use schema MEGSCHEMA");
stmt.execute("CREATE OR REPLACE STAGE testPutGet_stage");
try {
String TEST_DATA_FILE = "orders_100.csv";
String sourceFilePath = getFullPathFileInResource(TEST_DATA_FILE);
File destFolder = tmpFolder.newFolder();
String destFolderCanonicalPath = destFolder.getCanonicalPath();
String destFolderCanonicalPathWithSeparator = destFolderCanonicalPath + File.separator;
assertTrue(
"Failed to put a file",
stmt.execute("PUT file://" + sourceFilePath + " @testPutGet_stage"));
findFile(stmt, "ls @testPutGet_stage/");
// download the file we just uploaded to stage
assertTrue(
"Failed to get a file",
stmt.execute(
"GET @testPutGet_stage 'file://" + destFolderCanonicalPath + "' parallel=8"));
// Make sure that the downloaded file exists, it should be gzip compressed
File downloaded = new File(destFolderCanonicalPathWithSeparator + TEST_DATA_FILE + ".gz");
assert (downloaded.exists());
Process p =
Runtime.getRuntime()
.exec("gzip -d " + destFolderCanonicalPathWithSeparator + TEST_DATA_FILE + ".gz");
p.waitFor();
File original = new File(sourceFilePath);
File unzipped = new File(destFolderCanonicalPathWithSeparator + TEST_DATA_FILE);
assert (original.length() == unzipped.length());
} catch (Throwable t) {
t.printStackTrace();
} finally {
stmt.execute("DROP STAGE IF EXISTS testGetPut_stage");
stmt.close();
}
}
}
|
package io.quarkus.oidc.runtime;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import org.jboss.logging.Logger;
import io.quarkus.oidc.TenantConfigResolver;
import io.quarkus.oidc.TenantResolver;
import io.vertx.ext.web.RoutingContext;
@ApplicationScoped
public class DefaultTenantConfigResolver {

    private static final Logger LOG = Logger.getLogger(DefaultTenantConfigResolver.class);
    private static final String CURRENT_TENANT_CONFIG = "io.quarkus.oidc.current.tenant.config";

    @Inject
    Instance<TenantResolver> tenantResolver;

    @Inject
    Instance<TenantConfigResolver> tenantConfigResolver;

    // Lazily created contexts for tenants resolved dynamically via TenantConfigResolver,
    // keyed by tenant id.
    private final Map<String, TenantConfigContext> dynamicTenantsConfig = new ConcurrentHashMap<>();

    @Inject
    TenantConfigBean tenantConfigBean;

    /**
     * Fails fast at startup when more than one bean of either resolver type is registered,
     * since an ambiguous resolver cannot be injected deterministically.
     */
    @PostConstruct
    public void verifyResolvers() {
        if (tenantConfigResolver.isResolvable()) {
            if (tenantConfigResolver.isAmbiguous()) {
                throw new IllegalStateException("Multiple " + TenantConfigResolver.class + " beans registered");
            }
        }
        if (tenantResolver.isAmbiguous()) {
            throw new IllegalStateException("Multiple " + TenantResolver.class + " beans registered");
        }
    }

    /**
     * Resolve {@linkplain TenantConfigContext} which contains the tenant configuration and
     * the active OIDC connection instance which may be null.
     *
     * @param context the current request context
     * @param create if true then the OIDC connection must be available or established
     *        for the resolution to be successful
     * @return the resolved context; the dynamic {@link TenantConfigResolver} takes
     *         precedence over the static {@link TenantResolver}
     */
    TenantConfigContext resolve(RoutingContext context, boolean create) {
        TenantConfigContext config = getTenantConfigFromConfigResolver(context, create);
        if (config == null) {
            config = getTenantConfigFromTenantResolver(context);
        }
        return config;
    }

    /**
     * Resolves a statically configured tenant via the optional {@link TenantResolver},
     * falling back to the default tenant when no matching static configuration exists.
     */
    private TenantConfigContext getTenantConfigFromTenantResolver(RoutingContext context) {
        String tenantId = null;
        if (tenantResolver.isResolvable()) {
            tenantId = tenantResolver.get().resolve(context);
        }
        TenantConfigContext configContext = tenantId != null ? tenantConfigBean.getStaticTenantsConfig().get(tenantId) : null;
        if (configContext == null) {
            if (tenantId != null && !tenantId.isEmpty()) {
                // FIX: the '%s' placeholder previously had no matching argument, so the
                // tenant id never appeared in the log message.
                LOG.debugf("No configuration with a tenant id '%s' has been found, using the default configuration",
                        tenantId);
            }
            configContext = tenantConfigBean.getDefaultTenant();
        }
        return configContext;
    }

    /**
     * @return true when the request is NOT handled by the dynamic config resolver,
     *         i.e. resolution may proceed on the current (blocking-capable) path
     */
    boolean isBlocking(RoutingContext context) {
        return getTenantConfigFromConfigResolver(context, false) == null;
    }

    /**
     * Resolves a tenant via the optional dynamic {@link TenantConfigResolver}.
     * The resolved config is cached on the routing context under
     * {@code CURRENT_TENANT_CONFIG} so the resolver runs at most once per request.
     *
     * @param create when true, a missing {@link TenantConfigContext} is created on demand
     * @return the tenant context, or null when no dynamic resolver applies
     */
    private TenantConfigContext getTenantConfigFromConfigResolver(RoutingContext context, boolean create) {
        if (tenantConfigResolver.isResolvable()) {
            OidcTenantConfig tenantConfig;
            if (context.get(CURRENT_TENANT_CONFIG) != null) {
                tenantConfig = context.get(CURRENT_TENANT_CONFIG);
            } else {
                OidcTenantConfig newTenantConfig = this.tenantConfigResolver.get().resolve(context);
                if (newTenantConfig != null && !newTenantConfig.tenantEnabled) {
                    // A disabled tenant is treated the same as "no dynamic config".
                    newTenantConfig = null;
                }
                tenantConfig = newTenantConfig;
                context.put(CURRENT_TENANT_CONFIG, tenantConfig);
            }
            if (tenantConfig != null) {
                String tenantId = tenantConfig.getTenantId()
                        .orElseThrow(() -> new IllegalStateException("You must provide a tenant id"));
                TenantConfigContext tenantContext = dynamicTenantsConfig.get(tenantId);
                if (tenantContext == null && create) {
                    // NOTE(review): computeIfAbsent on ConcurrentHashMap is already atomic;
                    // the synchronized block looks redundant but is kept to preserve the
                    // original locking behavior — confirm before removing.
                    synchronized (dynamicTenantsConfig) {
                        return dynamicTenantsConfig.computeIfAbsent(tenantId,
                                clientId -> tenantConfigBean.getTenantConfigContextFactory().apply(tenantConfig));
                    }
                }
                return tenantContext;
            }
        }
        return null;
    }
}
|
/*
* MIT License
*
* Copyright (c) 2022 MASES s.r.l.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**************************************************************************************
* <auto-generated>
* This code was generated from a template using JCOReflector
*
* Manual changes to this file may cause unexpected behavior in your application.
* Manual changes to this file will be overwritten if the code is regenerated.
* </auto-generated>
*************************************************************************************/
package system.windows.forms;
import org.mases.jcobridge.*;
import org.mases.jcobridge.netreflection.*;
// Import section
// PACKAGE_IMPORT_SECTION
/**
* The base .NET class managing System.Windows.Forms.SizeType, System.Windows.Forms, Version=6.0.2.0, Culture=neutral, PublicKeyToken=b77a5c561934e089.
* <p>
*
* See: <a href="https://docs.microsoft.com/en-us/dotnet/api/System.Windows.Forms.SizeType" target="_top">https://docs.microsoft.com/en-us/dotnet/api/System.Windows.Forms.SizeType</a>
*/
public class SizeType extends NetObject {
    // NOTE: auto-generated JCOReflector wrapper (see file header); manual changes
    // will be overwritten on regeneration — keep edits to comments only.

    /**
     * Fully assembly qualified name: System.Windows.Forms, Version=6.0.2.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
     */
    public static final String assemblyFullName = "System.Windows.Forms, Version=6.0.2.0, Culture=neutral, PublicKeyToken=b77a5c561934e089";
    /**
     * Assembly name: System.Windows.Forms
     */
    public static final String assemblyShortName = "System.Windows.Forms";
    /**
     * Qualified class name: System.Windows.Forms.SizeType
     */
    public static final String className = "System.Windows.Forms.SizeType";
    // Shared bridge to the .NET runtime, bound to the declaring assembly.
    static JCOBridge bridge = JCOBridgeInstance.getInstance(assemblyFullName);
    /**
     * The type managed from JCOBridge. See {@link JCType}
     */
    public static JCType classType = createType();
    // Reflected .NET enum used to translate between names and enum values.
    static JCEnum enumReflected = createEnum();
    // Per-instance enum value; set in the constructor.
    JCEnum classInstance = null;

    // Resolves the .NET type via the bridge; returns null (and logs) on failure.
    static JCType createType() {
        try {
            String classToCreate = className + ", "
                    + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Creating %s", classToCreate);
            JCType typeCreated = bridge.GetType(classToCreate);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Created: %s",
                        (typeCreated != null) ? typeCreated.toString() : "Returned null value");
            return typeCreated;
        } catch (JCException e) {
            JCOReflector.writeLog(e);
            return null;
        }
    }

    // Resolves the reflected .NET enum via the bridge; returns null (and logs) on failure.
    static JCEnum createEnum() {
        try {
            String enumToCreate = className + ", "
                    + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Creating Enum %s", enumToCreate);
            JCEnum enumCreated = bridge.GetEnum(enumToCreate);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Created Enum: %s",
                        (enumCreated != null) ? enumCreated.toString() : "Returned null value");
            return enumCreated;
        } catch (JCException e) {
            JCOReflector.writeLog(e);
            return null;
        }
    }

    // Adds a .NET assembly reference through the bridge, translating native failures.
    void addReference(String ref) throws Throwable {
        try {
            bridge.AddReference(ref);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }

    /**
     * Internal constructor. Use with caution
     */
    public SizeType(java.lang.Object instance) {
        super(instance);
        if (instance instanceof JCObject) {
            try {
                String enumName = NetEnum.GetName(classType, (JCObject)instance);
                classInstance = enumReflected.fromValue(enumName);
            } catch (Throwable t) {
                // Fall back to the raw reflected enum when name resolution fails.
                JCOReflector.writeLog(t);
                classInstance = enumReflected;
            }
        } else if (instance instanceof JCEnum) {
            classInstance = (JCEnum)instance;
        }
    }

    public SizeType() {
        super();
        // add reference to assemblyName.dll file
        try {
            addReference(JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
        } catch (Throwable jcne) {
            JCOReflector.writeLog(jcne);
        }
    }

    // Accessors exposing the JCOBridge identity of this wrapper.
    public String getJCOAssemblyName() {
        return assemblyFullName;
    }

    public String getJCOClassName() {
        return className;
    }

    public String getJCOObjectName() {
        return className + ", " + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
    }

    public java.lang.Object getJCOInstance() {
        return classInstance;
    }

    public JCType getJCOType() {
        return classType;
    }

    // Wraps a named enum value; falls back to the raw reflected enum if the name is unknown.
    final static SizeType getFrom(JCEnum object, String value) {
        try {
            return new SizeType(object.fromValue(value));
        } catch (JCException e) {
            return new SizeType(object);
        }
    }

    // Enum fields section
    public static SizeType AutoSize = getFrom(enumReflected, "AutoSize");
    public static SizeType Absolute = getFrom(enumReflected, "Absolute");
    public static SizeType Percent = getFrom(enumReflected, "Percent");

    // Flags management section
}
|
/*
Copyright 2016 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package com.gs.fw.common.mithra.test.domain.evo;
import com.gs.fw.common.mithra.finder.Operation;
import java.util.Collection;
/**
 * Concrete list type for {@code EvoType1ReadOnlyTypes} objects. All behavior
 * lives in the generated {@link EvoType1ReadOnlyTypesListAbstract}; this
 * subclass only exposes the four standard Mithra list constructors.
 */
public class EvoType1ReadOnlyTypesList extends EvoType1ReadOnlyTypesListAbstract
{
    /** Creates an empty list. */
    public EvoType1ReadOnlyTypesList()
    {
        super();
    }

    /** Creates an empty list with the given initial capacity. */
    public EvoType1ReadOnlyTypesList(int initialSize)
    {
        super(initialSize);
    }

    /** Creates a list containing the elements of the given collection. */
    public EvoType1ReadOnlyTypesList(Collection c)
    {
        super(c);
    }

    /** Creates a list defined by the given finder {@link Operation}. */
    public EvoType1ReadOnlyTypesList(Operation operation)
    {
        super(operation);
    }
}
|
/*
* Copyright 2018 Google LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.tools.jib.gradle;
import com.google.cloud.tools.jib.api.CacheDirectoryCreationException;
import com.google.cloud.tools.jib.api.InvalidImageReferenceException;
import com.google.cloud.tools.jib.filesystem.TempDirectoryProvider;
import com.google.cloud.tools.jib.plugins.common.BuildStepsExecutionException;
import com.google.cloud.tools.jib.plugins.common.HelpfulSuggestions;
import com.google.cloud.tools.jib.plugins.common.IncompatibleBaseImageJavaVersionException;
import com.google.cloud.tools.jib.plugins.common.InvalidAppRootException;
import com.google.cloud.tools.jib.plugins.common.InvalidContainerVolumeException;
import com.google.cloud.tools.jib.plugins.common.InvalidContainerizingModeException;
import com.google.cloud.tools.jib.plugins.common.InvalidCreationTimeException;
import com.google.cloud.tools.jib.plugins.common.InvalidFilesModificationTimeException;
import com.google.cloud.tools.jib.plugins.common.InvalidWorkingDirectoryException;
import com.google.cloud.tools.jib.plugins.common.MainClassInferenceException;
import com.google.cloud.tools.jib.plugins.common.PluginConfigurationProcessor;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import java.io.IOException;
import java.util.Optional;
import javax.annotation.Nullable;
import org.gradle.api.DefaultTask;
import org.gradle.api.GradleException;
import org.gradle.api.tasks.Nested;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.options.Option;
/** Builds a container image. */
public class BuildImageTask extends DefaultTask implements JibTask {

  private static final String HELPFUL_SUGGESTIONS_PREFIX = "Build image failed";

  @Nullable private JibExtension jibExtension;

  /**
   * This will call the property {@code "jib"} so that it is the same name as the extension. This
   * way, the user would see error messages for missing configuration with the prefix {@code jib.}.
   *
   * @return the {@link JibExtension}.
   */
  @Nested
  @Nullable
  public JibExtension getJib() {
    return jibExtension;
  }

  /**
   * The target image can be overridden with the {@code --image} command line option.
   *
   * @param targetImage the name of the 'to' image.
   */
  @Option(option = "image", description = "The image reference for the target image")
  public void setTargetImage(String targetImage) {
    Preconditions.checkNotNull(jibExtension).getTo().setImage(targetImage);
  }

  /**
   * Task entry point: validates the configuration, runs the containerization build, and
   * translates plugin-configuration errors into {@link GradleException}s with actionable
   * messages.
   *
   * @throws IOException if an I/O error occurs while building
   * @throws BuildStepsExecutionException if executing a build step fails
   * @throws CacheDirectoryCreationException if the cache directory cannot be created
   * @throws MainClassInferenceException if the main class cannot be inferred
   */
  @TaskAction
  public void buildImage()
      throws IOException, BuildStepsExecutionException, CacheDirectoryCreationException,
          MainClassInferenceException {
    // Asserts required @Input parameters are not null.
    Preconditions.checkNotNull(jibExtension);
    TaskCommon.checkDeprecatedUsage(jibExtension, getLogger());
    TaskCommon.disableHttpLogging();

    // Bug fix: manage the provider with try-with-resources so its temporary directory is
    // released even when GradleProjectProperties.getForProject throws. Previously the explicit
    // close() sat in a finally block whose try had not yet been entered, leaking the provider
    // on any failure during project-property resolution.
    try (TempDirectoryProvider tempDirectoryProvider = new TempDirectoryProvider()) {
      GradleProjectProperties projectProperties =
          GradleProjectProperties.getForProject(getProject(), getLogger(), tempDirectoryProvider);
      try {
        if (Strings.isNullOrEmpty(jibExtension.getTo().getImage())) {
          throw new GradleException(
              HelpfulSuggestions.forToNotConfigured(
                  "Missing target image parameter",
                  "'jib.to.image'",
                  "build.gradle",
                  "gradle jib --image <your image name>"));
        }

        PluginConfigurationProcessor.createJibBuildRunnerForRegistryImage(
                new GradleRawConfiguration(jibExtension),
                ignored -> Optional.empty(),
                projectProperties,
                new GradleHelpfulSuggestions(HELPFUL_SUGGESTIONS_PREFIX))
            .runBuild();

      } catch (InvalidAppRootException ex) {
        throw new GradleException(
            "container.appRoot is not an absolute Unix-style path: " + ex.getInvalidPathValue(), ex);

      } catch (InvalidContainerizingModeException ex) {
        throw new GradleException(
            "invalid value for containerizingMode: " + ex.getInvalidContainerizingMode(), ex);

      } catch (InvalidWorkingDirectoryException ex) {
        throw new GradleException(
            "container.workingDirectory is not an absolute Unix-style path: "
                + ex.getInvalidPathValue(),
            ex);

      } catch (InvalidContainerVolumeException ex) {
        throw new GradleException(
            "container.volumes is not an absolute Unix-style path: " + ex.getInvalidVolume(), ex);

      } catch (InvalidFilesModificationTimeException ex) {
        throw new GradleException(
            "container.filesModificationTime should be an ISO 8601 date-time (see "
                + "DateTimeFormatter.ISO_DATE_TIME) or special keyword \"EPOCH_PLUS_SECOND\": "
                + ex.getInvalidFilesModificationTime(),
            ex);

      } catch (InvalidCreationTimeException ex) {
        throw new GradleException(
            "container.creationTime should be an ISO 8601 date-time (see "
                + "DateTimeFormatter.ISO_DATE_TIME) or a special keyword (\"EPOCH\", "
                + "\"USE_CURRENT_TIMESTAMP\"): "
                + ex.getInvalidCreationTime(),
            ex);

      } catch (IncompatibleBaseImageJavaVersionException ex) {
        throw new GradleException(
            HelpfulSuggestions.forIncompatibleBaseImageJavaVersionForGradle(
                ex.getBaseImageMajorJavaVersion(), ex.getProjectMajorJavaVersion()),
            ex);

      } catch (InvalidImageReferenceException ex) {
        throw new GradleException(
            HelpfulSuggestions.forInvalidImageReference(ex.getInvalidReference()), ex);

      } finally {
        // Flush pending log output before the temp directory is removed by the outer
        // try-with-resources.
        projectProperties.waitForLoggingThread();
      }
    }
  }

  @Override
  public BuildImageTask setJibExtension(JibExtension jibExtension) {
    this.jibExtension = jibExtension;
    return this;
  }
}
|
/*
* The MIT License
*
* Copyright 2018 msav.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package ru.msav.libs.cbr.getrates;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Unit test for {@link Currency}: verifies that the field values assigned at
 * construction time are stored unchanged.
 *
 * @author msav
 */
public class CurrencyTest {

    // Reference object populated with the CBR sample values for USD.
    private Currency currencyObject;

    /**
     * Initialize a simple currency object.
     */
    public CurrencyTest() {
        currencyObject = new Currency();
        currencyObject.ID = "R01235";
        currencyObject.NumCode = "840";
        currencyObject.CharCode = "USD";
        currencyObject.Nominal = 1.0;
        currencyObject.Name = "Доллар США";
        currencyObject.Rate = 56.98;
    }

    /**
     * Test a simple currency object.
     */
    @Test
    public void testCurrencyConstructor() {
        // Bug fix: JUnit's assertEquals(message, expected, actual) takes the expected
        // literal BEFORE the actual value; the original had the arguments reversed,
        // which produced misleading "expected X but was Y" failure messages.
        // Also fixed the "currentcy" typo in the messages.
        assertEquals("Unexpected USD currency ID", "R01235", currencyObject.ID);
        assertEquals("Unexpected USD currency NumCode", "840", currencyObject.NumCode);
        assertEquals("Unexpected USD currency CharCode", "USD", currencyObject.CharCode);
        assertEquals("Unexpected USD currency Nominal", 0, Double.compare(currencyObject.Nominal, 1.0));
        assertEquals("Unexpected USD currency Name", "Доллар США", currencyObject.Name);
        assertEquals("Unexpected USD currency Rate", 0, Double.compare(currencyObject.Rate, 56.98));
    }
}
|
/*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.intellij.lang.xpath.xslt.context;
import com.intellij.lang.xml.XMLLanguage;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.SimpleFieldCache;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.*;
import com.intellij.psi.util.*;
import com.intellij.psi.xml.*;
import com.intellij.util.ArrayUtil;
import com.intellij.util.Function;
import com.intellij.util.IncorrectOperationException;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.xml.XmlAttributeDescriptor;
import com.intellij.xml.XmlElementDescriptor;
import com.intellij.xml.XmlNSDescriptor;
import com.intellij.xml.impl.schema.XmlElementDescriptorImpl;
import com.intellij.xml.impl.schema.XmlNSDescriptorImpl;
import com.intellij.xml.util.XmlUtil;
import gnu.trove.THashSet;
import org.intellij.lang.xpath.XPathFile;
import org.intellij.lang.xpath.context.ContextProvider;
import org.intellij.lang.xpath.context.NamespaceContext;
import org.intellij.lang.xpath.context.VariableContext;
import org.intellij.lang.xpath.psi.XPathExpression;
import org.intellij.lang.xpath.psi.XPathType;
import org.intellij.lang.xpath.validation.inspections.quickfix.XPathQuickFixFactory;
import org.intellij.lang.xpath.xslt.XsltSupport;
import org.intellij.lang.xpath.xslt.associations.FileAssociationsManager;
import org.intellij.lang.xpath.xslt.psi.XsltElement;
import org.intellij.lang.xpath.xslt.psi.XsltElementFactory;
import org.intellij.lang.xpath.xslt.psi.XsltVariable;
import org.intellij.lang.xpath.xslt.psi.XsltWithParam;
import org.intellij.lang.xpath.xslt.util.NSDeclTracker;
import org.intellij.lang.xpath.xslt.util.QNameUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.xml.namespace.QName;
import java.util.*;
/*
* Created by IntelliJ IDEA.
* User: sweinreuter
* Date: 08.01.11
*/
/**
 * Base implementation of an XPath {@link ContextProvider} for XPath fragments embedded in XSLT
 * documents. Determines the XPath type expected at a given position from the surrounding XSLT
 * tag/attribute, and collects the element/attribute names valid in the current context, either
 * from explicitly associated XML files or from the schemas declared on the stylesheet root.
 */
public abstract class XsltContextProviderBase extends ContextProvider {
  // Namespaces whose declarations never contribute element/attribute names (XSLT itself, schemas).
  protected static final Set<String> IGNORED_URIS = new THashSet<String>();

  static {
    IGNORED_URIS.add(XsltSupport.XSLT_NS);
    IGNORED_URIS.addAll(XmlUtil.ourSchemaUrisList);
  }

  // Lazily populates myNames on first access via createCachedValue().
  private static final SimpleFieldCache<CachedValue<ElementNames>, XsltContextProviderBase> myNamesCache = new SimpleFieldCache<CachedValue<ElementNames>, XsltContextProviderBase>() {
    protected CachedValue<ElementNames> compute(final XsltContextProviderBase xsltContextProvider) {
      return xsltContextProvider.createCachedValue(xsltContextProvider.getFile());
    }
    protected CachedValue<ElementNames> getValue(final XsltContextProviderBase xsltContextProvider) {
      return xsltContextProvider.myNames;
    }
    protected void putValue(final CachedValue<ElementNames> elementNamesCachedValue, final XsltContextProviderBase xsltContextProvider) {
      xsltContextProvider.myNames = elementNamesCachedValue;
    }
  };

  private CachedValue<ElementNames> myNames;

  protected final SmartPsiElementPointer<XmlElement> myContextElement;
  protected final FileAssociationsManager myFileAssociationsManager;

  protected XsltContextProviderBase(XmlElement element) {
    final Project project = element.getProject();
    myFileAssociationsManager = FileAssociationsManager.getInstance(project);
    // A smart pointer survives PSI re-parses; the raw element may become invalid.
    myContextElement = SmartPointerManager.getInstance(project).createSmartPsiElementPointer(element);
    attachTo(element);
  }

  @Override
  protected boolean isValid() {
    return super.isValid() && matchContextType();
  }

  // Valid only while the file's XSLT language level still matches this context's XPath version.
  private boolean matchContextType() {
    final PsiFile file = myContextElement.getContainingFile();
    return file != null && XsltSupport.getXsltLanguageLevel(file).getXPathVersion() == getContextType().getVersion();
  }

  /**
   * Collects element/attribute names from the schemas of every namespace declared on the root
   * tag of {@code file}. Namespaces without a usable physical schema contribute a wildcard
   * "any local name" entry instead of concrete names.
   */
  private static void fillFromSchema(PsiFile file, ElementNames names) {
    if (!(file instanceof XmlFile)) return;
    final XmlFile f = (XmlFile)file;
    final XmlDocument d = f.getDocument();
    if (d == null) return;
    final XmlTag rootTag = d.getRootTag();
    if (rootTag == null) return;

    //noinspection unchecked
    names.dependencies.add(new NSDeclTracker(rootTag));

    try {
      final Map<String, String> namespaceDeclarations = rootTag.getLocalNamespaceDeclarations();
      final Collection<String> prefixes = namespaceDeclarations.keySet();

      final XmlElementFactory ef = XmlElementFactory.getInstance(file.getProject());
      int noSchemaNamespaces = 0;
      for (String prefix : prefixes) {
        final String namespace = namespaceDeclarations.get(prefix);
        if (isIgnoredNamespace(prefix, namespace)) continue;

        final XmlTag tag = ef.createTagFromText("<dummy-tag xmlns='" + namespace + "' />", XMLLanguage.INSTANCE);
        final XmlDocument document = PsiTreeUtil.getParentOfType(tag, XmlDocument.class);
        final XmlNSDescriptor rootDescriptor = tag.getNSDescriptor(tag.getNamespace(), true);
        if (rootDescriptor == null ||
            (rootDescriptor instanceof XmlNSDescriptorImpl && ((XmlNSDescriptorImpl)rootDescriptor).getTag() == null) ||
            !rootDescriptor.getDeclaration().isPhysical()) {
          // No usable schema for this namespace: accept any local name.
          final QName any = QNameUtil.createAnyLocalName(namespace);
          names.elementNames.add(any);
          names.attributeNames.add(any);
          noSchemaNamespaces++;
          continue;
        }

        //noinspection unchecked
        names.dependencies.add(rootDescriptor.getDescriptorFile());

        //noinspection unchecked
        final Set<XmlElementDescriptor> history = new THashSet<XmlElementDescriptor>(150);
        final XmlElementDescriptor[] e = rootDescriptor.getRootElementsDescriptors(document);
        try {
          for (XmlElementDescriptor descriptor : e) {
            processElementDescriptors(descriptor, tag, names, history, 0);
          }
        } catch (StopProcessingException e1) {
          Logger.getInstance(XsltContextProviderBase.class).error("Maximum recursion depth reached. Missing equals()/hashCode() implementation?", StringUtil
            .join(history, new Function<XmlElementDescriptor, String>() {
              @Override
              public String fun(XmlElementDescriptor descriptor) {
                return descriptor.getClass().getName() + "[" + descriptor.getQualifiedName() + "]";
              }
            }, ", "));
        }
      }

      // Only validate against the collected names when at least one schema actually contributed.
      names.validateNames = names.elementNames.size() > noSchemaNamespaces;

//      final QName any = QNameUtil.createAnyLocalName("");
//      names.elementNames.add(any);
//      names.attributeNames.add(any);
    } catch (IncorrectOperationException e) {
      // Consistency fix: log under this class (was XsltContextProvider), matching the
      // StopProcessingException handler above.
      Logger.getInstance(XsltContextProviderBase.class.getName()).error(e);
    }
  }

  private static boolean isIgnoredNamespace(String prefix, String namespace) {
    return IGNORED_URIS.contains(namespace) || prefix.length() == 0 || "xmlns".equals(prefix);
  }

  // Control-flow exception used to abort overly deep schema traversal cheaply.
  private static class StopProcessingException extends Exception {
    @Override
    public synchronized Throwable fillInStackTrace() {
      // Skip the expensive stack fill; the trace is never inspected.
      return this;
    }
  }

  /**
   * Records the element name and attribute names of {@code descriptor}, then recurses into its
   * child element descriptors. The {@code history} set and the depth limit of 200 guard against
   * cyclic or pathological schemas.
   */
  private static void processElementDescriptors(XmlElementDescriptor descriptor, XmlTag tag, ElementNames names, Set<XmlElementDescriptor> history, int depth)
    throws StopProcessingException {
    if (!history.add(descriptor) || ++depth == 200) {
      if (depth == 200) {
        throw new StopProcessingException();
      }
      return;
    }
    final String namespace = descriptor instanceof XmlElementDescriptorImpl
                             ? ((XmlElementDescriptorImpl)descriptor).getNamespace()
                             : tag.getNamespace();
    names.elementNames.add(new QName(namespace, descriptor.getName()));

    final XmlAttributeDescriptor[] attributesDescriptors = descriptor.getAttributesDescriptors(null);
    for (XmlAttributeDescriptor attributesDescriptor : attributesDescriptors) {
      final String localPart = attributesDescriptor.getName();
      if (!"xmlns".equals(localPart)) names.attributeNames.add(new QName(localPart));
    }

    final XmlElementDescriptor[] descriptors = descriptor.getElementsDescriptors(tag);
    for (XmlElementDescriptor elem : descriptors) {
      processElementDescriptors(elem, tag, names, history, depth);
    }
  }

  /**
   * Returns all other XPath files referenced from XSLT attributes of the document containing
   * {@code file} (excluding {@code file} itself).
   */
  public PsiFile[] getRelatedFiles(final XPathFile file) {
    final XmlAttribute attribute = PsiTreeUtil.getContextOfType(file, XmlAttribute.class, false);
    assert attribute != null;

    final PsiFile psiFile = attribute.getContainingFile();
    assert psiFile != null;

    final List<PsiFile> files = new ArrayList<PsiFile>();

    psiFile.accept(new XmlRecursiveElementVisitor() {
      @Override
      public void visitXmlAttribute(XmlAttribute attribute) {
        final PsiFile[] _files = XsltSupport.getFiles(attribute);
        for (PsiFile _file : _files) {
          if (_file != file) files.add(_file);
        }
      }
    });

    return PsiUtilCore.toPsiFileArray(files);
  }

  @Nullable
  public XmlElement getContextElement() {
    return myContextElement.getElement();
  }

  /**
   * Determines the XPath type expected for {@code expr} from the enclosing XSLT tag/attribute:
   * the declared type for variables, the resolved variable's type for xsl:with-param name
   * references, or the per-attribute defaults from {@link #getTypeForTag}.
   */
  @NotNull
  public XPathType getExpectedType(XPathExpression expr) {
    final XmlTag tag = PsiTreeUtil.getContextOfType(expr, XmlTag.class, true);
    if (tag != null && XsltSupport.isXsltTag(tag)) {
      final XsltElement element = XsltElementFactory.getInstance().wrapElement(tag, XsltElement.class);
      if (element instanceof XsltVariable) {
        return ((XsltVariable)element).getType();
      } else {
        final XmlAttribute attr = PsiTreeUtil.getContextOfType(expr, XmlAttribute.class, true);
        if (attr != null) {
          if (element instanceof XsltWithParam) {
            final XmlAttribute nameAttr = tag.getAttribute("name", null);
            if (nameAttr != null) {
              final XmlAttributeValue valueElement = nameAttr.getValueElement();
              if (valueElement != null) {
                final PsiReference[] references = valueElement.getReferences();
                for (PsiReference reference : references) {
                  final PsiElement psiElement = reference.resolve();
                  if (psiElement instanceof XsltVariable) {
                    return ((XsltVariable)psiElement).getType();
                  }
                }
              }
            }
          } else {
            final String name = attr.getName();
            return getTypeForTag(tag, name);
          }
        }
      }
    }
    return XPathType.UNKNOWN;
  }

  // Default expected types for well-known XSLT attribute/tag combinations.
  protected XPathType getTypeForTag(XmlTag tag, String attribute) {
    String tagName = tag.getLocalName();
    if ("select".equals(attribute)) {
      if ("copy-of".equals(tagName) || "for-each".equals(tagName) || "apply-templates".equals(tagName)) {
        return XPathType.NODESET;
      } else if ("value-of".equals(tagName) || "sort".equals(tagName)) {
        return XPathType.STRING;
      }
      return XPathType.ANY;
    } else if ("test".equals(attribute)) {
      if ("if".equals(tagName) || "when".equals(tagName)) {
        return XPathType.BOOLEAN;
      }
    } else if ("number".equals(attribute)) {
      if ("value".equals(tagName)) {
        return XPathType.NUMBER;
      }
    }
    return XPathType.UNKNOWN;
  }

  @NotNull
  public NamespaceContext getNamespaceContext() {
    return XsltNamespaceContext.NAMESPACE_CONTEXT;
  }

  @NotNull
  public VariableContext getVariableContext() {
    return XsltVariableContext.INSTANCE;
  }

  /** Cached attribute names; null when unavailable or (for validation) not trustworthy. */
  @Nullable
  public Set<QName> getAttributes(boolean forValidation) {
    final ElementNames names = getNames(getFile());
    if (names != null) {
      return !forValidation || names.validateNames ? names.attributeNames : null;
    }
    return null;
  }

  /** Cached element names; null when unavailable or (for validation) not trustworthy. */
  @Nullable
  public Set<QName> getElements(boolean forValidation) {
    final ElementNames names = getNames(getFile());
    if (names != null) {
      return !forValidation || names.validateNames ? names.elementNames : null;
    }
    return null;
  }

  @Nullable
  private ElementNames getNames(@Nullable PsiFile file) {
    if (file == null) return null;
    return myNamesCache.get(this).getValue();
  }

  // Builds the cached name set: from explicitly associated files when present,
  // otherwise from the schemas referenced by the stylesheet itself.
  private CachedValue<ElementNames> createCachedValue(final PsiFile file) {
    return CachedValuesManager.getManager(file.getProject()).createCachedValue(new CachedValueProvider<ElementNames>() {
      public Result<ElementNames> compute() {
        final ElementNames names = new ElementNames();
        final PsiFile[] associations = myFileAssociationsManager.getAssociationsFor(file, FileAssociationsManager.Holder.XML_FILES);

        if (associations.length == 0) {
          fillFromSchema(file, names);
        } else {
          names.validateNames = true;
          //noinspection unchecked
          ContainerUtil.addAll(names.dependencies, associations);
        }
        //noinspection unchecked
        names.dependencies.add(myFileAssociationsManager);

        // Renamed from "file" to stop the loop variable shadowing the captured parameter.
        for (PsiFile associatedFile : associations) {
          if (!(associatedFile instanceof XmlFile)) continue;
          associatedFile.accept(new XmlRecursiveElementVisitor() {
            @Override
            public void visitXmlTag(XmlTag tag) {
              names.elementNames.add(QNameUtil.createQName(tag));
              super.visitXmlTag(tag);
            }

            @Override
            public void visitXmlAttribute(XmlAttribute attribute) {
              if (!attribute.isNamespaceDeclaration()) {
                names.attributeNames.add(QNameUtil.createQName(attribute));
              }
              super.visitXmlAttribute(attribute);
            }
          });
        }

        //noinspection unchecked
        return new Result<ElementNames>(names, ArrayUtil.toObjectArray(names.dependencies));
      }
    }, false);
  }

  @Nullable
  private PsiFile getFile() {
    final XmlElement element = getContextElement();
    if (element == null) {
      return null;
    }
    return element.getContainingFile().getOriginalFile();
  }

  @NotNull
  public XPathQuickFixFactory getQuickFixFactory() {
    return XsltQuickFixFactory.INSTANCE;
  }

  // Aggregated name information plus the PSI/tracker objects it depends on for cache invalidation.
  static class ElementNames {
    boolean validateNames;

    final Set<QName> elementNames = new HashSet<QName>();
    final Set<QName> attributeNames = new HashSet<QName>();

    @SuppressWarnings({"RawUseOfParameterizedType"})
    final Set dependencies = new HashSet();
  }
}
|
/**
* LabelEntityAssociationErrorReason.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Apr 22, 2006 (06:55:48 PDT) WSDL2Java emitter.
*/
package com.google.api.ads.dfp.v201306;
/**
 * Axis-generated (wsdl2java) type-safe "enum" for LabelEntityAssociationError.Reason.
 * Instances are interned in a static table so that deserialization and
 * {@link #fromValue} return the canonical constants.
 */
public class LabelEntityAssociationErrorReason implements java.io.Serializable {
    private java.lang.String _value_;
    // Registry of all known values, populated by the protected constructor.
    // NOTE(review): plain HashMap with no synchronization -- safe only while all
    // instances are created in this class's static initializers; confirm no
    // subclass registers values at runtime.
    private static java.util.HashMap _table_ = new java.util.HashMap();
    // Constructor
    protected LabelEntityAssociationErrorReason(java.lang.String value) {
        _value_ = value;
        _table_.put(_value_,this);
    }
    public static final java.lang.String _INVALID_COMPANY_TYPE = "INVALID_COMPANY_TYPE";
    public static final java.lang.String _DUPLICATE_ASSOCIATION = "DUPLICATE_ASSOCIATION";
    public static final java.lang.String _INVALID_ASSOCIATION = "INVALID_ASSOCIATION";
    public static final java.lang.String _DUPLICATE_ASSOCIATION_WITH_NEGATION = "DUPLICATE_ASSOCIATION_WITH_NEGATION";
    public static final java.lang.String _UNKNOWN = "UNKNOWN";
    public static final LabelEntityAssociationErrorReason INVALID_COMPANY_TYPE = new LabelEntityAssociationErrorReason(_INVALID_COMPANY_TYPE);
    public static final LabelEntityAssociationErrorReason DUPLICATE_ASSOCIATION = new LabelEntityAssociationErrorReason(_DUPLICATE_ASSOCIATION);
    public static final LabelEntityAssociationErrorReason INVALID_ASSOCIATION = new LabelEntityAssociationErrorReason(_INVALID_ASSOCIATION);
    public static final LabelEntityAssociationErrorReason DUPLICATE_ASSOCIATION_WITH_NEGATION = new LabelEntityAssociationErrorReason(_DUPLICATE_ASSOCIATION_WITH_NEGATION);
    public static final LabelEntityAssociationErrorReason UNKNOWN = new LabelEntityAssociationErrorReason(_UNKNOWN);
    /** Returns the wire value of this constant. */
    public java.lang.String getValue() { return _value_;}
    /** Looks up the canonical constant for a wire value; throws if unknown. */
    public static LabelEntityAssociationErrorReason fromValue(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        LabelEntityAssociationErrorReason enumeration = (LabelEntityAssociationErrorReason)
            _table_.get(value);
        if (enumeration==null) throw new java.lang.IllegalArgumentException();
        return enumeration;
    }
    public static LabelEntityAssociationErrorReason fromString(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        return fromValue(value);
    }
    // Identity equality is correct because instances are interned via _table_.
    public boolean equals(java.lang.Object obj) {return (obj == this);}
    public int hashCode() { return toString().hashCode();}
    public java.lang.String toString() { return _value_;}
    // Ensures Java serialization also yields the interned constant.
    public java.lang.Object readResolve() throws java.io.ObjectStreamException { return fromValue(_value_);}
    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumSerializer(
            _javaType, _xmlType);
    }
    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumDeserializer(
            _javaType, _xmlType);
    }
    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(LabelEntityAssociationErrorReason.class);
    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "LabelEntityAssociationError.Reason"));
    }
    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }
}
|
package br.com.zupacademy.gustavo.casadocodigo.model;
import javax.persistence.*;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
/**
 * JPA entity for a state/province ({@code Estado}) that belongs to a country
 * ({@link Pais}).
 */
@Entity
public class Estado {

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    private Long id;

    @NotBlank
    private String nomeEstado;

    @NotBlank
    private String nomePais;

    @NotNull
    @ManyToOne
    private Pais pais;

    /**
     * No-arg constructor required by JPA; not intended for application code.
     */
    public Estado(){
    }

    /**
     * @param nome state name
     * @param nomePais country name
     * @param pais owning country entity
     */
    public Estado(String nome, String nomePais, Pais pais) {
        this.nomeEstado = nome;
        this.nomePais = nomePais;
        this.pais = pais;
    }

    public Long getId() {
        return id;
    }

    public String getNomeEstado() {
        return nomeEstado;
    }

    public String getNomePais() {
        return nomePais;
    }

    /**
     * Accessor added for consistency: every other field exposes a getter,
     * while the {@code pais} association was previously unreadable from
     * outside the entity.
     */
    public Pais getPais() {
        return pais;
    }
}
|
/*
* Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.sagemaker.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Provides summary information about a model package.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelPackageSummary" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ModelPackageSummary implements Serializable, Cloneable, StructuredPojo {
    // Backing fields of the summary; each is exposed through the get/set/with accessors below.
    /**
     * <p>
     * The name of the model package.
     * </p>
     */
    private String modelPackageName;
    /**
     * <p>
     * The Amazon Resource Name (ARN) of the model package.
     * </p>
     */
    private String modelPackageArn;
    /**
     * <p>
     * A brief statement describing the model package.
     * </p>
     */
    private String modelPackageDescription;
    /**
     * <p>
     * A timestamp that shows when the model package was created.
     * </p>
     */
    private java.util.Date creationTime;
    /**
     * <p>
     * The overall status of the model package.
     * </p>
     */
    private String modelPackageStatus;
/**
* <p>
* The name of the model package.
* </p>
*
* @param modelPackageName
* The name of the model package.
*/
public void setModelPackageName(String modelPackageName) {
this.modelPackageName = modelPackageName;
}
/**
* <p>
* The name of the model package.
* </p>
*
* @return The name of the model package.
*/
public String getModelPackageName() {
return this.modelPackageName;
}
/**
* <p>
* The name of the model package.
* </p>
*
* @param modelPackageName
* The name of the model package.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ModelPackageSummary withModelPackageName(String modelPackageName) {
setModelPackageName(modelPackageName);
return this;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the model package.
* </p>
*
* @param modelPackageArn
* The Amazon Resource Name (ARN) of the model package.
*/
public void setModelPackageArn(String modelPackageArn) {
this.modelPackageArn = modelPackageArn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the model package.
* </p>
*
* @return The Amazon Resource Name (ARN) of the model package.
*/
public String getModelPackageArn() {
return this.modelPackageArn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the model package.
* </p>
*
* @param modelPackageArn
* The Amazon Resource Name (ARN) of the model package.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ModelPackageSummary withModelPackageArn(String modelPackageArn) {
setModelPackageArn(modelPackageArn);
return this;
}
/**
* <p>
* A brief statement describing the model package.
* </p>
*
* @param modelPackageDescription
* A brief statement describing the model package.
*/
public void setModelPackageDescription(String modelPackageDescription) {
this.modelPackageDescription = modelPackageDescription;
}
/**
* <p>
* A brief statement describing the model package.
* </p>
*
* @return A brief statement describing the model package.
*/
public String getModelPackageDescription() {
return this.modelPackageDescription;
}
/**
* <p>
* A brief statement describing the model package.
* </p>
*
* @param modelPackageDescription
* A brief statement describing the model package.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ModelPackageSummary withModelPackageDescription(String modelPackageDescription) {
setModelPackageDescription(modelPackageDescription);
return this;
}
/**
* <p>
* A timestamp that shows when the model package was created.
* </p>
*
* @param creationTime
* A timestamp that shows when the model package was created.
*/
public void setCreationTime(java.util.Date creationTime) {
this.creationTime = creationTime;
}
/**
* <p>
* A timestamp that shows when the model package was created.
* </p>
*
* @return A timestamp that shows when the model package was created.
*/
public java.util.Date getCreationTime() {
return this.creationTime;
}
/**
* <p>
* A timestamp that shows when the model package was created.
* </p>
*
* @param creationTime
* A timestamp that shows when the model package was created.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ModelPackageSummary withCreationTime(java.util.Date creationTime) {
setCreationTime(creationTime);
return this;
}
/**
* <p>
* The overall status of the model package.
* </p>
*
* @param modelPackageStatus
* The overall status of the model package.
* @see ModelPackageStatus
*/
public void setModelPackageStatus(String modelPackageStatus) {
this.modelPackageStatus = modelPackageStatus;
}
/**
* <p>
* The overall status of the model package.
* </p>
*
* @return The overall status of the model package.
* @see ModelPackageStatus
*/
public String getModelPackageStatus() {
return this.modelPackageStatus;
}
/**
* <p>
* The overall status of the model package.
* </p>
*
* @param modelPackageStatus
* The overall status of the model package.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ModelPackageStatus
*/
public ModelPackageSummary withModelPackageStatus(String modelPackageStatus) {
    // Fluent variant of setModelPackageStatus; returns this for call chaining.
    setModelPackageStatus(modelPackageStatus);
    return this;
}
/**
* <p>
* The overall status of the model package.
* </p>
*
* @param modelPackageStatus
* The overall status of the model package.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ModelPackageStatus
*/
public ModelPackageSummary withModelPackageStatus(ModelPackageStatus modelPackageStatus) {
    // Enum overload: stores the enum's string form directly, bypassing the
    // String setter (same effective result; matches generated SDK style).
    // Throws NullPointerException if modelPackageStatus is null.
    this.modelPackageStatus = modelPackageStatus.toString();
    return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{");
    // Only non-null fields are emitted; each field except the last appended one
    // carries a trailing comma (the SDK's generated format — not valid JSON).
    if (getModelPackageName() != null)
        sb.append("ModelPackageName: ").append(getModelPackageName()).append(",");
    if (getModelPackageArn() != null)
        sb.append("ModelPackageArn: ").append(getModelPackageArn()).append(",");
    if (getModelPackageDescription() != null)
        sb.append("ModelPackageDescription: ").append(getModelPackageDescription()).append(",");
    if (getCreationTime() != null)
        sb.append("CreationTime: ").append(getCreationTime()).append(",");
    if (getModelPackageStatus() != null)
        sb.append("ModelPackageStatus: ").append(getModelPackageStatus());
    sb.append("}");
    return sb.toString();
}
/**
 * Field-by-field equality over all five summary properties.
 * <p>
 * Rewritten from the generated XOR-null / {@code == false} pattern to
 * {@code java.util.Objects.equals}, which implements the same null-safe
 * comparison for each field. {@code !(obj instanceof ...)} also covers the
 * explicit null check of the original.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj)
        return true;
    if (!(obj instanceof ModelPackageSummary))
        return false;
    ModelPackageSummary other = (ModelPackageSummary) obj;
    // Objects.equals(a, b) == (a == null ? b == null : a.equals(b)) — identical
    // to the original's "one-null -> false, both non-null -> equals" logic.
    return java.util.Objects.equals(getModelPackageName(), other.getModelPackageName())
            && java.util.Objects.equals(getModelPackageArn(), other.getModelPackageArn())
            && java.util.Objects.equals(getModelPackageDescription(), other.getModelPackageDescription())
            && java.util.Objects.equals(getCreationTime(), other.getCreationTime())
            && java.util.Objects.equals(getModelPackageStatus(), other.getModelPackageStatus());
}
/**
 * Hash over the same five properties compared by {@link #equals(Object)}.
 * <p>
 * {@code java.util.Objects.hash} computes exactly the original hand-rolled
 * fold — result starts at 1, and for each element e:
 * {@code result = 31 * result + (e == null ? 0 : e.hashCode())} — so the
 * produced value is bit-for-bit identical to the generated version.
 */
@Override
public int hashCode() {
    return java.util.Objects.hash(getModelPackageName(), getModelPackageArn(),
            getModelPackageDescription(), getCreationTime(), getModelPackageStatus());
}
@Override
public ModelPackageSummary clone() {
    try {
        // Shallow copy is sufficient: all fields are immutable or treated as such by the SDK.
        return (ModelPackageSummary) super.clone();
    } catch (CloneNotSupportedException e) {
        // Unreachable in practice: the class implements Cloneable; surface as a programming error.
        throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
    }
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
    // Delegates wire-format serialization to the generated marshaller singleton.
    com.amazonaws.services.sagemaker.model.transform.ModelPackageSummaryMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
|
package net.minecraft.util;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.ItemFood;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.world.EnumDifficulty;
public class FoodStats {
    private float foodSaturationLevel = 5.0F;
    private int prevFoodLevel = 20;
    private int foodTimer;
    private static final String __OBFID = "CL_00001729";
    private int foodLevel = 20;
    private float foodExhaustionLevel;

    /**
     * Accumulates exhaustion from player actions; hard-capped at 40.
     */
    public void addExhaustion(float exhaustion) {
        this.foodExhaustionLevel = Math.min(this.foodExhaustionLevel + exhaustion, 40.0F);
    }

    /** Food level as of the previous update tick. */
    public int getPrevFoodLevel() {
        return this.prevFoodLevel;
    }

    public void setFoodSaturationLevel(float saturation) {
        this.foodSaturationLevel = saturation;
    }

    /**
     * Per-tick update: drains exhaustion into saturation/hunger, then applies
     * natural regeneration or starvation damage depending on the food level.
     */
    public void onUpdate(EntityPlayer player) {
        EnumDifficulty difficulty = player.worldObj.getDifficulty();
        this.prevFoodLevel = this.foodLevel;

        // Every 4 points of exhaustion consumes 1 point of saturation, or —
        // once saturation is empty and outside Peaceful — 1 point of hunger.
        if (this.foodExhaustionLevel > 4.0F) {
            this.foodExhaustionLevel -= 4.0F;
            if (this.foodSaturationLevel > 0.0F) {
                this.foodSaturationLevel = Math.max(this.foodSaturationLevel - 1.0F, 0.0F);
            } else if (difficulty != EnumDifficulty.PEACEFUL) {
                this.foodLevel = Math.max(this.foodLevel - 1, 0);
            }
        }

        boolean regenAllowed = player.worldObj.getGameRules().getGameRuleBooleanValue("naturalRegeneration")
                && this.foodLevel >= 18
                && player.shouldHeal();

        if (regenAllowed) {
            // Well fed: heal half a heart every 80 ticks, at the cost of exhaustion.
            ++this.foodTimer;
            if (this.foodTimer >= 80) {
                player.heal(1.0F);
                this.addExhaustion(3.0F);
                this.foodTimer = 0;
            }
        } else if (this.foodLevel <= 0) {
            // Starving: apply damage every 80 ticks, bounded by difficulty thresholds.
            ++this.foodTimer;
            if (this.foodTimer >= 80) {
                boolean starveDamageAllowed = player.getHealth() > 10.0F
                        || difficulty == EnumDifficulty.HARD
                        || (player.getHealth() > 1.0F && difficulty == EnumDifficulty.NORMAL);
                if (starveDamageAllowed) {
                    player.attackEntityFrom(DamageSource.starve, 1.0F);
                }
                this.foodTimer = 0;
            }
        } else {
            this.foodTimer = 0;
        }
    }

    /** True when the player can benefit from eating (food level below the 20 maximum). */
    public boolean needFood() {
        return this.foodLevel < 20;
    }

    public float getSaturationLevel() {
        return this.foodSaturationLevel;
    }

    /** Applies the hunger/saturation values of the given food item. */
    public void addStats(ItemFood food, ItemStack stack) {
        this.addStats(food.getHealAmount(stack), food.getSaturationModifier(stack));
    }

    /**
     * Adds hunger points (capped at 20) and saturation
     * (capped at the resulting food level).
     */
    public void addStats(int hunger, float saturationModifier) {
        this.foodLevel = Math.min(hunger + this.foodLevel, 20);
        this.foodSaturationLevel = Math.min(
                this.foodSaturationLevel + (float) hunger * saturationModifier * 2.0F,
                (float) this.foodLevel);
    }

    /** Serializes all hunger state into the given NBT tag. */
    public void writeNBT(NBTTagCompound tag) {
        tag.setInteger("foodLevel", this.foodLevel);
        tag.setInteger("foodTickTimer", this.foodTimer);
        tag.setFloat("foodSaturationLevel", this.foodSaturationLevel);
        tag.setFloat("foodExhaustionLevel", this.foodExhaustionLevel);
    }

    /** Restores hunger state from NBT; silently keeps defaults when the key set is absent. */
    public void readNBT(NBTTagCompound tag) {
        if (tag.hasKey("foodLevel", 99)) {
            this.foodLevel = tag.getInteger("foodLevel");
            this.foodTimer = tag.getInteger("foodTickTimer");
            this.foodSaturationLevel = tag.getFloat("foodSaturationLevel");
            this.foodExhaustionLevel = tag.getFloat("foodExhaustionLevel");
        }
    }

    public void setFoodLevel(int level) {
        this.foodLevel = level;
    }

    public int getFoodLevel() {
        return this.foodLevel;
    }
}
|
package jetbrains.mps.lang.constraints.editor;
/*Generated by MPS */
import jetbrains.mps.editor.runtime.descriptor.AbstractEditorBuilder;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.mps.openapi.model.SNode;
import jetbrains.mps.openapi.editor.EditorContext;
import jetbrains.mps.openapi.editor.cells.EditorCell;
import jetbrains.mps.nodeEditor.cells.EditorCell_Collection;
import jetbrains.mps.nodeEditor.cellLayout.CellLayout_Indent;
import org.jetbrains.mps.openapi.language.SReferenceLink;
import jetbrains.mps.lang.editor.cellProviders.SReferenceCellProvider;
import jetbrains.mps.editor.runtime.impl.CellUtil;
import jetbrains.mps.nodeEditor.cellMenu.SReferenceSubstituteInfo;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.SNodeOperations;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.IAttributeDescriptor;
import jetbrains.mps.internal.collections.runtime.Sequence;
import jetbrains.mps.internal.collections.runtime.IWhereFilter;
import java.util.Objects;
import jetbrains.mps.lang.core.behavior.LinkAttribute__BehaviorDescriptor;
import jetbrains.mps.nodeEditor.EditorManager;
import jetbrains.mps.openapi.editor.update.AttributeKind;
import jetbrains.mps.nodeEditor.cells.EditorCell_Property;
import jetbrains.mps.nodeEditor.cells.EditorCell_RefPresentation;
import jetbrains.mps.smodel.action.IReferentPresentationProvider;
import jetbrains.mps.openapi.editor.style.Style;
import jetbrains.mps.editor.runtime.style.StyleImpl;
import jetbrains.mps.editor.runtime.style.StyleAttributes;
import jetbrains.mps.smodel.adapter.structure.MetaAdapterFactory;
import org.jetbrains.mps.openapi.language.SConcept;
/*package*/ class RefPresentationMigratedProblem_EditorBuilder_a extends AbstractEditorBuilder {
  // Node rendered by this editor builder; non-null for the builder's lifetime.
  @NotNull
  private SNode myNode;

  public RefPresentationMigratedProblem_EditorBuilder_a(@NotNull EditorContext context, @NotNull SNode node) {
    super(context);
    myNode = node;
  }

  @NotNull
  @Override
  public SNode getNode() {
    return myNode;
  }

  // Entry point invoked by the MPS editor infrastructure.
  /*package*/ EditorCell createCell() {
    return createCollection_0();
  }

  // Root cell: an indent-layout collection holding the single reference cell.
  private EditorCell createCollection_0() {
    EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Indent());
    editorCell.setCellId("Collection_v66xl_a");
    editorCell.setBig(true);
    setCellContext(editorCell);
    editorCell.addEditorCell(createRefCell_0());
    return editorCell;
  }

  // Builds the cell for the 'editor' reference: renders the target via
  // Inline_Builder0, attaches substitute-menu info, and wraps the cell in a
  // reference-attribute cell when a matching LinkAttribute is present on the node.
  private EditorCell createRefCell_0() {
    final SReferenceLink referenceLink = LINKS.editor$9JP0;
    SReferenceCellProvider provider = new SReferenceCellProvider(getNode(), referenceLink, getEditorContext()) {
      protected EditorCell createReferenceCell(final SNode targetNode) {
        EditorCell cell = getUpdateSession().updateReferencedNodeCell(() -> new Inline_Builder0(getEditorContext(), getNode(), targetNode).createCell(), targetNode, LINKS.editor$9JP0);
        CellUtil.setupIDeprecatableStyles(targetNode, cell);
        setSemanticNodeToCells(cell, getNode());
        installDeleteActions_notnull(cell);
        return cell;
      }
    };
    // Placeholder text shown while the reference has no target.
    provider.setNoTargetText("<no editor>");
    EditorCell editorCell = provider.createCell();

    if (editorCell.getSRole() == null) {
      editorCell.setReferenceCell(true);
      editorCell.setSRole(LINKS.editor$9JP0);
    }
    editorCell.setSubstituteInfo(new SReferenceSubstituteInfo(editorCell, referenceLink));
    // Find attributes attached to this node that annotate exactly this reference link.
    Iterable<SNode> referenceAttributes = SNodeOperations.ofConcept(new IAttributeDescriptor.AllAttributes().list(myNode), CONCEPTS.LinkAttribute$v_);
    Iterable<SNode> currentReferenceAttributes = Sequence.fromIterable(referenceAttributes).where(new IWhereFilter<SNode>() {
      public boolean accept(SNode it) {
        return Objects.equals(LinkAttribute__BehaviorDescriptor.getLink_id1avfQ4BEFo6.invoke(it), referenceLink);
      }
    });
    if (Sequence.fromIterable(currentReferenceAttributes).isNotEmpty()) {
      EditorManager manager = EditorManager.getInstanceFromContext(getEditorContext());
      return manager.createNodeRoleAttributeCell(Sequence.fromIterable(currentReferenceAttributes).first(), AttributeKind.REFERENCE, editorCell);
    } else
      return editorCell;
  }

  // Inline editor that renders the presentation of the referenced node.
  /*package*/ static class Inline_Builder0 extends AbstractEditorBuilder {
    @NotNull
    private SNode myNode;
    private SNode myReferencingNode;

    /*package*/ Inline_Builder0(@NotNull EditorContext context, SNode referencingNode, @NotNull SNode node) {
      super(context);
      myReferencingNode = referencingNode;
      myNode = node;
    }

    /*package*/ EditorCell createCell() {
      return createReferencePresentation_0();
    }

    @NotNull
    @Override
    public SNode getNode() {
      return myNode;
    }

    // Property cell showing the default presentation of the reference target;
    // AUTO_DELETABLE lets the cell be removed when the reference is cleared.
    private EditorCell createReferencePresentation_0() {
      EditorCell_Property editorCell = EditorCell_RefPresentation.create(getEditorContext(), myNode, myReferencingNode, IReferentPresentationProvider.getDefaultPresentation(LINKS.editor$9JP0));
      editorCell.setCellId("ReferencePresentation_v66xl_a0a0");
      Style style = new StyleImpl();
      style.set(StyleAttributes.AUTO_DELETABLE, true);
      editorCell.getStyle().putAll(style);
      return editorCell;
    }
  }

  // Meta-identifiers resolved once; the hex values are stable IDs emitted by the MPS generator.
  private static final class LINKS {
    /*package*/ static final SReferenceLink editor$9JP0 = MetaAdapterFactory.getReferenceLink(0x3f4bc5f5c6c14a28L, 0x8b10c83066ffa4a1L, 0x583cd121d513aac3L, 0x583cd121d513aac4L, "editor");
  }

  private static final class CONCEPTS {
    /*package*/ static final SConcept LinkAttribute$v_ = MetaAdapterFactory.getConcept(0xceab519525ea4f22L, 0x9b92103b95ca8c0cL, 0x2eb1ad060897da51L, "jetbrains.mps.lang.core.structure.LinkAttribute");
  }
}
|
/*
* Copyright 2019-2020 Zheng Jie
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.just.common.utils.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* <p>
* 验证码业务场景
* </p>
* @author Zheng Jie
* @date 2020-05-02
*/
@Getter
@AllArgsConstructor
public enum CodeBiEnum {

    /* Change the account e-mail, verified via the old e-mail */
    ONE(1, "旧邮箱修改邮箱"),
    /* Reset the password via e-mail */
    TWO(2, "通过邮箱修改密码");

    // Numeric code identifying the verification-code scenario.
    private final Integer code;
    // Human-readable (Chinese) description shown to users.
    private final String description;

    /**
     * Looks up the enum constant for the given code.
     *
     * @param code the scenario code; may be {@code null}
     * @return the matching constant, or {@code null} when {@code code} is
     *         {@code null} or matches no constant
     */
    public static CodeBiEnum find(Integer code) {
        // Guard: the original dereferenced a possibly-null code and threw NPE.
        if (code == null) {
            return null;
        }
        for (CodeBiEnum value : CodeBiEnum.values()) {
            if (code.equals(value.getCode())) {
                return value;
            }
        }
        return null;
    }
}
|
/**
* Copyright (c) 2010 Perforce Software. All rights reserved.
*/
package com.perforce.team.tests.search;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;
/**
* @author Kevin Sawicki (ksawicki@perforce.com)
*/
// JUnit 4 suite aggregating all Perforce search-related test classes into a single run.
@RunWith(Suite.class)
@SuiteClasses({ DepotPathTest.class, PluginTest.class, QueryOptionsTest.class,
        SearchQueryTest.class, SearchTest.class, SettingsTest.class })
public class SearchSuite {
}
|
class Solution {
public boolean XXX(TreeNode root) {
return XXX(root, root);
}
public boolean XXX(TreeNode left, TreeNode right) {
if (left == null && right == null){
return true;
}
if (left == null || right == null || left.val != right.val){
return false;
}
return XXX(left.left, right.right) && XXX(left.right, right.left);
}
}```
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.serveraction.kerberos;
import com.google.inject.Inject;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.controller.KerberosHelper;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
/**
* PrepareKerberosIdentitiesServerAction is a ServerAction implementation that prepares metadata needed
* to process Kerberos identities (principals and keytabs files).
*/
public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerberosServerAction {
  private final static Logger LOG = LoggerFactory.getLogger(PrepareKerberosIdentitiesServerAction.class);

  /**
   * KerberosHelper used to resolve descriptors, hosts to process, and auth-to-local rules.
   */
  @Inject
  private KerberosHelper kerberosHelper;

  // Factory for the data file that carries pending configuration changes to a later stage.
  @Inject
  private KerberosConfigDataFileWriterFactory kerberosConfigDataFileWriterFactory;

  /**
   * Called to execute this action. Upon invocation, calls
   * {@link KerberosServerAction#processIdentities(Map)}
   * to iterate through the Kerberos identity metadata and call
   * {@link PrepareKerberosIdentitiesServerAction#processIdentities(Map)}
   * for each identity to process.
   *
   * @param requestSharedDataContext a Map to be used a shared data among all ServerActions related
   *                                 to a given request
   * @return a CommandReport indicating the result of this action
   * @throws AmbariException
   * @throws InterruptedException
   */
  @Override
  public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws
      AmbariException, InterruptedException {

    Cluster cluster = getCluster();

    if (cluster == null) {
      throw new AmbariException("Missing cluster object");
    }

    KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster);

    Collection<String> identityFilter = getIdentityFilter();
    List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);

    Map<String, String> commandParameters = getCommandParameters();
    String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
    Map<String, Map<String, String>> kerberosConfigurations = new HashMap<String, Map<String, String>>();

    // Progress message pluralized by hand for the 0/1/many cases.
    int schCount = schToProcess.size();
    if (schCount == 0) {
      actionLog.writeStdOut("There are no components to process");
    } else if (schCount == 1) {
      actionLog.writeStdOut(String.format("Processing %d component", schCount));
    } else {
      actionLog.writeStdOut(String.format("Processing %d components", schCount));
    }

    // Builds identity metadata (principals/keytabs) for each host/component; the
    // INCLUDE_AMBARI_IDENTITY command parameter decides whether Ambari's own
    // identity is included.
    processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
        kerberosConfigurations, null, null, true, "true".equalsIgnoreCase(getCommandParameterValue(commandParameters,
        KerberosServerAction.INCLUDE_AMBARI_IDENTITY)));

    // Auth-to-local rules and configuration-change metadata are only produced
    // when the request explicitly asks for configuration updates.
    if ("true".equalsIgnoreCase(getCommandParameterValue(commandParameters, UPDATE_CONFIGURATIONS))) {
      processAuthToLocalRules(cluster, kerberosDescriptor, schToProcess, kerberosConfigurations, getDefaultRealm(commandParameters));
      processConfigurationChanges(dataDirectory, kerberosConfigurations, null);
    }

    return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
  }

  /**
   * Not used by this action: identities are handled in bulk by
   * {@code processServiceComponentHosts}, so per-identity processing is unsupported.
   */
  @Override
  protected CommandReport processIdentity(Map<String, String> identityRecord, String evaluatedPrincipal,
                                          KerberosOperationHandler operationHandler,
                                          Map<String, String> kerberosConfiguration,
                                          Map<String, Object> requestSharedDataContext)
      throws AmbariException {
    throw new UnsupportedOperationException();
  }

  /**
   * Calls {@link KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)}
   * with no filter on ServiceComponentHosts
   * <p/>
   * The <code>shouldProcessCommand</code> implementation passed to KerberosHelper#getServiceComponentHostsToProcess
   * always returns true, indicating to process all ServiceComponentHosts.
   *
   * @param cluster            the cluster
   * @param kerberosDescriptor the current Kerberos descriptor
   * @param identityFilter     a list of identities to include, or all if null @return the list of ServiceComponentHosts to process
   * @throws AmbariException
   * @see KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)
   */
  protected List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
                                                                         KerberosDescriptor kerberosDescriptor,
                                                                         Collection<String> identityFilter)
      throws AmbariException {
    return kerberosHelper.getServiceComponentHostsToProcess(cluster,
        kerberosDescriptor,
        getServiceComponentFilter(),
        getHostFilter(), identityFilter,
        // Accept-all predicate: no per-host filtering beyond the filters above.
        new KerberosHelper.Command<Boolean, ServiceComponentHost>() {
          @Override
          public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
            return true;
          }
        });
  }

  /**
   * Calls {@link KerberosHelper#getKerberosDescriptor(Cluster)}
   *
   * @param cluster cluster instance
   * @return the kerberos descriptor associated with the specified cluster
   * @throws AmbariException if unable to obtain the descriptor
   * @see KerberosHelper#getKerberosDescriptor(Cluster)
   */
  protected KerberosDescriptor getKerberosDescriptor(Cluster cluster)
      throws AmbariException {
    return kerberosHelper.getKerberosDescriptor(cluster);
  }

  /**
   * Conditionally calls {@link KerberosHelper#setAuthToLocalRules(KerberosDescriptor, Cluster, String, Map, Map)}
   * if there are ServiceComponentHosts to process
   *
   * @param cluster                cluster instance
   * @param kerberosDescriptor     the current Kerberos descriptor
   * @param schToProcess           a list of ServiceComponentHosts to process
   * @param kerberosConfigurations the Kerberos-specific configuration map
   * @param defaultRealm           the default realm
   * @throws AmbariException
   * @see KerberosHelper#setAuthToLocalRules(KerberosDescriptor, Cluster, String, Map, Map)
   */
  protected void processAuthToLocalRules(Cluster cluster, KerberosDescriptor kerberosDescriptor,
                                         List<ServiceComponentHost> schToProcess,
                                         Map<String, Map<String, String>> kerberosConfigurations,
                                         String defaultRealm)
      throws AmbariException {
    if (!schToProcess.isEmpty()) {
      actionLog.writeStdOut("Creating auth-to-local rules");
      kerberosHelper.setAuthToLocalRules(kerberosDescriptor, cluster, defaultRealm,
          kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor.getProperties()),
          kerberosConfigurations);
    }
  }

  /**
   * Processes configuration changes to determine if any work needs to be done.
   * <p/>
   * If work is to be done, a data file containing the details is created so it they changes may be
   * processed in the appropriate stage.
   *
   * @param dataDirectory          the directory in which to write the configuration changes data file
   * @param kerberosConfigurations the Kerberos-specific configuration map; entries become SET records
   * @param propertiesToBeRemoved  properties per config type to emit as REMOVE records; may be null
   * @throws AmbariException if the data directory is unset or the data file cannot be written
   */
  protected void processConfigurationChanges(String dataDirectory,
                                             Map<String, Map<String, String>> kerberosConfigurations,
                                             Map<String, Set<String>> propertiesToBeRemoved)
      throws AmbariException {
    actionLog.writeStdOut("Determining configuration changes");

    // If there are configurations to set, create a (temporary) data file to store the configuration
    // updates and fill it will the relevant configurations.
    if (!kerberosConfigurations.isEmpty()) {
      if (dataDirectory == null) {
        String message = "The data directory has not been set.  Generated data can not be stored.";
        LOG.error(message);
        throw new AmbariException(message);
      }

      File configFile = new File(dataDirectory, KerberosConfigDataFileWriter.DATA_FILE_NAME);
      KerberosConfigDataFileWriter kerberosConfDataFileWriter = null;

      actionLog.writeStdOut(String.format("Writing configuration changes metadata file to %s", configFile.getAbsolutePath()));
      try {
        kerberosConfDataFileWriter = kerberosConfigDataFileWriterFactory.createKerberosConfigDataFileWriter(configFile);

        // add properties to be set
        for (Map.Entry<String, Map<String, String>> entry : kerberosConfigurations.entrySet()) {
          String type = entry.getKey();
          Map<String, String> properties = entry.getValue();

          if (properties != null) {
            for (Map.Entry<String, String> configTypeEntry : properties.entrySet()) {
              kerberosConfDataFileWriter.addRecord(type,
                  configTypeEntry.getKey(),
                  configTypeEntry.getValue(),
                  KerberosConfigDataFileWriter.OPERATION_TYPE_SET);
            }
          }
        }

        // add properties to be removed
        if (propertiesToBeRemoved != null) {
          for (Map.Entry<String, Set<String>> entry : propertiesToBeRemoved.entrySet()) {
            String type = entry.getKey();
            Set<String> properties = entry.getValue();

            if (properties != null) {
              for (String property : properties) {
                // REMOVE records carry an empty value by convention.
                kerberosConfDataFileWriter.addRecord(type,
                    property,
                    "",
                    KerberosConfigDataFileWriter.OPERATION_TYPE_REMOVE);
              }
            }
          }
        }
      } catch (IOException e) {
        String message = String.format("Failed to write kerberos configurations file - %s", configFile.getAbsolutePath());
        LOG.error(message, e);
        actionLog.writeStdOut(message);
        actionLog.writeStdErr(message + "\n" + e.getLocalizedMessage());
        throw new AmbariException(message, e);
      } finally {
        // Best-effort close: a close failure is logged but does not mask a prior exception.
        if (kerberosConfDataFileWriter != null) {
          try {
            kerberosConfDataFileWriter.close();
          } catch (IOException e) {
            String message = "Failed to close the kerberos configurations file writer";
            LOG.warn(message, e);
            actionLog.writeStdOut(message);
            actionLog.writeStdErr(message + "\n" + e.getLocalizedMessage());
          }
        }
      }
    }
  }
}
|
package com.ottoszika.sokoban.contracts;
/**
 * Implemented by game objects that participate in collision/push mechanics.
 * NOTE(review): the semantics of mass (units; whether a larger value means
 * harder to push) are not visible from this file — confirm against the
 * collision-handling code that consumes it.
 */
public interface Collidable {
    /**
     * Get mass.
     *
     * @return the mass value.
     */
    int getMass();
}
|
package seedu.address.logic.commands;
import static java.util.Objects.requireNonNull;
import static seedu.address.logic.parser.CliSyntax.PREFIX_CALORIES;
import static seedu.address.logic.parser.CliSyntax.PREFIX_DATE;
import static seedu.address.logic.parser.CliSyntax.PREFIX_DESCRIPTION;
import static seedu.address.logic.parser.CliSyntax.PREFIX_NAME;
import static seedu.address.model.ExerciseModel.PREDICATE_SHOW_ALL_EXERCISE;
import java.util.List;
import java.util.Optional;
import seedu.address.commons.core.Messages;
import seedu.address.commons.core.index.Index;
import seedu.address.commons.util.CollectionUtil;
import seedu.address.logic.commands.exceptions.CommandException;
import seedu.address.model.ExerciseModel;
import seedu.address.model.exercise.Calories;
import seedu.address.model.exercise.Date;
import seedu.address.model.exercise.Description;
import seedu.address.model.exercise.Exercise;
import seedu.address.model.exercise.Name;
/**
* Edits the details of an existing exercise in the exercise book.
*/
/**
 * Edits the details of an existing exercise in the exercise book.
 */
public class UpdateExerciseCommand extends CommandForExercise {

    public static final String COMMAND_WORD = "update";

    // Fix: each example argument is now followed by a space; previously the
    // example rendered run-together as "n/Push upd/30date/09-07-2020c/260",
    // and a stray space followed the first "\n".
    public static final String MESSAGE_USAGE = COMMAND_WORD + ": Edits the details of the exercise "
            + "by the index number used in the displayed exercise list.\n"
            + "Existing values will be overwritten by the input values.\n"
            + "Parameters: INDEX (must be a positive integer) "
            + "[" + PREFIX_NAME + "EXERCISE] "
            + "[" + PREFIX_DESCRIPTION + "DESCRIPTION] "
            + "[" + PREFIX_DATE + "DATE] "
            + "[" + PREFIX_CALORIES + "CALORIES]\n"
            + "Example: " + COMMAND_WORD + " 1 "
            + PREFIX_NAME + "Push up "
            + PREFIX_DESCRIPTION + "30 "
            + PREFIX_DATE + "09-07-2020 "
            + PREFIX_CALORIES + "260";

    public static final String MESSAGE_EDIT_EXERCISE_SUCCESS = "Edited Exercise: %1$s";
    public static final String MESSAGE_NOT_EDITED = "At least one field to edit must be provided.";
    public static final String MESSAGE_DUPLICATE_EXERCISE = "This exercise already exists in the exercise book.";

    private final Index index;
    private final EditExerciseDescriptor editExerciseDescriptor;

    /**
     * @param index of the exercise in the filtered exercise list to edit
     * @param editExerciseDescriptor details to edit the exercise with
     */
    public UpdateExerciseCommand(Index index, EditExerciseDescriptor editExerciseDescriptor) {
        requireNonNull(index);
        requireNonNull(editExerciseDescriptor);

        this.index = index;
        // Defensive copy so later mutation of the caller's descriptor cannot affect this command.
        this.editExerciseDescriptor = new UpdateExerciseCommand.EditExerciseDescriptor(editExerciseDescriptor);
    }

    /**
     * Applies the edit: validates the index, builds the edited exercise,
     * rejects duplicates, and refreshes the filtered list.
     *
     * @throws CommandException if the index is out of range or the edit would duplicate an exercise
     */
    @Override
    public CommandResult execute(ExerciseModel model) throws CommandException {
        requireNonNull(model);
        List<Exercise> lastShownList = model.getFilteredExerciseList();

        if (index.getZeroBased() >= lastShownList.size()) {
            throw new CommandException(Messages.MESSAGE_INVALID_EXERCISE_DISPLAYED_INDEX);
        }

        Exercise exerciseToEdit = lastShownList.get(index.getZeroBased());
        Exercise editedExercise = createEditedExercise(exerciseToEdit, editExerciseDescriptor);

        // An unchanged exercise may legitimately "exist"; only reject when the
        // edit turns it into a different exercise that already exists.
        if (!exerciseToEdit.isSameExercise(editedExercise) && model.hasExercise(editedExercise)) {
            throw new CommandException(MESSAGE_DUPLICATE_EXERCISE);
        }

        model.setExercise(exerciseToEdit, editedExercise);
        model.updateFilteredExerciseList(PREDICATE_SHOW_ALL_EXERCISE);
        return new CommandResult(String.format(MESSAGE_EDIT_EXERCISE_SUCCESS, editedExercise));
    }

    /**
     * Creates and returns a {@code Exercise} with the details of {@code exerciseToEdit}
     * edited with {@code editExerciseDescriptor}. Unset descriptor fields fall
     * back to the original exercise's values.
     */
    private static Exercise createEditedExercise(Exercise exerciseToEdit,
                                                 UpdateExerciseCommand.EditExerciseDescriptor editExerciseDescriptor) {
        assert exerciseToEdit != null;

        Name updatedName = editExerciseDescriptor.getName().orElse(exerciseToEdit.getName());
        Description updatedDescription = editExerciseDescriptor.getDescription()
                .orElse(exerciseToEdit.getDescription());
        Date updatedDate = editExerciseDescriptor.getDate().orElse(exerciseToEdit.getDate());
        Calories updatedCalories = editExerciseDescriptor.getCalories().orElse(exerciseToEdit.getCalories());

        return new Exercise(updatedName, updatedDescription, updatedDate, updatedCalories);
    }

    @Override
    public boolean equals(Object other) {
        // short circuit if same object
        if (other == this) {
            return true;
        }

        // instanceof handles nulls
        if (!(other instanceof UpdateExerciseCommand)) {
            return false;
        }

        // state check
        UpdateExerciseCommand e = (UpdateExerciseCommand) other;
        return index.equals(e.index)
                && editExerciseDescriptor.equals(e.editExerciseDescriptor);
    }

    /**
     * Stores the details to edit the exercise with. Each non-empty field value will replace the
     * corresponding field value of the exercise.
     */
    public static class EditExerciseDescriptor {
        // identity field
        private Name name;
        private Date date;

        // data field
        private Description description;
        private Calories calories;

        public EditExerciseDescriptor() {
        }

        /**
         * Copy constructor.
         */
        public EditExerciseDescriptor(UpdateExerciseCommand.EditExerciseDescriptor toCopy) {
            setName(toCopy.name);
            setDate(toCopy.date);
            setDescription(toCopy.description);
            setCalories(toCopy.calories);
        }

        /**
         * Returns true if at least one field is edited.
         */
        public boolean isAnyFieldEdited() {
            return CollectionUtil.isAnyNonNull(name, date, description, calories);
        }

        public void setName(Name name) {
            this.name = name;
        }

        public Optional<Name> getName() {
            return Optional.ofNullable(name);
        }

        public void setDate(Date date) {
            this.date = date;
        }

        public Optional<Date> getDate() {
            return Optional.ofNullable(date);
        }

        public void setDescription(Description description) {
            this.description = description;
        }

        public Optional<Description> getDescription() {
            return Optional.ofNullable(description);
        }

        public void setCalories(Calories calories) {
            this.calories = calories;
        }

        public Optional<Calories> getCalories() {
            return Optional.ofNullable(calories);
        }

        @Override
        public boolean equals(Object other) {
            // short circuit if same object
            if (other == this) {
                return true;
            }

            // instanceof handles nulls
            if (!(other instanceof UpdateExerciseCommand.EditExerciseDescriptor)) {
                return false;
            }

            // state check
            UpdateExerciseCommand.EditExerciseDescriptor e = (UpdateExerciseCommand.EditExerciseDescriptor) other;

            return getName().equals(e.getName())
                    && getDate().equals(e.getDate())
                    && getDescription().equals(e.getDescription())
                    && getCalories().equals(e.getCalories());
        }
    }
}
|
package com.mezhou887.crawler.web;
import com.mezhou887.crawler.model.User;
import com.mezhou887.crawler.service.UserService;
import com.mezhou887.system.core.Result;
import com.mezhou887.system.core.ResultGenerator;
import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import java.util.List;
/**
* Created by CodeGenerator on 2017/07/17.
*/
@RestController
@RequestMapping("/user")
public class UserController {
    @Resource
    private UserService userService;

    /** Persists a new user. */
    @PostMapping("/add")
    public Result add(User user) {
        userService.save(user);
        return ResultGenerator.genSuccessResult();
    }

    /** Deletes the user with the given id. */
    @PostMapping("/delete")
    public Result delete(@RequestParam Integer id) {
        userService.deleteById(id);
        return ResultGenerator.genSuccessResult();
    }

    /** Updates an existing user. */
    @PostMapping("/update")
    public Result update(User user) {
        userService.update(user);
        return ResultGenerator.genSuccessResult();
    }

    /** Returns the user with the given id. */
    @PostMapping("/detail")
    public Result detail(@RequestParam Integer id) {
        User user = userService.findById(id);
        return ResultGenerator.genSuccessResult(user);
    }

    /**
     * Returns a page of users.
     *
     * @param page page number; defaults to 0 — presumably PageHelper treats 0
     *             as "no paging" (query all), TODO confirm against PageHelper docs
     * @param size page size; defaults to 0, same caveat as above
     */
    @PostMapping("/list")
    public Result list(@RequestParam(defaultValue = "0") Integer page, @RequestParam(defaultValue = "0") Integer size) {
        PageHelper.startPage(page, size);
        List<User> list = userService.findAll();
        // Fix: use the generic PageInfo<User> instead of the raw type to avoid
        // unchecked-conversion warnings; runtime behavior is unchanged.
        PageInfo<User> pageInfo = new PageInfo<>(list);
        return ResultGenerator.genSuccessResult(pageInfo);
    }
}
|
package me.gavincook.commons.mail;
import me.gavincook.commons.BaseTest;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.Date;
/**
 * Manual/integration test for {@code MailUtil}: sends a real mail with two
 * attachments through the configured SMTP host.
 *
 * NOTE(review): real credentials are hard-coded below — move them to environment
 * variables or an ignored config file, and rotate the leaked password.
 *
 * @author Divers King
 * @date 2018-01-09 15:11
 * @since 1.0.0
 **/
public class MailUtilTest extends BaseTest {
public static String fromMail = "564543626@qq.com"; // "market@aichainnews.com";
public static String password = "gpqgmjgqmmqybdcb"; // "EasyVaas2018"
public static String host = "smtp.qq.com"; // smtp.qq.com // smtp.163.com //smtp.exmail.qq.com
public static String toMail = "lywber@163.com";
public static String attachment_1 = MailUtilTest.class.getClassLoader().getResource("mail/attachment_1.txt").getPath();
public static String attachment_2 = MailUtilTest.class.getClassLoader().getResource("mail/attachment_2.txt").getPath();

/** Assembles a MailInfo with recipients, cc, subject, body and both attachments. */
private MailInfo createMailInfo(){
MailInfo mailInfo = new MailInfoBuilder()
.host(host)
.fromMail(fromMail)
.fromPassword(password)
.toMails(new String[]{toMail, "1027102799@qq.com"})
.ccMails(new String[]{"651551319@qq.com"})
.subject("关于2017至2018年度寒假通知" + new Date())
.content("经研究决定,放假日期从1月24日~2月28日。请各位老师做好工作安排。")
.filePaths(new String[]{attachment_1, attachment_2})
.needSSL(true)
.build();
return mailInfo;
}

/**
 * Sends the mail and asserts success. (Method name kept for compatibility,
 * though it tests mail sending, not image scaling.)
 */
@Test
public void testScaleImage() throws IOException {
MailInfo mailInfo = createMailInfo();
boolean result = MailUtil.send(mailInfo);
// TestNG's Assert.assertEquals takes (actual, expected); the original passed
// them reversed. assertTrue states the intent directly and fails clearly.
Assert.assertTrue(result);
}
}
|
package com.ming.blog.controller;
import com.ming.blog.entity.Test;
import com.ming.blog.event.EventTypeEnum;
import com.ming.blog.event.RedisEventChannel;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
/**
 * Ad-hoc endpoints for exercising the Redis template and the Redis event channel.
 *
 * @author Jiang Zaiming
 * @date 2020/6/4 4:27 下午
 */
@RestController
//@RequestMapping("/test")
public class TestController {
@Resource
private RedisTemplate redisTemplate;
@Resource
private RedisEventChannel redisEventChannel;

/** Writes a fixed value under the "name" key and returns a marker string. */
@GetMapping("/test1")
public String test1() {
redisTemplate.opsForValue().set("name", "111111111");
return "test";
}

/** Reads back whatever is currently stored under the "name" key. */
@GetMapping("/test2")
public Object test2() {
return redisTemplate.opsForValue().get("name");
}

/** Publishes a motion-detection event, then reads the cached "name" value. */
@GetMapping("/test3")
public Object test3() {
redisEventChannel.publish(EventTypeEnum.MOTION_DETECTION_TASK, new Test(1, "小明"));
return redisTemplate.opsForValue().get("name");
}
}
|
package com.shibofu.spring.db2.service;
import com.shibofu.spring.db2.dao.MoneyDao;
import com.shibofu.spring.model.Money;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
 * Service facade over {@link MoneyDao} (secondary data source, per the db2 package).
 *
 * @author potter.fu
 * @date 2018-12-07 15:34
 */
@Service
public class MoneyService {
@Autowired
private MoneyDao moneyDao;
/**
 * Looks up a money record by its primary key.
 * (The original comment said "find user by name", which did not match the code.)
 *
 * @param id primary key of the money record
 * @return the matching record — presumably {@code null} when no row matches; confirm against the DAO mapper
 */
public Money selectMoneyById(int id) {
return moneyDao.findMoneyById(id);
}
}
|
/**
* Created on 2018/8/11.
*/
package jetcache.samples.springboot;
import com.alicp.jetcache.anno.Cached;
/**
 * User lookup service whose results are cached via JetCache {@link Cached} annotations.
 *
 * @author <a href="mailto:areyouok@gmail.com">huangli</a>
 */
public interface UserService {
/**
 * Loads a user by id; results are cached in the cache named "loadUser" with
 * expire = 10 (JetCache's default time unit — presumably seconds; confirm configuration).
 */
@Cached(name = "loadUser", expire = 10)
User loadUser(long userId);
/**
 * Same lookup, but cached in the separately configured "mycluster" cache area
 * (backed by a Jedis cluster, per the method name — confirm area configuration).
 */
@Cached(area = "mycluster", name = "loadUserWithJedisCluster", expire = 10)
User loadUserWithJedisCluster(long userId);
}
|
/*
* Copyright (c) 2005-2022 Xceptance Software Technologies GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.xceptance.xlt.api.data;
import com.xceptance.xlt.api.util.XltException;
/**
 * Thrown by a {@link DataSetProvider} implementation in case an error occurred when reading or processing test data set
 * files. Mirrors the four standard exception constructors.
 */
public class DataSetProviderException extends XltException
{
/**
 * serialVersionUID (fixed at 1L; change only on an incompatible serialized-form change)
 */
private static final long serialVersionUID = 1L;
/**
 * Creates an exception without a detail message or cause.
 */
public DataSetProviderException()
{
}
/**
 * Creates an exception with the given detail message.
 *
 * @param message
 * the message
 */
public DataSetProviderException(final String message)
{
super(message);
}
/**
 * Creates an exception with the given cause.
 *
 * @param cause
 * the cause
 */
public DataSetProviderException(final Throwable cause)
{
super(cause);
}
/**
 * Creates an exception with the given detail message and cause.
 *
 * @param message
 * the message
 * @param cause
 * the cause
 */
public DataSetProviderException(final String message, final Throwable cause)
{
super(message, cause);
}
}
|
/*
* Copyright (c) 2006, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
package se.sics.cooja.interfaces;
import java.text.NumberFormat;
import java.util.*;
import javax.swing.*;
import org.apache.log4j.Logger;
import org.jdom.Element;
import se.sics.cooja.*;
/**
 * Mote 3D position.
 *
 * <p>
 * This observable notifies its observers whenever the position changes.
 *
 * @author Fredrik Osterlind
 */
@ClassDescription("Position")
public class Position extends MoteInterface {
private static Logger logger = Logger.getLogger(Position.class);
private Mote mote = null;
// Coordinates stored as [x, y, z].
private double[] coords = new double[3];

/**
 * Creates a position for given mote with coordinates (x=0, y=0, z=0).
 *
 * @param mote
 * position's mote
 * @see Mote
 * @see se.sics.cooja.MoteInterfaceHandler
 */
public Position(Mote mote) {
this.mote = mote;
// double literals instead of the original float literals (0.0f) — same values,
// no implicit widening.
coords[0] = 0.0;
coords[1] = 0.0;
coords[2] = 0.0;
}

/**
 * Set position to (x,y,z) and notify observers, passing the mote as argument.
 *
 * @param x New X coordinate
 * @param y New Y coordinate
 * @param z New Z coordinate
 */
public void setCoordinates(double x, double y, double z) {
coords[0] = x;
coords[1] = y;
coords[2] = z;
this.setChanged();
this.notifyObservers(mote);
}

/**
 * @return X coordinate
 */
public double getXCoordinate() {
return coords[0];
}

/**
 * @return Y coordinate
 */
public double getYCoordinate() {
return coords[1];
}

/**
 * @return Z coordinate
 */
public double getZCoordinate() {
return coords[2];
}

/**
 * Calculates the Euclidean distance from this position to given position.
 *
 * @param pos Compared position
 * @return Distance
 */
public double getDistanceTo(Position pos) {
// The original wrapped each delta in Math.abs() before squaring; squaring
// already discards the sign, so plain differences are equivalent and clearer.
double dx = coords[0] - pos.getXCoordinate();
double dy = coords[1] - pos.getYCoordinate();
double dz = coords[2] - pos.getZCoordinate();
return Math.sqrt(dx * dx + dy * dy + dz * dz);
}

/**
 * Calculates distance from associated mote to another mote.
 *
 * @param m Another mote
 * @return Distance
 */
public double getDistanceTo(Mote m) {
return getDistanceTo(m.getInterfaces().getPosition());
}

/**
 * Builds a panel showing the current coordinates, kept up to date via an
 * observer registered on this interface.
 */
public JPanel getInterfaceVisualizer() {
JPanel panel = new JPanel();
panel.setLayout(new BoxLayout(panel, BoxLayout.Y_AXIS));
final NumberFormat form = NumberFormat.getNumberInstance();
final JLabel positionLabel = new JLabel();
positionLabel.setText("x=" + form.format(getXCoordinate()) + " "
+ "y=" + form.format(getYCoordinate()) + " "
+ "z=" + form.format(getZCoordinate()));
panel.add(positionLabel);
Observer observer;
this.addObserver(observer = new Observer() {
public void update(Observable obs, Object obj) {
positionLabel.setText("x=" + form.format(getXCoordinate()) + " "
+ "y=" + form.format(getYCoordinate()) + " "
+ "z=" + form.format(getZCoordinate()));
}
});
// Saving observer reference for releaseInterfaceVisualizer
panel.putClientProperty("intf_obs", observer);
return panel;
}

/**
 * Detaches the observer stored on the panel by getInterfaceVisualizer.
 */
public void releaseInterfaceVisualizer(JPanel panel) {
Observer observer = (Observer) panel.getClientProperty("intf_obs");
if (observer == null) {
logger.fatal("Error when releasing panel, observer is null");
return;
}
this.deleteObserver(observer);
}

/**
 * Serializes the coordinates as three XML elements: x, y, z.
 */
public Collection<Element> getConfigXML() {
Vector<Element> config = new Vector<Element>();
Element element;
// X coordinate
element = new Element("x");
element.setText(Double.toString(getXCoordinate()));
config.add(element);
// Y coordinate
element = new Element("y");
element.setText(Double.toString(getYCoordinate()));
config.add(element);
// Z coordinate
element = new Element("z");
element.setText(Double.toString(getZCoordinate()));
config.add(element);
return config;
}

/**
 * Restores coordinates from XML; missing elements default to 0 and observers
 * are notified once via setCoordinates.
 */
public void setConfigXML(Collection<Element> configXML, boolean visAvailable) {
double x = 0, y = 0, z = 0;
for (Element element : configXML) {
if (element.getName().equals("x")) {
x = Double.parseDouble(element.getText());
}
if (element.getName().equals("y")) {
y = Double.parseDouble(element.getText());
}
if (element.getName().equals("z")) {
z = Double.parseDouble(element.getText());
}
}
setCoordinates(x, y, z);
}
}
|
/*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.r2dbc.mssql;
import io.r2dbc.spi.IsolationLevel;
/**
 * SQL Server-specific transaction isolation levels.
 * <p>
 * For more information check:
 * <a href="https://docs.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-2017">SQL Server Isolation Levels</a>
 *
 * @author Hebert Coelho
 * @author Mark Paluch
 * @see IsolationLevel
 */
public final class MssqlIsolationLevel {
// Constant holder; not instantiable.
private MssqlIsolationLevel() {
}
/**
 * The read committed isolation level.
 */
public static final IsolationLevel READ_COMMITTED = IsolationLevel.READ_COMMITTED;
/**
 * The read uncommitted isolation level.
 */
public static final IsolationLevel READ_UNCOMMITTED = IsolationLevel.READ_UNCOMMITTED;
/**
 * The repeatable read isolation level.
 */
public static final IsolationLevel REPEATABLE_READ = IsolationLevel.REPEATABLE_READ;
/**
 * The serializable isolation level.
 */
public static final IsolationLevel SERIALIZABLE = IsolationLevel.SERIALIZABLE;
/**
 * The snapshot isolation level (SQL Server-specific, hence created via
 * {@code IsolationLevel.valueOf} rather than a standard constant).
 */
public static final IsolationLevel SNAPSHOT = IsolationLevel.valueOf("SNAPSHOT");
}
|
package org.cloudfoundry.autoscaler.scheduler.util;
/**
 * URL-building helpers for the scaling-engine REST API.
 */
public class ScalingEngineUtil {

// Static utility holder; not instantiable (added to prevent accidental construction).
private ScalingEngineUtil() {
}

/**
 * Builds the scaling-engine path for an application's active schedule.
 *
 * @param scalingEngineUrl base URL of the scaling engine (no trailing slash expected)
 * @param appId application identifier
 * @param scheduleId schedule identifier
 * @return {@code scalingEngineUrl + "/v1/apps/" + appId + "/active_schedules/" + scheduleId}
 */
public static String getScalingEngineActiveSchedulePath(
String scalingEngineUrl, String appId, Long scheduleId) {
return scalingEngineUrl + "/v1/apps/" + appId + "/active_schedules/" + scheduleId;
}
}
|
/*
* Copyright 2018 The Feast Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package feast.ingestion.model;
import feast.types.FeatureProto.Feature;
import feast.types.ValueProto.Value;
/**
 * Static factory helpers for building {@link Feature} protobuf messages.
 */
public class Features {
// Utility class; prevent instantiation.
private Features() {}
/**
 * Builds a {@link Feature} with the given id and value.
 *
 * @param id feature id
 * @param value feature value
 * @return the assembled Feature message
 */
public static Feature of(String id, Value value) {
return Feature.newBuilder().setId(id).setValue(value).build();
}
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ops4j.pax.web.itest.tomcat;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
import static org.ops4j.pax.exam.OptionUtils.combine;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.Configuration;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.web.itest.base.VersionUtil;
import org.ops4j.pax.web.itest.common.AbstractWarBasicAuthIntegrationTest;
/**
 * Runs the shared WAR basic-auth integration test against the Tomcat container.
 *
 * @author Achim Nierbeck
 */
@RunWith(PaxExam.class)
public class WarBasicAuthIntegrationTest extends AbstractWarBasicAuthIntegrationTest {
/**
 * Pax Exam configuration: the base Tomcat setup combined with the
 * tomcat-auth-config-fragment bundle. noStart() — presumably because the
 * bundle is an OSGi fragment that attaches to its host; confirm.
 */
@Configuration
public Option[] configuration() {
return combine(
configureTomcat(),
mavenBundle().groupId("org.ops4j.pax.web.samples")
.artifactId("tomcat-auth-config-fragment")
.version(VersionUtil.getProjectVersion()).noStart());
}
}
|
package com.hungry.hotel.hungryhoteladmin.orders.model;
/**
 * Plain data holder for a menu dish (id, display name, unit price).
 */
public class Dish {
private int dishId;
private String dishName;
// Unit price kept as double to match the original model; consider BigDecimal
// if monetary rounding ever matters.
private double dishPrice;

/** No-arg constructor for frameworks/serializers; fields start at Java defaults. */
public Dish() {
}

/**
 * @param dishId unique dish identifier
 * @param dishName display name
 * @param dishPrice unit price
 */
public Dish(int dishId, String dishName, double dishPrice) {
this.dishId = dishId;
this.dishName = dishName;
this.dishPrice = dishPrice;
}

public int getDishId() {
return dishId;
}

public void setDishId(int dishId) {
this.dishId = dishId;
}

public String getDishName() {
return dishName;
}

public void setDishName(String dishName) {
this.dishName = dishName;
}

public double getDishPrice() {
return dishPrice;
}

public void setDishPrice(double dishPrice) {
this.dishPrice = dishPrice;
}

/** Debug-friendly representation; purely additive, existing callers unaffected. */
@Override
public String toString() {
return "Dish{dishId=" + dishId + ", dishName='" + dishName + "', dishPrice=" + dishPrice + "}";
}
}
|
package com.example.networkstatus;
/**
 * Callback for network-connectivity changes. Annotated as a functional
 * interface (single abstract method) so it can be implemented with a lambda;
 * the annotation also makes the compiler reject accidental extra methods.
 */
@FunctionalInterface
public interface OnConnectionListener {
/**
 * @param state connection state label
 * @param status human-readable status text
 * @param response outcome flag — presumably true on a successful connectivity check; confirm with callers
 */
void notifyApplication(String state, String status, boolean response);
}
|
/*
* Copyright 2010-2019 Boxfuse GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flywaydb.core.internal.logging.slf4j;
import org.flywaydb.core.api.logging.Log;
import org.flywaydb.core.api.logging.LogCreator;
import org.slf4j.LoggerFactory;
/**
* Log Creator for Slf4j.
*/
public class Slf4jLogCreator implements LogCreator {
public Log createLogger(Class<?> clazz) {
return new Slf4jLog(LoggerFactory.getLogger(clazz));
}
}
|
/**
*/
package top.failureLogic;
import org.eclipse.emf.ecore.EAttribute;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.EEnum;
import org.eclipse.emf.ecore.EPackage;
import org.eclipse.emf.ecore.EReference;
import top.integration.IntegrationPackage;
import top.odeBase.OdeBasePackage;
/**
* <!-- begin-user-doc -->
* The <b>Package</b> for the model.
* It contains accessors for the meta objects to represent
* <ul>
* <li>each class,</li>
* <li>each feature of each class,</li>
* <li>each operation of each class,</li>
* <li>each enum,</li>
* <li>and each data type</li>
* </ul>
* <!-- end-user-doc -->
* @see top.failureLogic.FailureLogic_Factory
* @model kind="package"
* @generated
*/
public interface FailureLogic_Package extends EPackage {
/**
* The package name.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
String eNAME = "failureLogic";
/**
* The package namespace URI.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
String eNS_URI = "http://www.deis-project.eu/ode/mergedODE/failureLogic";
/**
* The package namespace name.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
String eNS_PREFIX = "failureLogic_";
/**
* The singleton instance of the package.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
FailureLogic_Package eINSTANCE = top.failureLogic.impl.FailureLogic_PackageImpl.init();
/**
* The meta object id for the '{@link top.failureLogic.impl.FailureLogicPackageImpl <em>Failure Logic Package</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureLogicPackageImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureLogicPackage()
* @generated
*/
int FAILURE_LOGIC_PACKAGE = 0;
/**
* The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE__KEY_VALUE_MAPS = IntegrationPackage.ODE_PRODUCT_PACKAGE__KEY_VALUE_MAPS;
/**
* The feature id for the '<em><b>Id</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE__ID = IntegrationPackage.ODE_PRODUCT_PACKAGE__ID;
/**
* The feature id for the '<em><b>Name</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE__NAME = IntegrationPackage.ODE_PRODUCT_PACKAGE__NAME;
/**
* The feature id for the '<em><b>Description</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE__DESCRIPTION = IntegrationPackage.ODE_PRODUCT_PACKAGE__DESCRIPTION;
/**
* The feature id for the '<em><b>Failure Models</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE__FAILURE_MODELS = IntegrationPackage.ODE_PRODUCT_PACKAGE_FEATURE_COUNT + 0;
/**
* The number of structural features of the '<em>Failure Logic Package</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE_FEATURE_COUNT = IntegrationPackage.ODE_PRODUCT_PACKAGE_FEATURE_COUNT + 1;
/**
* The number of operations of the '<em>Failure Logic Package</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_LOGIC_PACKAGE_OPERATION_COUNT = IntegrationPackage.ODE_PRODUCT_PACKAGE_OPERATION_COUNT + 0;
/**
* The meta object id for the '{@link top.failureLogic.impl.FailureImpl <em>Failure</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailure()
* @generated
*/
int FAILURE = 1;
/**
* The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
/**
* The feature id for the '<em><b>Id</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__ID = OdeBasePackage.BASE_ELEMENT__ID;
/**
* The feature id for the '<em><b>Name</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
/**
* The feature id for the '<em><b>Description</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
/**
* The feature id for the '<em><b>Origin Type</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__ORIGIN_TYPE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
/**
* The feature id for the '<em><b>Failure Class</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__FAILURE_CLASS = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
/**
* The feature id for the '<em><b>Failure Rate</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__FAILURE_RATE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
/**
* The feature id for the '<em><b>Is Ccf</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__IS_CCF = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 3;
/**
* The feature id for the '<em><b>Failure Prob Distribution</b></em>' containment reference.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__FAILURE_PROB_DISTRIBUTION = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 4;
/**
* The feature id for the '<em><b>Ccf Failures</b></em>' reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE__CCF_FAILURES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 5;
/**
* The number of structural features of the '<em>Failure</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 6;
/**
* The number of operations of the '<em>Failure</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
/**
* The meta object id for the '{@link top.failureLogic.impl.SecurityViolationImpl <em>Security Violation</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.SecurityViolationImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getSecurityViolation()
* @generated
*/
int SECURITY_VIOLATION = 2;
/**
* The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__KEY_VALUE_MAPS = FAILURE__KEY_VALUE_MAPS;
/**
* The feature id for the '<em><b>Id</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__ID = FAILURE__ID;
/**
* The feature id for the '<em><b>Name</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__NAME = FAILURE__NAME;
/**
* The feature id for the '<em><b>Description</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__DESCRIPTION = FAILURE__DESCRIPTION;
/**
* The feature id for the '<em><b>Origin Type</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__ORIGIN_TYPE = FAILURE__ORIGIN_TYPE;
/**
* The feature id for the '<em><b>Failure Class</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__FAILURE_CLASS = FAILURE__FAILURE_CLASS;
/**
* The feature id for the '<em><b>Failure Rate</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__FAILURE_RATE = FAILURE__FAILURE_RATE;
/**
* The feature id for the '<em><b>Is Ccf</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__IS_CCF = FAILURE__IS_CCF;
/**
* The feature id for the '<em><b>Failure Prob Distribution</b></em>' containment reference.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__FAILURE_PROB_DISTRIBUTION = FAILURE__FAILURE_PROB_DISTRIBUTION;
/**
* The feature id for the '<em><b>Ccf Failures</b></em>' reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION__CCF_FAILURES = FAILURE__CCF_FAILURES;
/**
* The number of structural features of the '<em>Security Violation</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION_FEATURE_COUNT = FAILURE_FEATURE_COUNT + 0;
/**
* The number of operations of the '<em>Security Violation</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int SECURITY_VIOLATION_OPERATION_COUNT = FAILURE_OPERATION_COUNT + 0;
/**
* The meta object id for the '{@link top.failureLogic.impl.FailureModelImpl <em>Failure Model</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureModelImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureModel()
* @generated
*/
int FAILURE_MODEL = 3;
/**
* The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
/**
* The feature id for the '<em><b>Id</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__ID = OdeBasePackage.BASE_ELEMENT__ID;
/**
* The feature id for the '<em><b>Name</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
/**
* The feature id for the '<em><b>Description</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
/**
* The feature id for the '<em><b>Minimal Cutsets</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__MINIMAL_CUTSETS = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
/**
* The feature id for the '<em><b>Failures</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__FAILURES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
/**
* The feature id for the '<em><b>Sub Models</b></em>' reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL__SUB_MODELS = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
/**
* The number of structural features of the '<em>Failure Model</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 3;
/**
* The number of operations of the '<em>Failure Model</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int FAILURE_MODEL_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
/**
* The meta object id for the '{@link top.failureLogic.impl.MinimalCutSetsImpl <em>Minimal Cut Sets</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.MinimalCutSetsImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getMinimalCutSets()
* @generated
*/
int MINIMAL_CUT_SETS = 4;
/**
* The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int MINIMAL_CUT_SETS__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
/**
* The feature id for the '<em><b>Id</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int MINIMAL_CUT_SETS__ID = OdeBasePackage.BASE_ELEMENT__ID;
/**
* The feature id for the '<em><b>Name</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int MINIMAL_CUT_SETS__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
/**
* The feature id for the '<em><b>Description</b></em>' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
int MINIMAL_CUT_SETS__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
/**
* The feature id for the '<em><b>Cutsets</b></em>' containment reference list.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
	int MINIMAL_CUT_SETS__CUTSETS = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0; // NOTE(review): EMF-generated constants — regenerate from the model rather than hand-editing values.
	/**
	 * The feature id for the '<em><b>Failures</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUT_SETS__FAILURES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>Minimal Cut Sets</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUT_SETS_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>Minimal Cut Sets</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUT_SETS_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- MinimalCutset (classifier id 5): reuses BaseElement's feature ids; its own features are offset from BASE_ELEMENT_FEATURE_COUNT. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.MinimalCutsetImpl <em>Minimal Cutset</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.MinimalCutsetImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getMinimalCutset()
	 * @generated
	 */
	int MINIMAL_CUTSET = 5;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Failures</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET__FAILURES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The number of structural features of the '<em>Minimal Cutset</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of operations of the '<em>Minimal Cutset</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MINIMAL_CUTSET_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- ProbDist (classifier id 6): BaseElement layout plus Type and Parameters features. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.ProbDistImpl <em>Prob Dist</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.ProbDistImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getProbDist()
	 * @generated
	 */
	int PROB_DIST = 6;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Type</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__TYPE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Parameters</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST__PARAMETERS = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>Prob Dist</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>Prob Dist</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- ProbDistParam (classifier id 7): BaseElement layout plus a single Value attribute. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.ProbDistParamImpl <em>Prob Dist Param</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.ProbDistParamImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getProbDistParam()
	 * @generated
	 */
	int PROB_DIST_PARAM = 7;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Value</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM__VALUE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The number of structural features of the '<em>Prob Dist Param</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of operations of the '<em>Prob Dist Param</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int PROB_DIST_PARAM_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- FaultTree (classifier id 8): extends FailureModel's feature-id layout; adds a Causes containment list. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.FaultTreeImpl <em>Fault Tree</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.FaultTreeImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFaultTree()
	 * @generated
	 */
	int FAULT_TREE = 8;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__KEY_VALUE_MAPS = FAILURE_MODEL__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__ID = FAILURE_MODEL__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__NAME = FAILURE_MODEL__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__DESCRIPTION = FAILURE_MODEL__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Minimal Cutsets</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__MINIMAL_CUTSETS = FAILURE_MODEL__MINIMAL_CUTSETS;
	/**
	 * The feature id for the '<em><b>Failures</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__FAILURES = FAILURE_MODEL__FAILURES;
	/**
	 * The feature id for the '<em><b>Sub Models</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__SUB_MODELS = FAILURE_MODEL__SUB_MODELS;
	/**
	 * The feature id for the '<em><b>Causes</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE__CAUSES = FAILURE_MODEL_FEATURE_COUNT + 0;
	/**
	 * The number of structural features of the '<em>Fault Tree</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE_FEATURE_COUNT = FAILURE_MODEL_FEATURE_COUNT + 1;
	/**
	 * The number of operations of the '<em>Fault Tree</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FAULT_TREE_OPERATION_COUNT = FAILURE_MODEL_OPERATION_COUNT + 0;
	// --- Cause (classifier id 9): BaseElement layout plus CauseType attribute and Failure reference. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.CauseImpl <em>Cause</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.CauseImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getCause()
	 * @generated
	 */
	int CAUSE = 9;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Cause Type</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__CAUSE_TYPE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Failure</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE__FAILURE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>Cause</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>Cause</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int CAUSE_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- Gate (classifier id 10): extends Cause's feature-id layout; adds GateType attribute and Causes reference list. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.GateImpl <em>Gate</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.GateImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getGate()
	 * @generated
	 */
	int GATE = 10;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__KEY_VALUE_MAPS = CAUSE__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__ID = CAUSE__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__NAME = CAUSE__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__DESCRIPTION = CAUSE__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Cause Type</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__CAUSE_TYPE = CAUSE__CAUSE_TYPE;
	/**
	 * The feature id for the '<em><b>Failure</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__FAILURE = CAUSE__FAILURE;
	/**
	 * The feature id for the '<em><b>Gate Type</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__GATE_TYPE = CAUSE_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Causes</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE__CAUSES = CAUSE_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>Gate</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE_FEATURE_COUNT = CAUSE_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>Gate</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int GATE_OPERATION_COUNT = CAUSE_OPERATION_COUNT + 0;
	// --- MarkovChain (classifier id 11): extends FailureModel's feature-id layout; adds Transitions and States containment lists. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.MarkovChainImpl <em>Markov Chain</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.MarkovChainImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getMarkovChain()
	 * @generated
	 */
	int MARKOV_CHAIN = 11;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__KEY_VALUE_MAPS = FAILURE_MODEL__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__ID = FAILURE_MODEL__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__NAME = FAILURE_MODEL__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__DESCRIPTION = FAILURE_MODEL__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Minimal Cutsets</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__MINIMAL_CUTSETS = FAILURE_MODEL__MINIMAL_CUTSETS;
	/**
	 * The feature id for the '<em><b>Failures</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__FAILURES = FAILURE_MODEL__FAILURES;
	/**
	 * The feature id for the '<em><b>Sub Models</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__SUB_MODELS = FAILURE_MODEL__SUB_MODELS;
	/**
	 * The feature id for the '<em><b>Transitions</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__TRANSITIONS = FAILURE_MODEL_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>States</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN__STATES = FAILURE_MODEL_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>Markov Chain</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN_FEATURE_COUNT = FAILURE_MODEL_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>Markov Chain</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int MARKOV_CHAIN_OPERATION_COUNT = FAILURE_MODEL_OPERATION_COUNT + 0;
	// --- State (classifier id 12): BaseElement layout plus IsInitialState/IsFailState attributes and FailState reference. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.StateImpl <em>State</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.StateImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getState()
	 * @generated
	 */
	int STATE = 12;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Is Initial State</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__IS_INITIAL_STATE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Is Fail State</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__IS_FAIL_STATE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The feature id for the '<em><b>Fail State</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE__FAIL_STATE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The number of structural features of the '<em>State</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 3;
	/**
	 * The number of operations of the '<em>State</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int STATE_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- Transition (classifier id 13): BaseElement layout plus Transition attribute, probability distribution, and from/to state lists. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.TransitionImpl <em>Transition</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.TransitionImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getTransition()
	 * @generated
	 */
	int TRANSITION = 13;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Transition</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__TRANSITION = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Transition Prob Distribution</b></em>' containment reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__TRANSITION_PROB_DISTRIBUTION = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The feature id for the '<em><b>From States</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__FROM_STATES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The feature id for the '<em><b>To States</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION__TO_STATES = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 3;
	/**
	 * The number of structural features of the '<em>Transition</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 4;
	/**
	 * The number of operations of the '<em>Transition</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int TRANSITION_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- FMEA (classifier id 14): extends FailureModel's feature-id layout; adds Type attribute and Entries containment list. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.FMEAImpl <em>FMEA</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.FMEAImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEA()
	 * @generated
	 */
	int FMEA = 14;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__KEY_VALUE_MAPS = FAILURE_MODEL__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__ID = FAILURE_MODEL__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__NAME = FAILURE_MODEL__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__DESCRIPTION = FAILURE_MODEL__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Minimal Cutsets</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__MINIMAL_CUTSETS = FAILURE_MODEL__MINIMAL_CUTSETS;
	/**
	 * The feature id for the '<em><b>Failures</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__FAILURES = FAILURE_MODEL__FAILURES;
	/**
	 * The feature id for the '<em><b>Sub Models</b></em>' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__SUB_MODELS = FAILURE_MODEL__SUB_MODELS;
	/**
	 * The feature id for the '<em><b>Type</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__TYPE = FAILURE_MODEL_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Entries</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA__ENTRIES = FAILURE_MODEL_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>FMEA</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_FEATURE_COUNT = FAILURE_MODEL_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>FMEA</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_OPERATION_COUNT = FAILURE_MODEL_OPERATION_COUNT + 0;
	// --- FMEAEntry (classifier id 15): BaseElement layout plus Effect and Mode references. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.FMEAEntryImpl <em>FMEA Entry</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.FMEAEntryImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEAEntry()
	 * @generated
	 */
	int FMEA_ENTRY = 15;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__KEY_VALUE_MAPS = OdeBasePackage.BASE_ELEMENT__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__ID = OdeBasePackage.BASE_ELEMENT__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__NAME = OdeBasePackage.BASE_ELEMENT__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__DESCRIPTION = OdeBasePackage.BASE_ELEMENT__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Effect</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__EFFECT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Mode</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY__MODE = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>FMEA Entry</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY_FEATURE_COUNT = OdeBasePackage.BASE_ELEMENT_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>FMEA Entry</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEA_ENTRY_OPERATION_COUNT = OdeBasePackage.BASE_ELEMENT_OPERATION_COUNT + 0;
	// --- FMEDAEntry (classifier id 16): extends FMEAEntry's feature-id layout; adds DiagnosisRate attribute and diagnosis probability distribution. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.impl.FMEDAEntryImpl <em>FMEDA Entry</em>}' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.impl.FMEDAEntryImpl
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEDAEntry()
	 * @generated
	 */
	int FMEDA_ENTRY = 16;
	/**
	 * The feature id for the '<em><b>Key Value Maps</b></em>' containment reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__KEY_VALUE_MAPS = FMEA_ENTRY__KEY_VALUE_MAPS;
	/**
	 * The feature id for the '<em><b>Id</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__ID = FMEA_ENTRY__ID;
	/**
	 * The feature id for the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__NAME = FMEA_ENTRY__NAME;
	/**
	 * The feature id for the '<em><b>Description</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__DESCRIPTION = FMEA_ENTRY__DESCRIPTION;
	/**
	 * The feature id for the '<em><b>Effect</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__EFFECT = FMEA_ENTRY__EFFECT;
	/**
	 * The feature id for the '<em><b>Mode</b></em>' reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__MODE = FMEA_ENTRY__MODE;
	/**
	 * The feature id for the '<em><b>Diagnosis Rate</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__DIAGNOSIS_RATE = FMEA_ENTRY_FEATURE_COUNT + 0;
	/**
	 * The feature id for the '<em><b>Diagnosis Prob Distribution</b></em>' containment reference.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY__DIAGNOSIS_PROB_DISTRIBUTION = FMEA_ENTRY_FEATURE_COUNT + 1;
	/**
	 * The number of structural features of the '<em>FMEDA Entry</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY_FEATURE_COUNT = FMEA_ENTRY_FEATURE_COUNT + 2;
	/**
	 * The number of operations of the '<em>FMEDA Entry</em>' class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 * @ordered
	 */
	int FMEDA_ENTRY_OPERATION_COUNT = FMEA_ENTRY_OPERATION_COUNT + 0;
	// --- Enum classifier ids (17-20): FailureOriginType, GateType, CauseType, FMEAType. Enums have no feature-id constants. ---
	/**
	 * The meta object id for the '{@link top.failureLogic.FailureOriginType <em>Failure Origin Type</em>}' enum.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.FailureOriginType
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureOriginType()
	 * @generated
	 */
	int FAILURE_ORIGIN_TYPE = 17;
	/**
	 * The meta object id for the '{@link top.failureLogic.GateType <em>Gate Type</em>}' enum.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.GateType
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getGateType()
	 * @generated
	 */
	int GATE_TYPE = 18;
	/**
	 * The meta object id for the '{@link top.failureLogic.CauseType <em>Cause Type</em>}' enum.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.CauseType
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getCauseType()
	 * @generated
	 */
	int CAUSE_TYPE = 19;
	/**
	 * The meta object id for the '{@link top.failureLogic.FMEAType <em>FMEA Type</em>}' enum.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see top.failureLogic.FMEAType
	 * @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEAType()
	 * @generated
	 */
	int FMEA_TYPE = 20;
/**
* Returns the meta object for class '{@link top.failureLogic.FailureLogicPackage <em>Failure Logic Package</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Failure Logic Package</em>'.
* @see top.failureLogic.FailureLogicPackage
* @generated
*/
EClass getFailureLogicPackage();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.FailureLogicPackage#getFailureModels <em>Failure Models</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Failure Models</em>'.
* @see top.failureLogic.FailureLogicPackage#getFailureModels()
* @see #getFailureLogicPackage()
* @generated
*/
EReference getFailureLogicPackage_FailureModels();
/**
* Returns the meta object for class '{@link top.failureLogic.Failure <em>Failure</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Failure</em>'.
* @see top.failureLogic.Failure
* @generated
*/
EClass getFailure();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Failure#getOriginType <em>Origin Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Origin Type</em>'.
* @see top.failureLogic.Failure#getOriginType()
* @see #getFailure()
* @generated
*/
EAttribute getFailure_OriginType();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Failure#getFailureClass <em>Failure Class</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Failure Class</em>'.
* @see top.failureLogic.Failure#getFailureClass()
* @see #getFailure()
* @generated
*/
EAttribute getFailure_FailureClass();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Failure#getFailureRate <em>Failure Rate</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Failure Rate</em>'.
* @see top.failureLogic.Failure#getFailureRate()
* @see #getFailure()
* @generated
*/
EAttribute getFailure_FailureRate();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Failure#isIsCcf <em>Is Ccf</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Is Ccf</em>'.
* @see top.failureLogic.Failure#isIsCcf()
* @see #getFailure()
* @generated
*/
EAttribute getFailure_IsCcf();
/**
* Returns the meta object for the containment reference '{@link top.failureLogic.Failure#getFailureProbDistribution <em>Failure Prob Distribution</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference '<em>Failure Prob Distribution</em>'.
* @see top.failureLogic.Failure#getFailureProbDistribution()
* @see #getFailure()
* @generated
*/
EReference getFailure_FailureProbDistribution();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.Failure#getCcfFailures <em>Ccf Failures</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>Ccf Failures</em>'.
* @see top.failureLogic.Failure#getCcfFailures()
* @see #getFailure()
* @generated
*/
EReference getFailure_CcfFailures();
/**
* Returns the meta object for class '{@link top.failureLogic.SecurityViolation <em>Security Violation</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Security Violation</em>'.
* @see top.failureLogic.SecurityViolation
* @generated
*/
EClass getSecurityViolation();
/**
* Returns the meta object for class '{@link top.failureLogic.FailureModel <em>Failure Model</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Failure Model</em>'.
* @see top.failureLogic.FailureModel
* @generated
*/
EClass getFailureModel();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.FailureModel#getMinimalCutsets <em>Minimal Cutsets</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Minimal Cutsets</em>'.
* @see top.failureLogic.FailureModel#getMinimalCutsets()
* @see #getFailureModel()
* @generated
*/
EReference getFailureModel_MinimalCutsets();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.FailureModel#getFailures <em>Failures</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Failures</em>'.
* @see top.failureLogic.FailureModel#getFailures()
* @see #getFailureModel()
* @generated
*/
EReference getFailureModel_Failures();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.FailureModel#getSubModels <em>Sub Models</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>Sub Models</em>'.
* @see top.failureLogic.FailureModel#getSubModels()
* @see #getFailureModel()
* @generated
*/
EReference getFailureModel_SubModels();
/**
* Returns the meta object for class '{@link top.failureLogic.MinimalCutSets <em>Minimal Cut Sets</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Minimal Cut Sets</em>'.
* @see top.failureLogic.MinimalCutSets
* @generated
*/
EClass getMinimalCutSets();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.MinimalCutSets#getCutsets <em>Cutsets</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Cutsets</em>'.
* @see top.failureLogic.MinimalCutSets#getCutsets()
* @see #getMinimalCutSets()
* @generated
*/
EReference getMinimalCutSets_Cutsets();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.MinimalCutSets#getFailures <em>Failures</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>Failures</em>'.
* @see top.failureLogic.MinimalCutSets#getFailures()
* @see #getMinimalCutSets()
* @generated
*/
EReference getMinimalCutSets_Failures();
/**
* Returns the meta object for class '{@link top.failureLogic.MinimalCutset <em>Minimal Cutset</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Minimal Cutset</em>'.
* @see top.failureLogic.MinimalCutset
* @generated
*/
EClass getMinimalCutset();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.MinimalCutset#getFailures <em>Failures</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>Failures</em>'.
* @see top.failureLogic.MinimalCutset#getFailures()
* @see #getMinimalCutset()
* @generated
*/
EReference getMinimalCutset_Failures();
/**
* Returns the meta object for class '{@link top.failureLogic.ProbDist <em>Prob Dist</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Prob Dist</em>'.
* @see top.failureLogic.ProbDist
* @generated
*/
EClass getProbDist();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.ProbDist#getType <em>Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Type</em>'.
* @see top.failureLogic.ProbDist#getType()
* @see #getProbDist()
* @generated
*/
EAttribute getProbDist_Type();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.ProbDist#getParameters <em>Parameters</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Parameters</em>'.
* @see top.failureLogic.ProbDist#getParameters()
* @see #getProbDist()
* @generated
*/
EReference getProbDist_Parameters();
/**
* Returns the meta object for class '{@link top.failureLogic.ProbDistParam <em>Prob Dist Param</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Prob Dist Param</em>'.
* @see top.failureLogic.ProbDistParam
* @generated
*/
EClass getProbDistParam();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.ProbDistParam#getValue <em>Value</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Value</em>'.
* @see top.failureLogic.ProbDistParam#getValue()
* @see #getProbDistParam()
* @generated
*/
EAttribute getProbDistParam_Value();
/**
* Returns the meta object for class '{@link top.failureLogic.FaultTree <em>Fault Tree</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Fault Tree</em>'.
* @see top.failureLogic.FaultTree
* @generated
*/
EClass getFaultTree();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.FaultTree#getCauses <em>Causes</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Causes</em>'.
* @see top.failureLogic.FaultTree#getCauses()
* @see #getFaultTree()
* @generated
*/
EReference getFaultTree_Causes();
/**
* Returns the meta object for class '{@link top.failureLogic.Cause <em>Cause</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Cause</em>'.
* @see top.failureLogic.Cause
* @generated
*/
EClass getCause();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Cause#getCauseType <em>Cause Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Cause Type</em>'.
* @see top.failureLogic.Cause#getCauseType()
* @see #getCause()
* @generated
*/
EAttribute getCause_CauseType();
/**
* Returns the meta object for the reference '{@link top.failureLogic.Cause#getFailure <em>Failure</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference '<em>Failure</em>'.
* @see top.failureLogic.Cause#getFailure()
* @see #getCause()
* @generated
*/
EReference getCause_Failure();
/**
* Returns the meta object for class '{@link top.failureLogic.Gate <em>Gate</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Gate</em>'.
* @see top.failureLogic.Gate
* @generated
*/
EClass getGate();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Gate#getGateType <em>Gate Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Gate Type</em>'.
* @see top.failureLogic.Gate#getGateType()
* @see #getGate()
* @generated
*/
EAttribute getGate_GateType();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.Gate#getCauses <em>Causes</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>Causes</em>'.
* @see top.failureLogic.Gate#getCauses()
* @see #getGate()
* @generated
*/
EReference getGate_Causes();
/**
* Returns the meta object for class '{@link top.failureLogic.MarkovChain <em>Markov Chain</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Markov Chain</em>'.
* @see top.failureLogic.MarkovChain
* @generated
*/
EClass getMarkovChain();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.MarkovChain#getTransitions <em>Transitions</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Transitions</em>'.
* @see top.failureLogic.MarkovChain#getTransitions()
* @see #getMarkovChain()
* @generated
*/
EReference getMarkovChain_Transitions();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.MarkovChain#getStates <em>States</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>States</em>'.
* @see top.failureLogic.MarkovChain#getStates()
* @see #getMarkovChain()
* @generated
*/
EReference getMarkovChain_States();
/**
* Returns the meta object for class '{@link top.failureLogic.State <em>State</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>State</em>'.
* @see top.failureLogic.State
* @generated
*/
EClass getState();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.State#isIsInitialState <em>Is Initial State</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Is Initial State</em>'.
* @see top.failureLogic.State#isIsInitialState()
* @see #getState()
* @generated
*/
EAttribute getState_IsInitialState();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.State#isIsFailState <em>Is Fail State</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Is Fail State</em>'.
* @see top.failureLogic.State#isIsFailState()
* @see #getState()
* @generated
*/
EAttribute getState_IsFailState();
/**
* Returns the meta object for the reference '{@link top.failureLogic.State#getFailState <em>Fail State</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference '<em>Fail State</em>'.
* @see top.failureLogic.State#getFailState()
* @see #getState()
* @generated
*/
EReference getState_FailState();
/**
* Returns the meta object for class '{@link top.failureLogic.Transition <em>Transition</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>Transition</em>'.
* @see top.failureLogic.Transition
* @generated
*/
EClass getTransition();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.Transition#getTransition <em>Transition</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Transition</em>'.
* @see top.failureLogic.Transition#getTransition()
* @see #getTransition()
* @generated
*/
EAttribute getTransition_Transition();
/**
* Returns the meta object for the containment reference '{@link top.failureLogic.Transition#getTransitionProbDistribution <em>Transition Prob Distribution</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference '<em>Transition Prob Distribution</em>'.
* @see top.failureLogic.Transition#getTransitionProbDistribution()
* @see #getTransition()
* @generated
*/
EReference getTransition_TransitionProbDistribution();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.Transition#getFromStates <em>From States</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>From States</em>'.
* @see top.failureLogic.Transition#getFromStates()
* @see #getTransition()
* @generated
*/
EReference getTransition_FromStates();
/**
* Returns the meta object for the reference list '{@link top.failureLogic.Transition#getToStates <em>To States</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference list '<em>To States</em>'.
* @see top.failureLogic.Transition#getToStates()
* @see #getTransition()
* @generated
*/
EReference getTransition_ToStates();
/**
* Returns the meta object for class '{@link top.failureLogic.FMEA <em>FMEA</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>FMEA</em>'.
* @see top.failureLogic.FMEA
* @generated
*/
EClass getFMEA();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.FMEA#getType <em>Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Type</em>'.
* @see top.failureLogic.FMEA#getType()
* @see #getFMEA()
* @generated
*/
EAttribute getFMEA_Type();
/**
* Returns the meta object for the containment reference list '{@link top.failureLogic.FMEA#getEntries <em>Entries</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference list '<em>Entries</em>'.
* @see top.failureLogic.FMEA#getEntries()
* @see #getFMEA()
* @generated
*/
EReference getFMEA_Entries();
/**
* Returns the meta object for class '{@link top.failureLogic.FMEAEntry <em>FMEA Entry</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>FMEA Entry</em>'.
* @see top.failureLogic.FMEAEntry
* @generated
*/
EClass getFMEAEntry();
/**
* Returns the meta object for the reference '{@link top.failureLogic.FMEAEntry#getEffect <em>Effect</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference '<em>Effect</em>'.
* @see top.failureLogic.FMEAEntry#getEffect()
* @see #getFMEAEntry()
* @generated
*/
EReference getFMEAEntry_Effect();
/**
* Returns the meta object for the reference '{@link top.failureLogic.FMEAEntry#getMode <em>Mode</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the reference '<em>Mode</em>'.
* @see top.failureLogic.FMEAEntry#getMode()
* @see #getFMEAEntry()
* @generated
*/
EReference getFMEAEntry_Mode();
/**
* Returns the meta object for class '{@link top.failureLogic.FMEDAEntry <em>FMEDA Entry</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for class '<em>FMEDA Entry</em>'.
* @see top.failureLogic.FMEDAEntry
* @generated
*/
EClass getFMEDAEntry();
/**
* Returns the meta object for the attribute '{@link top.failureLogic.FMEDAEntry#getDiagnosisRate <em>Diagnosis Rate</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the attribute '<em>Diagnosis Rate</em>'.
* @see top.failureLogic.FMEDAEntry#getDiagnosisRate()
* @see #getFMEDAEntry()
* @generated
*/
EAttribute getFMEDAEntry_DiagnosisRate();
/**
* Returns the meta object for the containment reference '{@link top.failureLogic.FMEDAEntry#getDiagnosisProbDistribution <em>Diagnosis Prob Distribution</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for the containment reference '<em>Diagnosis Prob Distribution</em>'.
* @see top.failureLogic.FMEDAEntry#getDiagnosisProbDistribution()
* @see #getFMEDAEntry()
* @generated
*/
EReference getFMEDAEntry_DiagnosisProbDistribution();
/**
* Returns the meta object for enum '{@link top.failureLogic.FailureOriginType <em>Failure Origin Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for enum '<em>Failure Origin Type</em>'.
* @see top.failureLogic.FailureOriginType
* @generated
*/
EEnum getFailureOriginType();
/**
* Returns the meta object for enum '{@link top.failureLogic.GateType <em>Gate Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for enum '<em>Gate Type</em>'.
* @see top.failureLogic.GateType
* @generated
*/
EEnum getGateType();
/**
* Returns the meta object for enum '{@link top.failureLogic.CauseType <em>Cause Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for enum '<em>Cause Type</em>'.
* @see top.failureLogic.CauseType
* @generated
*/
EEnum getCauseType();
/**
* Returns the meta object for enum '{@link top.failureLogic.FMEAType <em>FMEA Type</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the meta object for enum '<em>FMEA Type</em>'.
* @see top.failureLogic.FMEAType
* @generated
*/
EEnum getFMEAType();
/**
* Returns the factory that creates the instances of the model.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @return the factory that creates the instances of the model.
* @generated
*/
FailureLogic_Factory getFailureLogic_Factory();
/**
* <!-- begin-user-doc -->
* Defines literals for the meta objects that represent
* <ul>
* <li>each class,</li>
* <li>each feature of each class,</li>
* <li>each operation of each class,</li>
* <li>each enum,</li>
* <li>and each data type</li>
* </ul>
* <!-- end-user-doc -->
* @generated
*/
interface Literals {
/**
* The meta object literal for the '{@link top.failureLogic.impl.FailureLogicPackageImpl <em>Failure Logic Package</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureLogicPackageImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureLogicPackage()
* @generated
*/
EClass FAILURE_LOGIC_PACKAGE = eINSTANCE.getFailureLogicPackage();
/**
* The meta object literal for the '<em><b>Failure Models</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE_LOGIC_PACKAGE__FAILURE_MODELS = eINSTANCE.getFailureLogicPackage_FailureModels();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FailureImpl <em>Failure</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailure()
* @generated
*/
EClass FAILURE = eINSTANCE.getFailure();
/**
* The meta object literal for the '<em><b>Origin Type</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FAILURE__ORIGIN_TYPE = eINSTANCE.getFailure_OriginType();
/**
* The meta object literal for the '<em><b>Failure Class</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FAILURE__FAILURE_CLASS = eINSTANCE.getFailure_FailureClass();
/**
* The meta object literal for the '<em><b>Failure Rate</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FAILURE__FAILURE_RATE = eINSTANCE.getFailure_FailureRate();
/**
* The meta object literal for the '<em><b>Is Ccf</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FAILURE__IS_CCF = eINSTANCE.getFailure_IsCcf();
/**
* The meta object literal for the '<em><b>Failure Prob Distribution</b></em>' containment reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE__FAILURE_PROB_DISTRIBUTION = eINSTANCE.getFailure_FailureProbDistribution();
/**
* The meta object literal for the '<em><b>Ccf Failures</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE__CCF_FAILURES = eINSTANCE.getFailure_CcfFailures();
/**
* The meta object literal for the '{@link top.failureLogic.impl.SecurityViolationImpl <em>Security Violation</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.SecurityViolationImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getSecurityViolation()
* @generated
*/
EClass SECURITY_VIOLATION = eINSTANCE.getSecurityViolation();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FailureModelImpl <em>Failure Model</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FailureModelImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureModel()
* @generated
*/
EClass FAILURE_MODEL = eINSTANCE.getFailureModel();
/**
* The meta object literal for the '<em><b>Minimal Cutsets</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE_MODEL__MINIMAL_CUTSETS = eINSTANCE.getFailureModel_MinimalCutsets();
/**
* The meta object literal for the '<em><b>Failures</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE_MODEL__FAILURES = eINSTANCE.getFailureModel_Failures();
/**
* The meta object literal for the '<em><b>Sub Models</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAILURE_MODEL__SUB_MODELS = eINSTANCE.getFailureModel_SubModels();
/**
* The meta object literal for the '{@link top.failureLogic.impl.MinimalCutSetsImpl <em>Minimal Cut Sets</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.MinimalCutSetsImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getMinimalCutSets()
* @generated
*/
EClass MINIMAL_CUT_SETS = eINSTANCE.getMinimalCutSets();
/**
* The meta object literal for the '<em><b>Cutsets</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference MINIMAL_CUT_SETS__CUTSETS = eINSTANCE.getMinimalCutSets_Cutsets();
/**
* The meta object literal for the '<em><b>Failures</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference MINIMAL_CUT_SETS__FAILURES = eINSTANCE.getMinimalCutSets_Failures();
/**
* The meta object literal for the '{@link top.failureLogic.impl.MinimalCutsetImpl <em>Minimal Cutset</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.MinimalCutsetImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getMinimalCutset()
* @generated
*/
EClass MINIMAL_CUTSET = eINSTANCE.getMinimalCutset();
/**
* The meta object literal for the '<em><b>Failures</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference MINIMAL_CUTSET__FAILURES = eINSTANCE.getMinimalCutset_Failures();
/**
* The meta object literal for the '{@link top.failureLogic.impl.ProbDistImpl <em>Prob Dist</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.ProbDistImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getProbDist()
* @generated
*/
EClass PROB_DIST = eINSTANCE.getProbDist();
/**
* The meta object literal for the '<em><b>Type</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute PROB_DIST__TYPE = eINSTANCE.getProbDist_Type();
/**
* The meta object literal for the '<em><b>Parameters</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference PROB_DIST__PARAMETERS = eINSTANCE.getProbDist_Parameters();
/**
* The meta object literal for the '{@link top.failureLogic.impl.ProbDistParamImpl <em>Prob Dist Param</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.ProbDistParamImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getProbDistParam()
* @generated
*/
EClass PROB_DIST_PARAM = eINSTANCE.getProbDistParam();
/**
* The meta object literal for the '<em><b>Value</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute PROB_DIST_PARAM__VALUE = eINSTANCE.getProbDistParam_Value();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FaultTreeImpl <em>Fault Tree</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FaultTreeImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFaultTree()
* @generated
*/
EClass FAULT_TREE = eINSTANCE.getFaultTree();
/**
* The meta object literal for the '<em><b>Causes</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FAULT_TREE__CAUSES = eINSTANCE.getFaultTree_Causes();
/**
* The meta object literal for the '{@link top.failureLogic.impl.CauseImpl <em>Cause</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.CauseImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getCause()
* @generated
*/
EClass CAUSE = eINSTANCE.getCause();
/**
* The meta object literal for the '<em><b>Cause Type</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute CAUSE__CAUSE_TYPE = eINSTANCE.getCause_CauseType();
/**
* The meta object literal for the '<em><b>Failure</b></em>' reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference CAUSE__FAILURE = eINSTANCE.getCause_Failure();
/**
* The meta object literal for the '{@link top.failureLogic.impl.GateImpl <em>Gate</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.GateImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getGate()
* @generated
*/
EClass GATE = eINSTANCE.getGate();
/**
* The meta object literal for the '<em><b>Gate Type</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute GATE__GATE_TYPE = eINSTANCE.getGate_GateType();
/**
* The meta object literal for the '<em><b>Causes</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference GATE__CAUSES = eINSTANCE.getGate_Causes();
/**
* The meta object literal for the '{@link top.failureLogic.impl.MarkovChainImpl <em>Markov Chain</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.MarkovChainImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getMarkovChain()
* @generated
*/
EClass MARKOV_CHAIN = eINSTANCE.getMarkovChain();
/**
* The meta object literal for the '<em><b>Transitions</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference MARKOV_CHAIN__TRANSITIONS = eINSTANCE.getMarkovChain_Transitions();
/**
* The meta object literal for the '<em><b>States</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference MARKOV_CHAIN__STATES = eINSTANCE.getMarkovChain_States();
/**
* The meta object literal for the '{@link top.failureLogic.impl.StateImpl <em>State</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.StateImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getState()
* @generated
*/
EClass STATE = eINSTANCE.getState();
/**
* The meta object literal for the '<em><b>Is Initial State</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute STATE__IS_INITIAL_STATE = eINSTANCE.getState_IsInitialState();
/**
* The meta object literal for the '<em><b>Is Fail State</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute STATE__IS_FAIL_STATE = eINSTANCE.getState_IsFailState();
/**
* The meta object literal for the '<em><b>Fail State</b></em>' reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference STATE__FAIL_STATE = eINSTANCE.getState_FailState();
/**
* The meta object literal for the '{@link top.failureLogic.impl.TransitionImpl <em>Transition</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.TransitionImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getTransition()
* @generated
*/
EClass TRANSITION = eINSTANCE.getTransition();
/**
* The meta object literal for the '<em><b>Transition</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute TRANSITION__TRANSITION = eINSTANCE.getTransition_Transition();
/**
* The meta object literal for the '<em><b>Transition Prob Distribution</b></em>' containment reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference TRANSITION__TRANSITION_PROB_DISTRIBUTION = eINSTANCE.getTransition_TransitionProbDistribution();
/**
* The meta object literal for the '<em><b>From States</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference TRANSITION__FROM_STATES = eINSTANCE.getTransition_FromStates();
/**
* The meta object literal for the '<em><b>To States</b></em>' reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference TRANSITION__TO_STATES = eINSTANCE.getTransition_ToStates();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FMEAImpl <em>FMEA</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FMEAImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEA()
* @generated
*/
EClass FMEA = eINSTANCE.getFMEA();
/**
* The meta object literal for the '<em><b>Type</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FMEA__TYPE = eINSTANCE.getFMEA_Type();
/**
* The meta object literal for the '<em><b>Entries</b></em>' containment reference list feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FMEA__ENTRIES = eINSTANCE.getFMEA_Entries();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FMEAEntryImpl <em>FMEA Entry</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FMEAEntryImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEAEntry()
* @generated
*/
EClass FMEA_ENTRY = eINSTANCE.getFMEAEntry();
/**
* The meta object literal for the '<em><b>Effect</b></em>' reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FMEA_ENTRY__EFFECT = eINSTANCE.getFMEAEntry_Effect();
/**
* The meta object literal for the '<em><b>Mode</b></em>' reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FMEA_ENTRY__MODE = eINSTANCE.getFMEAEntry_Mode();
/**
* The meta object literal for the '{@link top.failureLogic.impl.FMEDAEntryImpl <em>FMEDA Entry</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.impl.FMEDAEntryImpl
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEDAEntry()
* @generated
*/
EClass FMEDA_ENTRY = eINSTANCE.getFMEDAEntry();
/**
* The meta object literal for the '<em><b>Diagnosis Rate</b></em>' attribute feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EAttribute FMEDA_ENTRY__DIAGNOSIS_RATE = eINSTANCE.getFMEDAEntry_DiagnosisRate();
/**
* The meta object literal for the '<em><b>Diagnosis Prob Distribution</b></em>' containment reference feature.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
EReference FMEDA_ENTRY__DIAGNOSIS_PROB_DISTRIBUTION = eINSTANCE.getFMEDAEntry_DiagnosisProbDistribution();
/**
* The meta object literal for the '{@link top.failureLogic.FailureOriginType <em>Failure Origin Type</em>}' enum.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.FailureOriginType
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFailureOriginType()
* @generated
*/
EEnum FAILURE_ORIGIN_TYPE = eINSTANCE.getFailureOriginType();
/**
* The meta object literal for the '{@link top.failureLogic.GateType <em>Gate Type</em>}' enum.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.GateType
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getGateType()
* @generated
*/
EEnum GATE_TYPE = eINSTANCE.getGateType();
/**
* The meta object literal for the '{@link top.failureLogic.CauseType <em>Cause Type</em>}' enum.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.CauseType
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getCauseType()
* @generated
*/
EEnum CAUSE_TYPE = eINSTANCE.getCauseType();
/**
* The meta object literal for the '{@link top.failureLogic.FMEAType <em>FMEA Type</em>}' enum.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @see top.failureLogic.FMEAType
* @see top.failureLogic.impl.FailureLogic_PackageImpl#getFMEAType()
* @generated
*/
EEnum FMEA_TYPE = eINSTANCE.getFMEAType();
}
} //FailureLogic_Package
|
package br.com.strn.ec.database.dao.client;
import br.com.strn.ec.database.dao.GenericDAO;
import br.com.strn.ec.database.entities.client.ConditionAdvice;
import org.hibernate.Session;
import javax.inject.Inject;
/**
 * Data-access object for {@link ConditionAdvice} entities, backed by a
 * Hibernate {@link Session} that is supplied through CDI.
 */
public class ConditionAdviceDAO extends GenericDAO<ConditionAdvice> {

    /**
     * Creates a DAO bound to the given Hibernate session.
     *
     * @param session the session used for all persistence operations
     */
    @Inject
    public ConditionAdviceDAO(Session session) {
        super(session);
    }

    /**
     * @deprecated CDI eyes only
     */
    protected ConditionAdviceDAO() {
        this(null);
    }
}
|
package uk.gov.hmcts.reform.dataextractor.service;
/**
 * Validates a string before it is written to blob output.
 * <p>
 * Implementations only need to supply {@link #isValid(String)};
 * {@link #isNotValid(String)} is derived from it by default, which guarantees
 * the two checks can never disagree.
 */
public interface BlobOutputValidator {

    /**
     * Checks whether the given input is acceptable.
     *
     * @param input the value to check
     * @return {@code true} if the input is valid
     */
    boolean isValid(String input);

    /**
     * Convenience negation of {@link #isValid(String)}.
     *
     * @param input the value to check
     * @return {@code true} if the input is not valid
     */
    default boolean isNotValid(String input) {
        return !isValid(input);
    }
}
|
/*
* Copyright (c) 2007, Red Hat Middleware, LLC. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, v. 2.1. This program is distributed in the
* hope that it will be useful, but WITHOUT A WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details. You should have received a
* copy of the GNU Lesser General Public License, v.2.1 along with this
* distribution; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Red Hat Author(s): Steve Ebersole
*/
package org.hibernate.test.jdbc;
/**
* Boat implementation
*
* @author Steve Ebersole
*/
public class Boat {
    // Database identifier — presumably generated by the persistence layer;
    // confirm against the Hibernate mapping for this class.
    private Long id;
    // Identifying tag of the boat.
    private String tag;
    // Person steering the boat.
    private Person driver;
    // Additional person on board.
    private Person boarder;

    /** No-arg constructor, used for reflective instantiation by the framework. */
    public Boat() {
    }

    /**
     * Creates a fully populated boat.
     *
     * @param tag the boat's identifying tag
     * @param driver the person driving the boat
     * @param boarder the additional person on board
     */
    public Boat(String tag, Person driver, Person boarder) {
        this.tag = tag;
        this.driver = driver;
        this.boarder = boarder;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getTag() {
        return tag;
    }

    public void setTag(String tag) {
        this.tag = tag;
    }

    public Person getDriver() {
        return driver;
    }

    public void setDriver(Person driver) {
        this.driver = driver;
    }

    public Person getBoarder() {
        return boarder;
    }

    public void setBoarder(Person boarder) {
        this.boarder = boarder;
    }
}
|
package hr.fer.zemris.optjava.dz3.neighborhood;
import hr.fer.zemris.optjava.dz3.solution.DoubleArraySolution;
import java.util.Arrays;
import java.util.Random;
/**
* An implementation of {@link INeighborhood} interface which provides <i>Gaussian</i>
* neighborhood.
*/
public class DoubleArrayNormNeighborhood implements INeighborhood<DoubleArraySolution> {

    /** Shared source of randomness for all neighborhood instances. */
    private static final Random rand = new Random();

    /** Per-component scale applied to the Gaussian noise. */
    private final double[] deltas;

    /**
     * Constructor.
     *
     * @param deltas per-component deltas scaling the Gaussian neighborhood;
     *               the array is copied defensively, so later mutation by the
     *               caller has no effect on this neighborhood
     */
    public DoubleArrayNormNeighborhood(double[] deltas) {
        this.deltas = deltas.clone();
    }

    /**
     * Returns a random neighbor of the given solution: a duplicate whose
     * values are re-randomized within Gaussian-scaled per-component bounds.
     */
    @Override
    public DoubleArraySolution randomNeighbor(DoubleArraySolution solution) {
        DoubleArraySolution neighbor = solution.duplicate();
        // One Gaussian-scaled bound per component: bound[i] = N(0,1) * deltas[i].
        // NOTE(review): nextGaussian() can be negative, which makes the lower
        // bound (-bounds[i]) larger than the upper bound for that component —
        // confirm that DoubleArraySolution.randomize tolerates inverted ranges.
        double[] bounds = Arrays.stream(deltas).map(d -> rand.nextGaussian() * d).toArray();
        neighbor.randomize(rand, Arrays.stream(bounds).map(d -> d * -1).toArray(), bounds);
        return neighbor;
    }
}
|
/***
* ASM: a very small and fast Java bytecode manipulation framework
* Copyright (c) 2000-2011 INRIA, France Telecom
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package scouter.org.objectweb.asm.commons;
import scouter.org.objectweb.asm.Handle;
import scouter.org.objectweb.asm.Type;
import scouter.org.objectweb.asm.signature.SignatureReader;
import scouter.org.objectweb.asm.signature.SignatureVisitor;
import scouter.org.objectweb.asm.signature.SignatureWriter;
/**
* A class responsible for remapping types and names. Subclasses can override
* the following methods:
*
* <ul>
* <li>{@link #map(String)} - map type</li>
* <li>{@link #mapFieldName(String, String, String)} - map field name</li>
* <li>{@link #mapMethodName(String, String, String)} - map method name</li>
* </ul>
*
* @author Eugene Kuleshov
*/
public abstract class Remapper {

    /**
     * Remaps a type descriptor (for example {@code Ljava/lang/String;} or
     * {@code [[I}) by applying {@link #map(String)} to every internal name it
     * contains. Primitive descriptors, and object types for which
     * {@code map} returns {@code null}, are returned unchanged.
     */
    public String mapDesc(String desc) {
        Type t = Type.getType(desc);
        switch (t.getSort()) {
        case Type.ARRAY:
            // Remap the element type, then re-apply the array dimensions.
            String s = mapDesc(t.getElementType().getDescriptor());
            for (int i = 0; i < t.getDimensions(); ++i) {
                s = '[' + s;
            }
            return s;
        case Type.OBJECT:
            String newType = map(t.getInternalName());
            if (newType != null) {
                return 'L' + newType + ';';
            }
            // map() returned null: fall out of the switch and keep the
            // original descriptor.
        }
        return desc;
    }

    /**
     * Remaps a {@link Type}: array and object types have their internal names
     * passed through {@link #map(String)}, method types are remapped via
     * {@link #mapMethodDesc(String)}, and all other sorts (primitives) are
     * returned as-is.
     */
    private Type mapType(Type t) {
        switch (t.getSort()) {
        case Type.ARRAY:
            String s = mapDesc(t.getElementType().getDescriptor());
            for (int i = 0; i < t.getDimensions(); ++i) {
                s = '[' + s;
            }
            return Type.getType(s);
        case Type.OBJECT:
            // Reuses the variable declared in the ARRAY case above (legal in
            // Java: switch cases share one scope).
            s = map(t.getInternalName());
            return s != null ? Type.getObjectType(s) : t;
        case Type.METHOD:
            return Type.getMethodType(mapMethodDesc(t.getDescriptor()));
        }
        return t;
    }

    /**
     * Remaps an internal type name (e.g. {@code java/lang/String}).
     * Returns {@code null} for a {@code null} input.
     */
    public String mapType(String type) {
        if (type == null) {
            return null;
        }
        return mapType(Type.getObjectType(type)).getInternalName();
    }

    /**
     * Remaps an array of internal type names. The input array is returned
     * unchanged when no entry needs remapping; otherwise a new array is
     * allocated lazily when the first remapped entry is found.
     */
    public String[] mapTypes(String[] types) {
        String[] newTypes = null;
        boolean needMapping = false;
        for (int i = 0; i < types.length; i++) {
            String type = types[i];
            String newType = map(type);
            if (newType != null && newTypes == null) {
                // First remapped name: allocate the result array and copy the
                // entries already inspected so far.
                newTypes = new String[types.length];
                if (i > 0) {
                    System.arraycopy(types, 0, newTypes, 0, i);
                }
                needMapping = true;
            }
            if (needMapping) {
                newTypes[i] = newType == null ? type : newType;
            }
        }
        return needMapping ? newTypes : types;
    }

    /**
     * Remaps a method descriptor: every argument descriptor and the return
     * type descriptor are passed through {@link #mapDesc(String)}.
     */
    public String mapMethodDesc(String desc) {
        if ("()V".equals(desc)) {
            // Shortcut for the most common descriptor; nothing to remap.
            return desc;
        }
        Type[] args = Type.getArgumentTypes(desc);
        StringBuilder sb = new StringBuilder("(");
        for (int i = 0; i < args.length; i++) {
            sb.append(mapDesc(args[i].getDescriptor()));
        }
        Type returnType = Type.getReturnType(desc);
        if (returnType == Type.VOID_TYPE) {
            sb.append(")V");
            return sb.toString();
        }
        sb.append(')').append(mapDesc(returnType.getDescriptor()));
        return sb.toString();
    }

    /**
     * Remaps a constant-pool value: {@link Type} values are remapped via
     * {@link #mapType(Type)}, {@link Handle} values have their owner, name
     * and descriptor remapped; every other value is returned unchanged.
     */
    public Object mapValue(Object value) {
        if (value instanceof Type) {
            return mapType((Type) value);
        }
        if (value instanceof Handle) {
            Handle h = (Handle) value;
            return new Handle(h.getTag(), mapType(h.getOwner()), mapMethodName(
                    h.getOwner(), h.getName(), h.getDesc()),
                    mapMethodDesc(h.getDesc()), h.isInterface());
        }
        return value;
    }

    /**
     * @param signature
     *            signature for mapper
     * @param typeSignature
     *            true if signature is a FieldTypeSignature, such as the
     *            signature parameter of the ClassVisitor.visitField or
     *            MethodVisitor.visitLocalVariable methods
     * @return signature rewritten as a string, or {@code null} if the input
     *         signature was {@code null}
     */
    public String mapSignature(String signature, boolean typeSignature) {
        if (signature == null) {
            return null;
        }
        // Parse the signature and replay it through a remapping visitor into
        // a writer, producing the remapped signature string.
        SignatureReader r = new SignatureReader(signature);
        SignatureWriter w = new SignatureWriter();
        SignatureVisitor a = createSignatureRemapper(w);
        if (typeSignature) {
            r.acceptType(a);
        } else {
            r.accept(a);
        }
        return w.toString();
    }

    /**
     * @deprecated use {@link #createSignatureRemapper} instead.
     */
    @Deprecated
    protected SignatureVisitor createRemappingSignatureAdapter(
            SignatureVisitor v) {
        return new SignatureRemapper(v, this);
    }

    /**
     * Creates the {@link SignatureVisitor} used by
     * {@link #mapSignature(String, boolean)}. Delegates to the deprecated
     * factory so subclasses that override it keep working.
     */
    protected SignatureVisitor createSignatureRemapper(
            SignatureVisitor v) {
        return createRemappingSignatureAdapter(v);
    }

    /**
     * Map method name to the new name. Subclasses can override.
     *
     * @param owner
     *            owner of the method.
     * @param name
     *            name of the method.
     * @param desc
     *            descriptor of the method.
     * @return new name of the method
     */
    public String mapMethodName(String owner, String name, String desc) {
        return name;
    }

    /**
     * Map invokedynamic method name to the new name. Subclasses can override.
     *
     * @param name
     *            name of the invokedynamic.
     * @param desc
     *            descriptor of the invokedynamic.
     * @return new invokedynamic name.
     */
    public String mapInvokeDynamicMethodName(String name, String desc) {
        return name;
    }

    /**
     * Map field name to the new name. Subclasses can override.
     *
     * @param owner
     *            owner of the field.
     * @param name
     *            name of the field
     * @param desc
     *            descriptor of the field
     * @return new name of the field.
     */
    public String mapFieldName(String owner, String name, String desc) {
        return name;
    }

    /**
     * Map type name to the new name. Subclasses can override.
     *
     * @param typeName
     *            the type name
     * @return new name, default implementation is the identity.
     */
    public String map(String typeName) {
        return typeName;
    }
}
|
package org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.deserializer;
import static org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage.Literals.SPRING_MEDIATOR__CONFIGURATION_KEY;
import static org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage.Literals.SPRING_MEDIATOR__BEAN_NAME;
import org.apache.synapse.mediators.AbstractMediator;
import org.eclipse.core.runtime.Assert;
import org.eclipse.gmf.runtime.diagram.ui.editparts.IGraphicalEditPart;
import org.eclipse.ui.forms.editor.FormEditor;
import org.wso2.developerstudio.eclipse.gmf.esb.EsbFactory;
import org.wso2.developerstudio.eclipse.gmf.esb.RegistryKeyProperty;
import org.wso2.developerstudio.eclipse.gmf.esb.SpringMediator;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.EsbElementTypes;
/**
 * Deserializes a Synapse Spring mediator into its visual (EMF) counterpart.
 */
public class SpringMediatorDeserializer extends AbstractEsbNodeDeserializer<AbstractMediator, SpringMediator> {

    public SpringMediator createNode(IGraphicalEditPart part, AbstractMediator mediator) {
        Assert.isTrue(mediator instanceof org.apache.synapse.mediators.spring.SpringMediator,
                "Unsupported mediator passed in for deserialization at " + this.getClass());
        org.apache.synapse.mediators.spring.SpringMediator source =
                (org.apache.synapse.mediators.spring.SpringMediator) mediator;

        SpringMediator visualMediator =
                (SpringMediator) DeserializerUtils.createNode(part, EsbElementTypes.SpringMediator_3507);
        setElementToEdit(visualMediator);
        setCommonProperties(source, visualMediator);

        // Copy the bean name, if one was configured.
        String beanName = source.getBeanName();
        if (beanName != null && !beanName.isEmpty()) {
            executeSetValueCommand(SPRING_MEDIATOR__BEAN_NAME, beanName);
        }

        // Copy the Spring configuration key, wrapped as a registry-key property.
        String configKey = source.getConfigKey();
        if (configKey != null && !configKey.isEmpty()) {
            RegistryKeyProperty registryKey = EsbFactory.eINSTANCE.createRegistryKeyProperty();
            registryKey.setKeyValue(configKey);
            executeSetValueCommand(SPRING_MEDIATOR__CONFIGURATION_KEY, registryKey);
        }
        return visualMediator;
    }
}
|
/*
* Copyright 2008 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.javascript.jscomp.CodingConvention.AssertionFunctionLookup;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.NodeTraversal.Callback;
import com.google.javascript.rhino.Node;
import java.util.ArrayList;
import java.util.List;
/**
* <p>Compiler pass that removes Closure-specific code patterns.</p>
*
* <p>Currently does the following:</p>
*
* <ul>
* <li> Instead of setting abstract methods to a function that throws an
* informative error, this pass allows some binary size reduction by
* removing these methods altogether for production builds.</li>
* <li> Remove calls to assertion functions (like goog.asserts.assert).
* If the return value of the assertion function is used, then
* the first argument (the asserted value) will be directly inlined.
* Otherwise, the entire call will be removed. It is well-known that
* this is not provably safe, much like the equivalent assert
* statement in Java.</li>
* </ul>
*
* @author robbyw@google.com (Robby Walker)
*/
final class ClosureCodeRemoval implements CompilerPass {

  /** Reference to the JS compiler */
  private final AbstractCompiler compiler;

  /** Name used to denote an abstract function */
  static final String ABSTRACT_METHOD_NAME = "goog.abstractMethod";

  // Whether assignments to goog.abstractMethod / @abstract members are removed.
  private final boolean removeAbstractMethods;

  // Whether calls to registered assertion functions are removed/inlined.
  private final boolean removeAssertionCalls;

  /**
   * List of names referenced in successive generations of finding referenced
   * nodes.
   */
  private final List<RemovableAssignment> abstractMethodAssignmentNodes =
      new ArrayList<>();

  /** List of member function definition nodes annotated with @abstract. */
  private final List<Node> abstractMemberFunctionNodes = new ArrayList<>();

  /**
   * List of assertion functions.
   */
  private final List<Node> assertionCalls = new ArrayList<>();

  /**
   * Utility class to track a node and its parent.
   */
  private class RemovableAssignment {
    /**
     * The node
     */
    final Node node;

    /**
     * Its parent
     */
    final Node parent;

    /**
     * Full chain of ASSIGN ancestors
     */
    final List<Node> assignAncestors = new ArrayList<>();

    /**
     * The last ancestor
     */
    final Node lastAncestor;

    /**
     * Data structure for information about a removable assignment.
     *
     * @param nameNode The LHS
     * @param assignNode The parent ASSIGN node
     * @param traversal Access to further levels, assumed to start at 1
     *     (note: currently unused by this constructor)
     */
    public RemovableAssignment(Node nameNode, Node assignNode,
        NodeTraversal traversal) {
      this.node = nameNode;
      this.parent = assignNode;
      // Walk up through chained assignments (a.b = c.d = goog.abstractMethod)
      // collecting every enclosing ASSIGN whose LHS is a qualified name.
      Node ancestor = assignNode;
      do {
        ancestor = ancestor.getParent();
        assignAncestors.add(ancestor);
      } while (ancestor.isAssign() &&
               ancestor.getFirstChild().isQualifiedName());
      lastAncestor = ancestor.getParent();
    }

    /**
     * Remove this node.
     */
    public void remove() {
      Node rhs = node.getNext();
      Node last = parent;
      for (Node ancestor : assignAncestors) {
        if (ancestor.isExprResult()) {
          // The whole statement is the assignment: drop the statement.
          lastAncestor.removeChild(ancestor);
          NodeUtil.markFunctionsDeleted(ancestor, compiler);
        } else {
          // The assignment's value is still used: replace the assignment
          // expression with its right-hand side.
          rhs.detach();
          ancestor.replaceChild(last, rhs);
        }
        last = ancestor;
      }
      compiler.reportChangeToEnclosingScope(lastAncestor);
    }
  }

  /**
   * Identifies all assignments of the abstract method to a variable and all methods annotated with
   * "@abstract" in their JSDoc.
   */
  private class FindAbstractMethods extends AbstractPostOrderCallback {
    @Override
    public void visit(NodeTraversal t, Node n, Node parent) {
      if (n.isAssign()) {
        Node nameNode = n.getFirstChild();
        Node valueNode = n.getLastChild();
        if (nameNode.isQualifiedName() &&
            valueNode.isQualifiedName() &&
            valueNode.matchesQualifiedName(ABSTRACT_METHOD_NAME)) {
          // Foo.prototype.bar = goog.abstractMethod
          abstractMethodAssignmentNodes.add(
              new RemovableAssignment(n.getFirstChild(), n, t));
        } else if (n.getJSDocInfo() != null
            && n.getJSDocInfo().isAbstract()
            && !(n.getJSDocInfo().isConstructor() || valueNode.isClass())) {
          // @abstract
          abstractMethodAssignmentNodes.add(
              new RemovableAssignment(n.getFirstChild(), n, t));
        }
      } else if (n.isMemberFunctionDef() && parent.isClassMembers()) {
        // @abstract methods declared with ES6 class member syntax.
        if (n.getJSDocInfo() != null && n.getJSDocInfo().isAbstract()) {
          abstractMemberFunctionNodes.add(n);
        }
      }
    }
  }

  /**
   * Identifies all assertion calls.
   */
  private class FindAssertionCalls extends AbstractPostOrderCallback {
    // Lookup of assertion functions registered by the coding convention.
    final AssertionFunctionLookup assertionNames;

    FindAssertionCalls() {
      assertionNames =
          AssertionFunctionLookup.of(compiler.getCodingConvention().getAssertionFunctions());
    }

    @Override
    public void visit(NodeTraversal t, Node n, Node parent) {
      if (n.isCall() && assertionNames.lookupByCallee(n.getFirstChild()) != null) {
        assertionCalls.add(n);
      }
    }
  }

  /**
   * Creates a Closure code remover.
   *
   * @param compiler The AbstractCompiler
   * @param removeAbstractMethods Remove declarations of abstract methods.
   * @param removeAssertionCalls Remove calls to goog.assert functions.
   */
  ClosureCodeRemoval(AbstractCompiler compiler, boolean removeAbstractMethods,
                     boolean removeAssertionCalls) {
    this.compiler = compiler;
    this.removeAbstractMethods = removeAbstractMethods;
    this.removeAssertionCalls = removeAssertionCalls;
  }

  @Override
  public void process(Node externs, Node root) {
    // First collect all removable nodes in a single combined traversal,
    // then mutate the AST afterwards.
    List<Callback> passes = new ArrayList<>();
    if (removeAbstractMethods) {
      passes.add(new FindAbstractMethods());
    }
    if (removeAssertionCalls) {
      passes.add(new FindAssertionCalls());
    }
    CombinedCompilerPass.traverse(compiler, root, passes);

    for (RemovableAssignment assignment : abstractMethodAssignmentNodes) {
      assignment.remove();
    }

    for (Node memberFunction : abstractMemberFunctionNodes) {
      compiler.reportFunctionDeleted(memberFunction.getFirstChild());
      Node parent = memberFunction.getParent();
      parent.removeChild(memberFunction);
      compiler.reportChangeToEnclosingScope(parent);
    }

    for (Node call : assertionCalls) {
      // If the assertion is an expression, just strip the whole thing.
      compiler.reportChangeToEnclosingScope(call);
      Node parent = call.getParent();
      if (parent.isExprResult()) {
        parent.detach();
        NodeUtil.markFunctionsDeleted(parent, compiler);
      } else {
        // Otherwise, replace the assertion with its first argument,
        // which is the return value of the assertion.
        Node firstArg = call.getSecondChild();
        if (firstArg == null) {
          // Assertion called with no arguments: its value is undefined.
          parent.replaceChild(call, NodeUtil.newUndefinedNode(call));
        } else {
          Node replacement = firstArg.detach();
          replacement.setJSType(call.getJSType());
          parent.replaceChild(call, replacement);
        }
        NodeUtil.markFunctionsDeleted(call, compiler);
      }
    }
  }
}
|
/*
* Copyright 2000-2018 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.client;
import com.google.gwt.core.client.GWT;
import com.google.gwt.core.client.JavaScriptObject;
import com.vaadin.client.flow.dom.DomApi;
import elemental.client.Browser;
import elemental.dom.Element;
import elemental.html.AnchorElement;
import elemental.json.JsonObject;
import elemental.json.JsonValue;
/**
* Utility methods which are related to client side code only.
*/
public class WidgetUtil {

    /**
     * Refreshes the browser.
     */
    public static void refresh() {
        // A null url makes redirect() reload the current page instead.
        redirect(null);
    }

    /**
     * Redirects the browser to the given url or refreshes the page if url is
     * null
     *
     * @param url
     *            The url to redirect to or null to refresh
     */
    public static native void redirect(String url)
    /*-{
        if (url) {
            $wnd.location = url;
        } else {
            $wnd.location.reload(false);
        }
    }-*/;

    /**
     * Resolve a relative URL to an absolute URL based on the current document's
     * location.
     *
     * @param url
     *            a string with the relative URL to resolve
     * @return the corresponding absolute URL as a string
     */
    public static String getAbsoluteUrl(String url) {
        // Setting the href of an anchor element makes the browser resolve it
        // against the document location; reading it back yields the absolute
        // form.
        AnchorElement a = (AnchorElement) Browser.getDocument()
                .createElement("a");
        a.setHref(url);
        return a.getHref();
    }

    /**
     * Anything in, anything out. It's JavaScript after all. This method just
     * makes the Java compiler accept the fact.
     *
     * @param value
     *            anything
     * @param <T>
     *            the object type
     * @return the same stuff
     */
    public static native <T> T crazyJsCast(Object value)
    /*-{
        return value;
    }-*/;

    /**
     * Anything in, JSO out. It's JavaScript after all. This method just makes
     * the Java compiler accept the fact. The regular crazy cast doesn't work
     * for JSOs since the generics still makes the compiler insert a JSO check.
     *
     * @param value
     *            anything
     * @param <T>
     *            the object type
     * @return the same stuff
     */
    public static native <T extends JavaScriptObject> T crazyJsoCast(
            Object value)
    /*-{
        return value;
    }-*/;

    /**
     * Converts a JSON value to a formatted string.
     *
     *
     * @param json
     *            the JSON value to stringify
     * @return the JSON string
     */
    public static String toPrettyJson(JsonValue json) {
        if (GWT.isScript()) {
            return toPrettyJsonJsni(json);
        } else {
            // Don't use JsonUtil.stringify here or SDM will break
            return json.toJson();
        }
    }

    /**
     * Updates the {@code attribute} value for the {@code element} to the given
     * {@code value}.
     * <p>
     * If {@code value} is {@code null} then {@code attribute} is removed,
     * otherwise {@code value.toString()} is set as its value.
     *
     * @param element
     *            the DOM element owning attribute
     * @param attribute
     *            the attribute to update
     * @param value
     *            the value to update
     */
    public static void updateAttribute(Element element, String attribute,
            Object value) {
        if (value == null) {
            DomApi.wrap(element).removeAttribute(attribute);
        } else {
            DomApi.wrap(element).setAttribute(attribute, value.toString());
        }
    }

    // JsJsonValue.toJson with indentation set to 4
    private static native String toPrettyJsonJsni(JsonValue value)
    /*-{
        // skip hashCode field
        return $wnd.JSON.stringify(value, function(keyName, value) {
            if (keyName == "$H") {
                return undefined; // skip hashCode property
            }
            return value;
        }, 4);
    }-*/;

    /**
     * Assigns a value as JavaScript property of an object.
     *
     *
     * @param object
     *            the target object
     * @param name
     *            the property name
     * @param value
     *            the property value
     */
    public static native void setJsProperty(Object object, String name,
            Object value)
    /*-{
        object[name] = value;
    }-*/;

    /**
     * Retrieves the value of a JavaScript property.
     *
     * @param object
     *            the target object
     * @param name
     *            the property name
     * @return the value
     */
    public static native Object getJsProperty(Object object, String name)
    /*-{
        return object[name];
    }-*/;

    /**
     * Checks whether the provided object itself has a JavaScript property with
     * the given name. Inherited properties are not taken into account.
     *
     * @see #hasJsProperty(Object, String)
     *
     * @param object
     *            the target object
     * @param name
     *            the name of the property
     * @return <code>true</code> if the object itself has the named property;
     *         <code>false</code> if it doesn't have the property of if the
     *         property is inherited
     */
    public static native boolean hasOwnJsProperty(Object object, String name)
    /*-{
        return Object.prototype.hasOwnProperty.call(object, name);
    }-*/;

    /**
     * Checks whether the provided object has or inherits a JavaScript property
     * with the given name.
     *
     * @see #hasOwnJsProperty(Object, String)
     *
     * @param object
     *            the target object
     * @param name
     *            the name of the property
     * @return <code>true</code> if the object itself has or inherits the named
     *         property; <code>false</code> otherwise
     */
    public static native boolean hasJsProperty(Object object, String name)
    /*-{
        return name in object;
    }-*/;

    /**
     * Checks if the given value is explicitly undefined. <code>null</code>
     * values returns <code>false</code>.
     *
     * @param property
     *            the value to be verified
     * @return <code>true</code> is the value is explicitly undefined,
     *         <code>false</code> otherwise
     */
    public static native boolean isUndefined(Object property)
    /*-{
        return property === undefined;
    }-*/;

    /**
     * Removes a JavaScript property from an object.
     *
     * @param object
     *            the object from which to remove the property
     * @param name
     *            the name of the property to remove
     */
    public static native void deleteJsProperty(Object object, String name)
    /*-{
        delete object[name];
    }-*/;

    /**
     * Creates a new {@link JsonObject} without any JavaScript prototype at all.
     * Not having any prototype is only relevant for objects that are displayed
     * through the browser console.
     *
     *
     * @return a new json object
     */
    public static native JsonObject createJsonObjectWithoutPrototype()
    /*-{
        return $wnd.Object.create(null);
    }-*/;

    /**
     * Creates a new {@link JsonObject} with the JavaScript prototype.
     *
     * @return a new json object
     */
    public static native JsonObject createJsonObject()
    /*-{
        return {};
    }-*/;

    /**
     * Gets the boolean value of the provided value based on JavaScript
     * semantics.
     *
     * @param value
     *            the value to check for truthness
     * @return <code>true</code> if the provided value is trueish according to
     *         JavaScript semantics, otherwise <code>false</code>
     */
    public static native boolean isTrueish(Object value)
    /*-{
        return !!value;
    }-*/;

    /**
     * Gets all JavaScript property names of the given object. This directly
     * calls <code>Object.keys</code>.
     *
     * @param value
     *            the value to get keys for
     * @return an array of key names
     */
    public static native String[] getKeys(Object value)
    /*-{
        return Object.keys(value);
    }-*/;

    /**
     * When serializing the JsonObject we check the values for dom nodes and
     * throw and exception if one is found as they should not be synced and may
     * create cyclic dependencies.
     *
     * @param payload
     *            JsonObject to stringify
     * @return json string of given object
     */
    public static native String stringify(JsonObject payload) /*-{
        return JSON.stringify(payload, function(key, value) {
            if(value instanceof Node){
                throw "Message JsonObject contained a dom node reference which " +
                    "should not be sent to the server and can cause a cyclic dependecy.";
            }
            return value;
        });
    }-*/;
}
|
package cn.jsfund.devtools.sys.mapper;
import cn.jsfund.devtools.sys.entity.SysDept;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
/**
 * <p>
 * Department table Mapper interface.
 * </p>
 *
 * @author hutx
 * @since 2019-01-21
 */
public interface SysDeptMapper extends BaseMapper<SysDept> {
    // Intentionally empty: inherits generic CRUD operations from BaseMapper.
}
|
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ Copyright 2019 Adobe
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
package com.adobe.cq.wcm.core.components.internal.servlets.embed;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.apache.sling.api.SlingHttpServletRequest;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.api.resource.ValueMap;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.extension.ExtendWith;
import com.adobe.cq.wcm.core.components.context.CoreComponentTestContext;
import com.adobe.cq.wcm.core.components.internal.servlets.TextValueDataResourceSource;
import com.adobe.cq.wcm.core.components.internal.servlets.embed.EmbeddablesDataSourceServlet.EmbeddableDescription;
import com.adobe.cq.wcm.core.components.internal.servlets.embed.EmbeddablesDataSourceServlet.EmbeddableDataResourceSource;
import com.adobe.granite.ui.components.ds.DataSource;
import com.adobe.granite.ui.components.ds.SimpleDataSource;
import io.wcm.testing.mock.aem.junit5.AemContext;
import io.wcm.testing.mock.aem.junit5.AemContextExtension;
import org.mockito.Mockito;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
@ExtendWith(AemContextExtension.class)
@ExtendWith(AemContextExtension.class)
public class EmbeddablesDataSourceServletTest {

    private static final String TEST_BASE = "/embed/v1/datasources/embeddables";
    private static final String APPS_ROOT = "/apps";

    public final AemContext context = CoreComponentTestContext.newAemContext();

    private SlingHttpServletRequest request;

    List<Resource> embeddableResources = new ArrayList<>();

    @BeforeEach
    public void setUp() {
        context.load().json(TEST_BASE + CoreComponentTestContext.TEST_CONTENT_JSON, APPS_ROOT);

        // Register the three test embeddables found under /apps/my-app.
        Resource youtube = Objects.requireNonNull(context.resourceResolver().getResource("/apps/my-app/youtube"));
        Resource chatbot = Objects.requireNonNull(context.resourceResolver().getResource("/apps/my-app/chatbot"));
        Resource social = Objects.requireNonNull(context.resourceResolver().getResource("/apps/my-app/social"));
        embeddableResources.add(youtube);
        embeddableResources.add(chatbot);
        embeddableResources.add(social);

        // Spy the request/resolver so resource lookups can be stubbed.
        request = Mockito.spy(context.request());
        ResourceResolver resolver = Mockito.spy(context.resourceResolver());
        when(request.getResourceResolver()).thenReturn(resolver);

        // Resource type relative to the /apps search path.
        String resourceType = youtube.getPath().substring("/apps".length() + 1);
        List<Resource> outputResources = new ArrayList<>();
        outputResources.add(new EmbeddableDataResourceSource(
                new EmbeddableDescription(resourceType, youtube.getName(), youtube.getValueMap()), resolver));
        outputResources.add(new EmbeddableDataResourceSource(
                new EmbeddableDescription(resourceType, chatbot.getName(), chatbot.getValueMap()), resolver));
        context.request().setAttribute(DataSource.class.getName(), new SimpleDataSource(outputResources.iterator()));

        when(resolver.findResources(any(), any())).thenReturn(embeddableResources.iterator());
        when(resolver.getSearchPath()).thenReturn(context.resourceResolver().getSearchPath());
    }

    @Test
    public void testEmbeddablesDataSourceServlet() {
        context.currentResource("/apps/embeddablesdatasource");

        EmbeddablesDataSourceServlet servlet = new EmbeddablesDataSourceServlet();
        servlet.doGet(request, context.response());

        DataSource dataSource = (DataSource) request.getAttribute(DataSource.class.getName());
        assertNotNull(dataSource);

        // The first produced item should be the YouTube embeddable.
        Resource resource = dataSource.iterator().next();
        ValueMap valueMap = resource.getValueMap();
        assertEquals("YouTube", valueMap.get(TextValueDataResourceSource.PN_TEXT, String.class));
        assertEquals("my-app/youtube", valueMap.get(TextValueDataResourceSource.PN_VALUE, String.class));

        // Descriptions built from different resources must not be equal.
        EmbeddableDescription chatbotDescription = new EmbeddableDescription(null,
                embeddableResources.get(1).getName(), embeddableResources.get(1).getValueMap());
        EmbeddableDescription youtubeDescription = new EmbeddableDescription(null,
                embeddableResources.get(0).getName(), embeddableResources.get(0).getValueMap());
        assertNotEquals(youtubeDescription, chatbotDescription);
        assertNotNull(chatbotDescription.hashCode());
    }
}
|
/*
* Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example;
import java.io.File;
import java.lang.management.ManagementFactory;
/**
* Very basic application used for testing {@code BootRun}.
*
* @author Andy Wilkinson
*/
public class BootRunApplication {

    protected BootRunApplication() {
    }

    public static void main(String[] args) {
        // Print every classpath entry of the running JVM, numbered from 1.
        String classPath = ManagementFactory.getRuntimeMXBean().getClassPath();
        String[] entries = classPath.split(File.pathSeparator);
        for (int index = 0; index < entries.length; index++) {
            System.out.println((index + 1) + ". " + entries[index]);
        }
    }
}
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.translate.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.translate.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * EncryptionKey JSON Unmarshaller.
 *
 * Generated code: streams over the current JSON object and populates an
 * {@link EncryptionKey} from the "Type" and "Id" members found at the
 * object's own nesting depth.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class EncryptionKeyJsonUnmarshaller implements Unmarshaller<EncryptionKey, JsonUnmarshallerContext> {

    /**
     * Reads one EncryptionKey object from the parsing context.
     *
     * @param context shared JSON parsing context positioned at (or just before) the object
     * @return the populated key, or {@code null} when the current token is a JSON null
     * @throws Exception propagated from the underlying JSON parser
     */
    public EncryptionKey unmarshall(JsonUnmarshallerContext context) throws Exception {
        EncryptionKey encryptionKey = new EncryptionKey();
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        // Members of this object live exactly one level below the object itself.
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return null;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("Type", targetDepth)) {
                    context.nextToken();
                    encryptionKey.setType(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("Id", targetDepth)) {
                    context.nextToken();
                    encryptionKey.setId(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once we have closed back out to (or above) the depth we started at.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return encryptionKey;
    }

    private static EncryptionKeyJsonUnmarshaller instance;

    // NOTE(review): lazy init without synchronization, as in all generated SDK
    // unmarshallers; benign here because the instance is stateless.
    public static EncryptionKeyJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new EncryptionKeyJsonUnmarshaller();
        return instance;
    }
}
|
/*
* Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.jvm.serialization;
import com.twosigma.beakerx.CodeCell;
import com.twosigma.beakerx.chart.Color;
import com.twosigma.beakerx.easyform.EasyForm;
import com.twosigma.beakerx.jvm.object.BeakerDashboard;
import com.twosigma.beakerx.jvm.object.CyclingOutputContainerLayoutManager;
import com.twosigma.beakerx.jvm.object.EvaluationResult;
import com.twosigma.beakerx.jvm.object.GridOutputContainerLayoutManager;
import com.twosigma.beakerx.jvm.object.OutputContainer;
import com.twosigma.beakerx.jvm.object.OutputContainerCell;
import com.twosigma.beakerx.jvm.object.TabbedOutputContainerLayoutManager;
import com.twosigma.beakerx.table.TableDisplay;
import com.twosigma.beakerx.jvm.object.UpdatableEvaluationResult;
import com.twosigma.beakerx.jvm.object.DashboardLayoutManager;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.swing.*;
import java.awt.image.BufferedImage;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
/**
 * Default {@link BeakerObjectConverter}: maps common JVM types to Beaker display
 * type names and serializes/deserializes objects through an ordered chain of
 * pluggable (de)serializers, with optional thread-local overrides that are
 * consulted before the global configuration.
 */
public class BasicObjectSerializer implements BeakerObjectConverter {

  // Beaker wire-format type names understood by the client.
  public static final String TYPE_INTEGER = "integer";
  public static final String TYPE_LONG = "int64";
  public static final String TYPE_BIGINT = "bigint";
  public static final String TYPE_DOUBLE = "double";
  public static final String TYPE_STRING = "string";
  public static final String TYPE_BOOLEAN = "boolean";
  public static final String TYPE_TIME = "time";
  public static final String TYPE_SELECT = "select";

  private final static Logger logger = LoggerFactory.getLogger(BasicObjectSerializer.class.getName());

  // Global configuration: java class name -> Beaker type name, known Beaker types,
  // and the ordered (de)serializer chains.
  protected final Map<String, String> types;
  protected final List<String> knownBeakerTypes;
  protected final List<ObjectDeserializer> supportedDeserializers;
  protected final List<ObjectSerializer> supportedSerializers;

  // Per-thread overrides; checked before the global configuration above.
  protected final ThreadLocal<Map<String, String>> threadTypes;
  protected final ThreadLocal<List<ObjectDeserializer>> threadDeserializers;
  protected final ThreadLocal<List<ObjectSerializer>> threadSerializers;

  /**
   * True when o is a non-empty collection whose non-null elements are all
   * "primitive type maps" (see {@link #isPrimitiveTypeMap}).
   */
  protected boolean isListOfPrimitiveTypeMaps(Object o) {
    if (!(o instanceof Collection<?>))
      return false;
    Collection<?> c = (Collection<?>) o;
    if (c.isEmpty())
      return false;
    for (Object obj : c) {
      if (obj != null && !isPrimitiveTypeMap(obj)) {
        return false;
      }
    }
    return true;
  }

  /**
   * True when o is a map whose non-null values are all of a registered
   * primitive (directly displayable) type. Keys are not inspected.
   */
  protected boolean isPrimitiveTypeMap(Object o) {
    if (!(o instanceof Map<?, ?>))
      return false;
    Map<?, ?> m = (Map<?, ?>) o;
    Set<?> eset = m.entrySet();
    for (Object entry : eset) {
      Entry<?, ?> e = (Entry<?, ?>) entry;
      if (e.getValue() != null && !isPrimitiveTypeMap(o) && !isPrimitiveType(e.getValue().getClass().getName()))
        return false;
    }
    return true;
  }

  /**
   * True when o is a collection of collections of primitive-typed (or null)
   * elements, with at least 2 rows and a widest row of at least 2 columns —
   * i.e. something worth rendering as a matrix.
   */
  protected boolean isPrimitiveTypeListOfList(Object o) {
    if (!(o instanceof Collection<?>))
      return false;
    Collection<?> m = (Collection<?>) o;
    int max = 0;
    for (Object entry : m) {
      if (!(entry instanceof Collection<?>))
        return false;
      Collection<?> e = (Collection<?>) entry;
      for (Object ei : e) {
        if (ei != null && !isPrimitiveType(ei.getClass().getName()))
          return false;
      }
      if (max < e.size())
        max = e.size();
    }
    return max >= 2 && m.size() >= 2;
  }

  /**
   * Registers the default type conversions (JDK scalar types to Beaker type
   * names) and the default serializer chain. Chain order matters: the first
   * serializer whose canBeUsed() accepts an object wins.
   */
  public BasicObjectSerializer() {
    types = new HashMap<String, String>();
    threadDeserializers = new ThreadLocal<List<ObjectDeserializer>>();
    threadSerializers = new ThreadLocal<List<ObjectSerializer>>();
    supportedDeserializers = new ArrayList<ObjectDeserializer>();
    supportedSerializers = new ArrayList<ObjectSerializer>();
    threadTypes = new ThreadLocal<Map<String, String>>();
    knownBeakerTypes = new ArrayList<String>();
    addTypeConversion("java.lang.Boolean", TYPE_BOOLEAN);
    addTypeConversion("java.lang.Byte", TYPE_INTEGER);
    addTypeConversion("java.lang.Character", TYPE_STRING);
    addTypeConversion("java.lang.Double", TYPE_DOUBLE);
    addTypeConversion("java.lang.Enum", TYPE_SELECT);
    addTypeConversion("java.lang.Float", TYPE_DOUBLE);
    addTypeConversion("java.lang.Integer", TYPE_INTEGER);
    addTypeConversion("java.lang.Long", TYPE_LONG);
    addTypeConversion("java.lang.Short", TYPE_INTEGER);
    addTypeConversion("java.lang.String", TYPE_STRING);
    addTypeConversion("java.lang.StringBuffer", TYPE_STRING);
    addTypeConversion("java.lang.StringBuilder", TYPE_STRING);
    addTypeConversion("java.util.Date", TYPE_TIME);
    addTypeConversion("java.util.concurrent.atomic.AtomicInteger", TYPE_INTEGER);
    addTypeConversion("java.util.concurrent.atomic.AtomicLong", TYPE_INTEGER);
    addTypeConversion("java.math.BigDecimal", TYPE_DOUBLE);
    addTypeConversion("java.math.BigInteger", TYPE_BIGINT);
    addTypeConversion("org.codehaus.groovy.runtime.GStringImpl", TYPE_STRING);
    addTypeSerializer(new PrimitiveTypeSerializer());
    addTypeSerializer(new ListOfPrimitiveTypeMapsSerializer(this));
    addTypeSerializer(new PrimitiveTypeListOfListSerializer());
    addTypeSerializer(new PrimitiveTypeMapSerializer());
    addTypeSerializer(new ArraySerializer(this));
    addTypeSerializer(new CollectionSerializer(this));
    addTypeSerializer(new MapSerializer(this));
  }

  /**
   * Maps a Java class name to its Beaker type name; thread-local conversions
   * win over global ones. Returns the empty string for unknown types.
   */
  @Override
  public String convertType(String tn) {
    if (threadTypes.get()!=null && threadTypes.get().containsKey(tn))
      return threadTypes.get().get(tn);
    if (types.containsKey(tn))
      return types.get(tn);
    return "";
  }

  /** True when the class name has a registered (global or thread-local) conversion. */
  @Override
  public boolean isPrimitiveType(String tn) {
    return types.containsKey(tn) || (threadTypes.get()!=null && threadTypes.get().containsKey(tn));
  }

  /**
   * Serializes obj into jgen. Known displayable types are written directly via
   * Jackson; everything else is offered to the thread-local chain first, then
   * the configured chain.
   *
   * @param obj    object to serialize (null writes a JSON null)
   * @param jgen   target JSON generator
   * @param expand whether rich (table-style) expansions are allowed
   * @return true when some serializer handled the object
   */
  @Override
  public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand)
      throws IOException {
    if (obj == null) {
      jgen.writeNull();
    } else if ((obj instanceof TableDisplay) ||
        (obj instanceof EvaluationResult) ||
        (obj instanceof UpdatableEvaluationResult) ||
        (obj instanceof CodeCell) ||
        (obj instanceof ImageIcon) ||
        (obj instanceof Date) ||
        (obj instanceof BeakerDashboard) ||
        (obj instanceof BufferedImage) ||
        (obj instanceof TabbedOutputContainerLayoutManager) ||
        (obj instanceof GridOutputContainerLayoutManager) ||
        (obj instanceof CyclingOutputContainerLayoutManager) ||
        (obj instanceof DashboardLayoutManager) ||
        (obj instanceof OutputContainerCell) ||
        (obj instanceof OutputContainer) ||
        (obj instanceof EasyForm) ||
        (obj instanceof Color)) {
      logger.debug("basic object");
      jgen.writeObject(obj);
    } else
      return runThreadSerializers(obj, jgen, expand) || runConfiguredSerializers(obj,
          jgen,
          expand);
    return true;
  }

  /**
   * Offers obj to each thread-local serializer in order; a serializer's own
   * exception is logged and the next one is tried.
   */
  public boolean runThreadSerializers(Object obj, JsonGenerator jgen, boolean expand) throws IOException, JsonProcessingException {
    if (threadSerializers.get() == null)
      return false;
    for (ObjectSerializer s : threadSerializers.get()) {
      try {
        if (s.canBeUsed(obj, expand) && s.writeObject(obj, jgen, expand)) {
          logger.debug("used thread serialization");
          return true;
        }
      } catch (Exception e) {
        logger.error("exception in thread serialization", e);
      }
    }
    return false;
  }

  /** Offers obj to each globally configured serializer in registration order. */
  public boolean runConfiguredSerializers(Object obj, JsonGenerator jgen, boolean expand) throws IOException, JsonProcessingException {
    for (ObjectSerializer s : supportedSerializers) {
      if (s.canBeUsed(obj, expand) && s.writeObject(obj, jgen, expand))
        return true;
    }
    return false;
  }

  /**
   * Deserializes a JSON node: thread-local deserializers first, then the
   * configured ones, finally a plain Jackson read of the node's text as a
   * last resort. Individual deserializer failures are logged and skipped.
   *
   * @return the deserialized object, or null when nothing could parse the node
   */
  @Override
  public Object deserialize(JsonNode n, ObjectMapper mapper) {
    if (n==null)
      return null;
    Object obj = null;
    if(threadDeserializers.get()!=null) {
      for (ObjectDeserializer d : threadDeserializers.get()) {
        try {
          if (d.canBeUsed(n)) {
            obj = d.deserialize(n, mapper);
            if (obj != null) {
              logger.debug("used thread deserialization");
              break;
            }
          }
        } catch (Exception e) {
          logger.error("exception in thread deserialization",e);
          obj = null;
        }
      }
    }
    if (obj!=null)
      return obj;
    for (ObjectDeserializer d : supportedDeserializers) {
      try {
        if (d.canBeUsed(n)) {
          obj = d.deserialize(n, mapper);
          if (obj != null) {
            logger.debug("used custom deserialization");
            break;
          }
        }
      } catch (Exception e) {
        logger.error("exception in deserialization",e);
        obj = null;
      }
    }
    if (obj==null) {
      logger.debug("using standard deserialization");
      try {
        // Fallback: interpret the node's textual value as a generic object.
        obj = mapper.readValue(n.asText(), Object.class);
      } catch (Exception e) {
        logger.error("exception in auto deserialization",e);
        obj = null;
      }
    }
    return obj;
  }

  /*
   * (non-Javadoc)
   * These implement module behavior modification
   */
  @Override
  public void addTypeConversion(String from, String to) {
    types.put(from,to);
  }

  @Override
  public void addTypeDeserializer(ObjectDeserializer o) {
    supportedDeserializers.add(o);
  }

  @Override
  public void addTypeSerializer(ObjectSerializer o) {
    supportedSerializers.add(o);
  }

  // The "addf" variants prepend, giving the new handler priority over existing ones.
  @Override
  public void addfTypeDeserializer(ObjectDeserializer o) {
    supportedDeserializers.add(0,o);
  }

  @Override
  public void addfTypeSerializer(ObjectSerializer o) {
    supportedSerializers.add(0,o);
  }

  /*
   * (non-Javadoc)
   * These implement thread specific module behavior modification
   */
  @Override
  public void addThreadSpecificTypeConversion(String from, String to) {
    if (threadTypes.get()==null)
      threadTypes.set(new HashMap<String,String>());
    threadTypes.get().put(from, to);
  }

  @Override
  public void addThreadSpecificTypeDeserializer(ObjectDeserializer o) {
    if (threadDeserializers.get()==null)
      threadDeserializers.set(new ArrayList<ObjectDeserializer>());
    threadDeserializers.get().add(o);
  }

  @Override
  public void addThreadSpecificTypeSerializer(ObjectSerializer o) {
    if (threadSerializers.get()==null)
      threadSerializers.set(new ArrayList<ObjectSerializer>());
    threadSerializers.get().add(o);
  }

  /*
   * (non-Javadoc)
   * These are the default auto-transforming serializers
   */

  /** Writes registered primitive types directly through Jackson. */
  class PrimitiveTypeSerializer implements ObjectSerializer {
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return isPrimitiveType(obj.getClass().getName());
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      jgen.writeObject(obj);
      return true;
    }
  }

  /** Renders a list of primitive-type maps as a TableDisplay (only when expand is allowed). */
  class ListOfPrimitiveTypeMapsSerializer implements ObjectSerializer {
    private final BasicObjectSerializer parent;
    public ListOfPrimitiveTypeMapsSerializer(BasicObjectSerializer p) {
      parent = p;
    }
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return expand && isListOfPrimitiveTypeMaps(obj);
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("list of maps");
      try {
        // convert this 'on the fly' to a datatable
        @SuppressWarnings("unchecked")
        Collection<Map<String, Object>> co = (Collection<Map<String, Object>>) obj;
        TableDisplay t = new TableDisplay(co,parent);
        jgen.writeObject(t);
        return true;
      } catch(Exception e) {
        // fall through so the next serializer in the chain can try
        return false;
      }
    }
  }

  /** Renders a ragged list-of-lists of primitives as a matrix TableDisplay, padding short rows with null. */
  class PrimitiveTypeListOfListSerializer implements ObjectSerializer {
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return expand && isPrimitiveTypeListOfList(obj);
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("collection of collections");
      Collection<?> m = (Collection<?>) obj;
      int max = 0;
      for (Object entry : m) {
        Collection<?> e = (Collection<?>) entry;
        if (max < e.size())
          max = e.size();
      }
      // Column names are synthesized as c0..c(n-1).
      List<String> columns = new ArrayList<String>();
      for (int i=0; i<max; i++)
        columns.add("c"+i);
      List<List<?>> values = new ArrayList<List<?>>();
      for (Object entry : m) {
        Collection<?> e = (Collection<?>) entry;
        List<Object> l2 = new ArrayList<Object>(e);
        if (l2.size() < max) {
          for (int i=l2.size(); i<max; i++)
            l2.add(null);
        }
        values.add(l2);
      }
      jgen.writeStartObject();
      jgen.writeObjectField("type", "TableDisplay");
      jgen.writeObjectField("columnNames", columns);
      jgen.writeObjectField("values", values);
      jgen.writeObjectField("subtype", TableDisplay.MATRIX_SUBTYPE);
      jgen.writeEndObject();
      return true;
    }
  }

  /** Renders a primitive-valued map as a two-column Key/Value dictionary TableDisplay. */
  class PrimitiveTypeMapSerializer implements ObjectSerializer {
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return expand && isPrimitiveTypeMap(obj);
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("primitive type map");
      Map<?,?> m = (Map<?,?>) obj;
      List<String> columns = new ArrayList<String>();
      columns.add("Key");
      columns.add("Value");
      List<List<?>> values = new ArrayList<List<?>>();
      Set<?> eset = m.entrySet();
      for (Object entry : eset) {
        Entry<?,?> e = (Entry<?, ?>) entry;
        List<Object> l = new ArrayList<Object>();
        Object o = e.getKey();
        l.add(null==o?"null":o.toString());
        l.add(e.getValue());
        values.add(l);
      }
      jgen.writeStartObject();
      jgen.writeObjectField("type", "TableDisplay");
      jgen.writeObjectField("columnNames", columns);
      jgen.writeObjectField("values", values);
      jgen.writeObjectField("subtype", TableDisplay.DICTIONARY_SUBTYPE);
      jgen.writeEndObject();
      return true;
    }
  }

  /** Serializes any Java array as a JSON array, recursing through the parent converter per element. */
  class ArraySerializer implements ObjectSerializer {
    private final BasicObjectSerializer parent;
    public ArraySerializer(BasicObjectSerializer p) {
      parent = p;
    }
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return obj.getClass().isArray();
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("array");
      // write out an array of objects.
      jgen.writeStartArray();
      final int length = Array.getLength(obj);
      for (int i = 0; i < length; ++i) {
        Object o = Array.get(obj, i);
        // Elements the parent cannot handle degrade to their toString().
        if (!parent.writeObject(o, jgen, false)) {
          jgen.writeObject(o.toString());
        }
      }
      jgen.writeEndArray();
      return true;
    }
  }

  /** Serializes any Collection as a JSON array (catch-all after the table-style serializers). */
  class CollectionSerializer implements ObjectSerializer {
    private final BasicObjectSerializer parent;
    public CollectionSerializer(BasicObjectSerializer p) {
      parent = p;
    }
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return obj instanceof Collection<?>;
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("collection");
      // convert this 'on the fly' to an array of objects
      Collection<?> c = (Collection<?>) obj;
      jgen.writeStartArray();
      for(Object o : c) {
        if (!parent.writeObject(o, jgen, false))
          jgen.writeObject(o.toString());
      }
      jgen.writeEndArray();
      return true;
    }
  }

  /**
   * Serializes any Map: string-keyed maps become JSON objects; maps with
   * non-string (or no) keys fall back to their toString().
   */
  class MapSerializer implements ObjectSerializer {
    private final BasicObjectSerializer parent;
    public MapSerializer(BasicObjectSerializer p) {
      parent = p;
    }
    @Override
    public boolean canBeUsed(Object obj, boolean expand) {
      return obj instanceof Map<?,?>;
    }
    @Override
    public boolean writeObject(Object obj, JsonGenerator jgen, boolean expand) throws JsonProcessingException, IOException {
      logger.debug("generic map");
      // convert this 'on the fly' to a map of objects
      Map<?,?> m = (Map<?,?>) obj;
      Set<?> kset = m.keySet();
      // Only the first key is inspected: the whole map is assumed homogeneous.
      if (kset.size()==0 || !(kset.iterator().next() instanceof String))
        jgen.writeObject(obj.toString());
      else {
        jgen.writeStartObject();
        for (Object k : kset) {
          jgen.writeFieldName((null==k)?"null":k.toString());
          if (!parent.writeObject(m.get(k), jgen, false))
            jgen.writeObject(m.get(k)!=null ? (m.get(k).toString()) : "null");
        }
        jgen.writeEndObject();
      }
      return true;
    }
  }

  @Override
  public void addKnownBeakerType(String t) {
    knownBeakerTypes.add(t);
  }

  @Override
  public boolean isKnownBeakerType(String t) {
    return knownBeakerTypes.contains(t);
  }
}
|
package org.beanpod.switchboard.repository;
import java.util.List;
import org.beanpod.switchboard.entity.LogEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;
/**
 * Spring Data repository for {@link LogEntity} rows. Both query methods join
 * log entries to devices owned by a given user, so callers only ever see logs
 * for their own devices.
 */
@Repository
public interface LogRepository extends JpaRepository<LogEntity, Long> {

  // Redeclared from JpaRepository for an explicit, non-generic return type.
  LogEntity save(LogEntity logEntity);

  /**
   * Returns every log entry belonging to any device registered to the user.
   *
   * @param userId id of the owning user
   * @return matching log entries (empty list when the user has no devices/logs)
   */
  @Query(
      value =
          "select lo.id, lo.level, lo.message, lo.serial_number, lo.date_time "
              + "from log lo, user us, device de "
              + "where us.id = de.user_id and us.id = :id "
              + "and de.serial_number = lo.serial_number",
      nativeQuery = true)
  List<LogEntity> findAll(@Param("id") Long userId);

  /**
   * Returns log entries for one specific device, provided that device is
   * registered to the given user.
   *
   * @param serialNumber device serial number to filter on
   * @param userId id of the owning user
   * @return matching log entries (empty list when none match)
   */
  @Query(
      value =
          "select lo.id, lo.level, lo.message, lo.serial_number, lo.date_time "
              + "from log lo, user us, device de "
              + "where us.id = de.user_id and us.id = :id "
              + "and de.serial_number = lo.serial_number and lo.serial_number= :serial",
      nativeQuery = true)
  List<LogEntity> findBySerialNumber(
      @Param("serial") String serialNumber, @Param("id") Long userId);
}
|
package uk.ac.ucl.eidp.auth;
import uk.ac.ucl.eidp.auth.model.UserE;
import javax.ejb.Stateless;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.TypedQuery;
/**
 * Stateless session bean exposing persistence operations for {@link UserE} entities.
 *
 * @author David Guzman {@literal d.guzman at ucl.ac.uk}
 */
@Stateless
public class UserController {

  @PersistenceContext(unitName = "eidpauthPU")
  EntityManager em;

  /**
   * Searches for a specific user and password combination.
   *
   * @param login User id
   * @param password User credential
   * @return Entity containing information about the authenticated user
   * @throws javax.persistence.NoResultException if no user matches the credentials
   */
  public UserE findUser(String login, String password) {
    TypedQuery<UserE> typedQuery = em.createNamedQuery(UserE.FIND_BY_LOGIN_PASSWORD, UserE.class);
    typedQuery.setParameter("login", login);
    typedQuery.setParameter("password", password);
    return typedQuery.getSingleResult();
  }

  /**
   * Persists the given user entity.
   *
   * Fix: the previous implementation was an empty method that silently
   * discarded the entity; it now hands the entity to the managed
   * {@link EntityManager}.
   *
   * @param user entity to persist
   */
  public void save(UserE user) {
    em.persist(user);
  }

  /**
   * Looks up a user by id and authentication token.
   *
   * @param authId user identifier
   * @param authToken authentication token
   * @return always {@code null} — not implemented yet
   */
  public UserE findByUsernameAndAuthToken(String authId, String authToken) {
    //TODO implement token-based lookup
    return null;
  }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.example.camel.transport;
import org.apache.hello_world_soap_http.Greeter;
import org.apache.hello_world_soap_http.PingMeFault;
import org.apache.hello_world_soap_http.types.FaultDetail;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@javax.jws.WebService(portName = "CamelPort", serviceName = "CamelService",
targetNamespace = "http://apache.org/hello_world_soap_http",
endpointInterface = "org.apache.hello_world_soap_http.Greeter")
public class GreeterImpl implements Greeter {
private static final Logger LOG = LoggerFactory.getLogger(GreeterImpl.class);
private String suffix;
public GreeterImpl() {
}
public void setSuffix(String suffix) {
this.suffix = suffix;
}
/* (non-Javadoc)
* @see org.apache.hello_world_soap_http.Greeter#greetMe(java.lang.String)
*/
public String greetMe(String me) {
LOG.info("Executing operation greetMe");
System.out.println("Executing operation greetMe");
System.out.println("Message received: " + me + "\n");
return "Hello " + me + " from " + suffix;
}
/* (non-Javadoc)
* @see org.apache.hello_world_soap_http.Greeter#greetMeOneWay(java.lang.String)
*/
public void greetMeOneWay(String me) {
LOG.info("Executing operation greetMeOneWay");
System.out.println("Executing operation greetMeOneWay\n");
System.out.println("Hello there " + me);
}
/* (non-Javadoc)
* @see org.apache.hello_world_soap_http.Greeter#sayHi()
*/
public String sayHi() {
LOG.info("Executing operation sayHi");
System.out.println("Executing operation sayHi\n");
return "Bonjour from " + suffix;
}
public void pingMe(String messageIn) throws PingMeFault {
FaultDetail faultDetail = new FaultDetail();
faultDetail.setMajor((short)2);
faultDetail.setMinor((short)1);
LOG.info("Executing operation pingMe, throwing PingMeFault exception, message = "
+ messageIn);
System.out.println("Executing operation pingMe, throwing PingMeFault exception\n");
throw new PingMeFault("PingMeFault raised by server " + suffix, faultDetail);
}
}
|
package mars.venus;
import mars.*;
import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
import java.io.*;
import java.awt.print.*;
import java.util.*;
/*
Copyright (c) 2003-2006, Pete Sanderson and Kenneth Vollmar
Developed by Pete Sanderson (psanderson@otterbein.edu)
and Kenneth Vollmar (kenvollmar@missouristate.edu)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
(MIT license, http://www.opensource.org/licenses/mit-license.html)
*/
/**
* Action for the File -> Print menu item
*/
/**
 * Action for the File -> Print menu item.
 */
public class FilePrintAction extends GuiAction {

    public FilePrintAction(String name, Icon icon, String descrip,
                           Integer mnemonic, KeyStroke accel, VenusUI gui) {
        super(name, icon, descrip, mnemonic, accel, gui);
    }

    /**
     * Uses the HardcopyWriter class developed by David Flanagan for the book
     * "Java Examples in a Nutshell". It will do basic printing of multipage
     * text documents. It displays a print dialog but does not act on any
     * changes the user may have specified there, such as number of copies.
     *
     * Fixes relative to the original: deprecated {@code new Integer(...)}
     * boxing constructors replaced with {@code Integer.toString}, the unused
     * local {@code numchars} removed, and both streams are now closed in a
     * finally block so they no longer leak when an I/O error interrupts the
     * print loop.
     *
     * @param e component triggering this call
     */
    public void actionPerformed(ActionEvent e) {
        EditPane editPane = mainUI.getMainPane().getEditPane();
        if (editPane == null) return;
        int fontsize = 10; // fixed at 10 point
        double margins = .5; // all margins (left,right,top,bottom) fixed at .5"
        HardcopyWriter out;
        try {
            out = new HardcopyWriter(mainUI, editPane.getFilename(),
                    fontsize, margins, margins, margins, margins);
        } catch (HardcopyWriter.PrintCanceledException pce) {
            return; // user dismissed the print dialog
        }
        BufferedReader in = new BufferedReader(new StringReader(editPane.getSource()));
        int lineNumberDigits = Integer.toString(editPane.getSourceLineCount()).length();
        String line;
        String lineNumberString = "";
        int lineNumber = 0;
        try {
            line = in.readLine();
            while (line != null) {
                if (editPane.showingLineNumbers()) {
                    lineNumber++;
                    lineNumberString = Integer.toString(lineNumber) + ": ";
                    // NOTE(review): pads against lineNumberDigits even though ": " already
                    // adds two characters, so this loop rarely fires — confirm the
                    // intended gutter width (kept as-is to preserve output).
                    while (lineNumberString.length() < lineNumberDigits) {
                        lineNumberString = lineNumberString + " ";
                    }
                }
                line = lineNumberString + line + "\n";
                out.write(line.toCharArray(), 0, line.length());
                line = in.readLine();
            }
        } catch (IOException ioe) {
            // Printing is best-effort; read errors from the in-memory source are ignored.
        } finally {
            try {
                in.close();
            } catch (IOException ignored) {
            }
            try {
                out.close(); // flushes and ends the print job
            } catch (Exception ignored) {
            }
        }
    }
}
|
package www.juyun.net.gsjcommon.photolib.event;
import java.util.List;
import www.juyun.net.gsjcommon.photolib.entity.Photo;
/**
* Created by donglua on 15/6/30.
*/
/**
 * Abstraction for tracking a multi-select set of {@link Photo} items.
 *
 * Created by donglua on 15/6/30.
 */
public interface Selectable {

  /**
   * Indicates whether the given photo is currently selected.
   *
   * @param photo the photo to check
   * @return true if the item is selected, false otherwise
   */
  boolean isSelected(Photo photo);

  /**
   * Toggles the selection status of the given photo: selects it when
   * deselected, deselects it when selected.
   *
   * @param photo the photo whose selection status should be toggled
   */
  void toggleSelection(Photo photo);

  /**
   * Clears the selection status for all items.
   */
  void clearSelection();

  /**
   * Counts the selected items.
   *
   * @return number of currently selected items
   */
  int getSelectedItemCount();

  /**
   * Returns the currently selected photos.
   *
   * @return list of selected photos
   */
  List<Photo> getSelectedPhotos();
}
|
package de.jpaw.bonaparte.util.impl;
import de.jpaw.bonaparte.core.BonaPortable;
import de.jpaw.bonaparte.core.JsonComposer;
import de.jpaw.bonaparte.core.MapParser;
import de.jpaw.bonaparte.core.MimeTypes;
import de.jpaw.bonaparte.core.StaticMeta;
import de.jpaw.bonaparte.util.IMarshaller;
import de.jpaw.json.JsonParser;
import de.jpaw.util.ApplicationException;
import de.jpaw.util.ByteArray;
import de.jpaw.util.ByteBuilder;
/**
 * {@link IMarshaller} implementation that converts {@link BonaPortable} records
 * to and from their JSON text representation.
 */
public class RecordMarshallerJson implements IMarshaller {

    /** Returns the MIME type of payloads produced by this marshaller. */
    @Override
    public String getContentType() {
        return MimeTypes.MIME_TYPE_JSON;
    }

    /** Serializes the record as JSON text, encoded into a byte array. */
    @Override
    public ByteArray marshal(BonaPortable request) {
        return ByteArray.fromString(JsonComposer.toJsonString(request));
    }

    /**
     * Parses the buffer contents as UTF-8 JSON and maps the resulting object
     * back into a {@link BonaPortable} via the generic outer metadata.
     *
     * @param buffer raw JSON bytes to decode
     * @return the reconstructed record
     * @throws ApplicationException when the JSON is invalid or cannot be mapped
     */
    @Override
    public BonaPortable unmarshal(ByteBuilder buffer) throws ApplicationException {
        final JsonParser jp = new JsonParser(new String(buffer.getCurrentBuffer(), 0, buffer.length(), ByteArray.CHARSET_UTF8), false);
        return MapParser.asBonaPortable(jp.parseObject(), StaticMeta.OUTER_BONAPORTABLE);
    }
}
|
package org.apereo.cas.config;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apereo.cas.CipherExecutor;
import org.apereo.cas.StringBean;
import org.apereo.cas.configuration.CasConfigurationProperties;
import org.apereo.cas.configuration.model.support.jms.JmsTicketRegistryProperties;
import org.apereo.cas.ticket.registry.JmsTicketRegistry;
import org.apereo.cas.ticket.registry.JmsTicketRegistryReceiver;
import org.apereo.cas.ticket.registry.TicketRegistry;
import org.apereo.cas.util.CoreTicketUtils;
import org.apereo.cas.util.serialization.AbstractJacksonBackedStringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.jms.DefaultJmsListenerContainerFactoryConfigurer;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jms.annotation.EnableJms;
import org.springframework.jms.config.DefaultJmsListenerContainerFactory;
import org.springframework.jms.config.JmsListenerContainerFactory;
import org.springframework.jms.core.JmsTemplate;
import org.springframework.jms.support.converter.MappingJackson2MessageConverter;
import org.springframework.jms.support.converter.MessageConverter;
import org.springframework.jms.support.converter.MessageType;
import javax.jms.ConnectionFactory;
/**
 * This is {@link JmsTicketRegistryConfiguration}: wires up the JMS-backed CAS
 * ticket registry — the registry itself, its message receiver, the listener
 * container factory, and the Jackson message converter.
 *
 * @author Misagh Moayyed
 * @since 5.2.0
 */
@Configuration("jmsTicketRegistryConfiguration")
@EnableConfigurationProperties(CasConfigurationProperties.class)
@EnableJms
public class JmsTicketRegistryConfiguration {

    @Autowired
    private CasConfigurationProperties casProperties;

    @Autowired
    private JmsTemplate jmsTemplate;

    /** Unique id identifying this CAS node on the shared ticket queue. */
    @Bean
    public StringBean messageQueueTicketRegistryIdentifier() {
        return new StringBean();
    }

    /** Receiver that applies ticket messages from other nodes to the local registry. */
    @Bean
    public JmsTicketRegistryReceiver messageQueueTicketRegistryReceiver() {
        return new JmsTicketRegistryReceiver(ticketRegistry(), messageQueueTicketRegistryIdentifier());
    }

    /** JMS-backed ticket registry, with crypto configured from CAS properties. */
    @Bean
    public TicketRegistry ticketRegistry() {
        final JmsTicketRegistryProperties jms = casProperties.getTicket().getRegistry().getJms();
        final CipherExecutor cipher = CoreTicketUtils.newTicketRegistryCipherExecutor(jms.getCrypto(), "jms");
        return new JmsTicketRegistry(this.jmsTemplate, messageQueueTicketRegistryIdentifier(), cipher);
    }

    /** Listener container factory configured with Spring Boot's JMS defaults. */
    @Autowired
    @Bean
    public JmsListenerContainerFactory<?> messageQueueTicketRegistryFactory(final ConnectionFactory connectionFactory,
                                                                            final DefaultJmsListenerContainerFactoryConfigurer configurer) {
        final DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
        configurer.configure(factory, connectionFactory);
        return factory;
    }

    /** Jackson text-message converter used for ticket payloads on the queue. */
    @Bean
    public MessageConverter jacksonJmsMessageConverter() {
        final MappingJackson2MessageConverter converter = new MappingJackson2MessageConverter();
        converter.setTargetType(MessageType.TEXT);
        converter.setTypeIdPropertyName("_type");
        // NOTE(review): this anonymous serializer is instantiated purely for its
        // side effect — initializeObjectMapper() hands the CAS-configured
        // ObjectMapper to the converter. That only works if the superclass
        // constructor invokes initializeObjectMapper(); confirm against
        // AbstractJacksonBackedStringSerializer, otherwise the converter keeps
        // its default mapper and this instance is dead code.
        new AbstractJacksonBackedStringSerializer<Object>() {
            private static final long serialVersionUID = 1466569521275630254L;

            @Override
            protected Class getTypeToSerialize() {
                return Object.class;
            }

            @Override
            protected ObjectMapper initializeObjectMapper() {
                final ObjectMapper mapper = super.initializeObjectMapper();
                converter.setObjectMapper(mapper);
                return mapper;
            }
        };
        return converter;
    }
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aliyuncs.cdn.model.v20180510;
import com.aliyuncs.RpcAcsRequest;
import com.aliyuncs.http.MethodType;
import com.aliyuncs.cdn.Endpoint;
/**
 * Request object for the CDN {@code DescribeDomainRealTimeDetailData} API
 * (product "Cdn", version 2018-05-10, HTTP GET).
 *
 * <p>Generated SDK boilerplate: each setter stores the value and mirrors it
 * into the request's query parameters when non-null.
 *
 * @author auto create
 * @version
 */
public class DescribeDomainRealTimeDetailDataRequest extends RpcAcsRequest<DescribeDomainRealTimeDetailDataResponse> {
    private String locationNameEn;

    private String startTime;

    private String ispNameEn;

    private String merge;

    private String domainName;

    private String endTime;

    private String mergeLocIsp;

    private Long ownerId;

    private String field;

    public DescribeDomainRealTimeDetailDataRequest() {
        super("Cdn", "2018-05-10", "DescribeDomainRealTimeDetailData");
        setMethod(MethodType.GET);
        // Best-effort reflective injection of the product endpoint data into the
        // base AcsRequest; failures are deliberately ignored (standard pattern in
        // this generated SDK — the request still works via default endpoint
        // resolution). NOTE(review): no setAccessible(true) is called, so this
        // presumably relies on the base-class fields being accessible — confirm
        // against the installed aliyun-java-sdk-core version.
        try {
            com.aliyuncs.AcsRequest.class.getDeclaredField("productEndpointMap").set(this, Endpoint.endpointMap);
            com.aliyuncs.AcsRequest.class.getDeclaredField("productEndpointRegional").set(this, Endpoint.endpointRegionalType);
        } catch (Exception e) {}
    }

    // Getters/setters below follow the generated pattern: assign the field and,
    // when non-null, publish it as the corresponding query parameter.

    public String getLocationNameEn() {
        return this.locationNameEn;
    }

    public void setLocationNameEn(String locationNameEn) {
        this.locationNameEn = locationNameEn;
        if(locationNameEn != null){
            putQueryParameter("LocationNameEn", locationNameEn);
        }
    }

    public String getStartTime() {
        return this.startTime;
    }

    public void setStartTime(String startTime) {
        this.startTime = startTime;
        if(startTime != null){
            putQueryParameter("StartTime", startTime);
        }
    }

    public String getIspNameEn() {
        return this.ispNameEn;
    }

    public void setIspNameEn(String ispNameEn) {
        this.ispNameEn = ispNameEn;
        if(ispNameEn != null){
            putQueryParameter("IspNameEn", ispNameEn);
        }
    }

    public String getMerge() {
        return this.merge;
    }

    public void setMerge(String merge) {
        this.merge = merge;
        if(merge != null){
            putQueryParameter("Merge", merge);
        }
    }

    public String getDomainName() {
        return this.domainName;
    }

    public void setDomainName(String domainName) {
        this.domainName = domainName;
        if(domainName != null){
            putQueryParameter("DomainName", domainName);
        }
    }

    public String getEndTime() {
        return this.endTime;
    }

    public void setEndTime(String endTime) {
        this.endTime = endTime;
        if(endTime != null){
            putQueryParameter("EndTime", endTime);
        }
    }

    public String getMergeLocIsp() {
        return this.mergeLocIsp;
    }

    public void setMergeLocIsp(String mergeLocIsp) {
        this.mergeLocIsp = mergeLocIsp;
        if(mergeLocIsp != null){
            putQueryParameter("MergeLocIsp", mergeLocIsp);
        }
    }

    public Long getOwnerId() {
        return this.ownerId;
    }

    public void setOwnerId(Long ownerId) {
        this.ownerId = ownerId;
        if(ownerId != null){
            putQueryParameter("OwnerId", ownerId.toString());
        }
    }

    public String getField() {
        return this.field;
    }

    public void setField(String field) {
        this.field = field;
        if(field != null){
            putQueryParameter("Field", field);
        }
    }

    @Override
    public Class<DescribeDomainRealTimeDetailDataResponse> getResponseClass() {
        return DescribeDomainRealTimeDetailDataResponse.class;
    }
}
|
package org.grouporga.java.back.end.api.data.checks;
import com.yahoo.elide.security.ChangeSpec;
import com.yahoo.elide.security.RequestScope;
import com.yahoo.elide.security.checks.InlineCheck;
import org.grouporga.java.back.end.api.data.domain.OwnableEntity;
import org.grouporga.java.back.end.api.security.OrgaUserDetails;
import java.util.Optional;
/**
 * Elide security check that passes only when the request carries an
 * authenticated application principal (an {@link OrgaUserDetails}).
 */
public class IsUser {

    /** Permission-expression name under which this check is registered. */
    public static final String EXPRESSION = "is user";

    /** Inline (per-record) variant of the check. */
    public static class Inline extends InlineCheck<OwnableEntity> {

        @Override
        public boolean ok(OwnableEntity entity, RequestScope requestScope, Optional<ChangeSpec> changeSpec) {
            // Passes iff the opaque principal attached to the request is our user type.
            return requestScope.getUser().getOpaqueUser() instanceof OrgaUserDetails;
        }

        @Override
        public boolean ok(com.yahoo.elide.security.User user) {
            // User-only evaluation has no request scope to inspect; always deny.
            return false;
        }
    }
}
|
package org.keycloak.models.cache;
import org.keycloak.provider.Provider;
import org.keycloak.provider.ProviderFactory;
import org.keycloak.provider.Spi;
/**
 * SPI descriptor for the internal "userCache" provider.
 *
 * @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
 * @version $Revision: 1 $
 */
public class CacheUserProviderSpi implements Spi {

    @Override
    public String getName() {
        return "userCache";
    }

    @Override
    public boolean isInternal() {
        // Internal SPI: not intended to be swapped by deployments.
        return true;
    }

    @Override
    public Class<? extends Provider> getProviderClass() {
        return CacheUserProvider.class;
    }

    @Override
    public Class<? extends ProviderFactory> getProviderFactoryClass() {
        return CacheUserProviderFactory.class;
    }
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.appium.java_client;
/**
 * Locator strategies understood by the Appium server, each keyed by the exact
 * wire-protocol string it is transmitted as.
 */
public enum MobileSelector {
    ACCESSIBILITY("accessibility id"),
    ANDROID_UI_AUTOMATOR("-android uiautomator"),
    IOS_UI_AUTOMATION("-ios uiautomation"),
    IOS_PREDICATE_STRING("-ios predicate string"),
    WINDOWS_UI_AUTOMATION("-windows uiautomation");

    /** Wire-protocol representation of this strategy. */
    private final String wireName;

    MobileSelector(String wireName) {
        this.wireName = wireName;
    }

    /** Returns the wire-protocol string for this locator strategy. */
    @Override
    public String toString() {
        return wireName;
    }
}
|
package org.tessell.tests.model.dsl;
import static joist.util.Copy.list;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.tessell.model.properties.NewProperty.listProperty;
import java.util.ArrayList;
import joist.util.Copy;
import org.junit.Test;
import org.tessell.gwt.user.client.ui.IsWidget;
import org.tessell.gwt.user.client.ui.StubFlowPanel;
import org.tessell.gwt.user.client.ui.StubLabel;
import org.tessell.model.dsl.Binder;
import org.tessell.model.dsl.ListPropertyBinder.ListPresenterFactory;
import org.tessell.model.dsl.ListPropertyBinder.ListViewFactory;
import org.tessell.model.properties.ListProperty;
import org.tessell.presenter.BasicPresenter;
import org.tessell.presenter.Presenter;
/**
 * Tests that binding a {@link ListProperty} to a panel keeps the panel's
 * widgets (or child presenters) in sync with the list: initial fill, adds
 * (including indexed adds), removes, and reorders without re-creating
 * views/presenters.
 */
public class ListPropertyBinderTest {

  // Shared fixture: a binder, a target panel, the bound list, and counting
  // factories so tests can assert how many views/presenters were created.
  final Binder binder = new Binder();
  final StubFlowPanel panel = new StubFlowPanel();
  final ListProperty<String> names = listProperty("names");
  final StubViewFactory viewFactory = new StubViewFactory();
  final StubPresenterFactory presenterFactory = new StubPresenterFactory();
  final ParentPresenter parent = bind(new ParentPresenter());

  @Test
  public void initialNamesAreAddedToPanel() {
    names.add("one");
    names.add("two");
    binder.bind(names).to(panel, viewFactory);
    assertLabel(panel.getIsWidget(0), "one");
    assertLabel(panel.getIsWidget(1), "two");
  }

  @Test
  public void newNamesAreAddedToPanel() {
    binder.bind(names).to(panel, viewFactory);
    assertThat(panel.getWidgetCount(), is(0));
    names.add("one");
    assertLabel(panel.getIsWidget(0), "one");
  }

  @Test
  public void newNamesAreAddedWithIndexToPanel() {
    binder.bind(names).to(panel, viewFactory);
    names.add("one");
    assertLabel(panel.getIsWidget(0), "one");
    // An indexed add must insert the view at the same index in the panel.
    names.add(0, "two");
    assertLabel(panel.getIsWidget(0), "two");
  }

  @Test
  public void newNamesAreAddedAfterExistingContent() {
    // Widgets already in the panel before binding must be left in place.
    panel.add(viewFactory.create("existing"));
    binder.bind(names).to(panel, viewFactory);
    names.add("one");
    assertLabel(panel.getIsWidget(0), "existing");
    assertLabel(panel.getIsWidget(1), "one");
  }

  @Test
  public void reordersToDoNotRequireCreatingNewViews() {
    binder.bind(names).to(panel, viewFactory);
    names.set(list("one", "two"));
    assertThat(panel.getWidgetCount(), is(2));
    assertLabel(panel.getIsWidget(0), "one");
    assertThat(viewFactory.created, is(2));
    // Re-setting the same elements in a new order moves existing views; the
    // factory's creation count must not grow.
    names.set(list("two", "one"));
    assertThat(panel.getWidgetCount(), is(2));
    assertLabel(panel.getIsWidget(0), "two");
    assertThat(viewFactory.created, is(2));
  }

  @Test
  public void reordersToDoNotRequireCreatingNewPresenters() {
    binder.bind(names).to(parent, panel, presenterFactory);
    names.set(list("one", "two"));
    assertThat(panel.getWidgetCount(), is(2));
    assertLabel(panel.getIsWidget(0), "one");
    assertThat(presenterFactory.created, is(2));
    // Same reorder guarantee as the view-factory variant, for presenters.
    names.set(list("two", "one"));
    assertThat(panel.getWidgetCount(), is(2));
    assertLabel(panel.getIsWidget(0), "two");
    assertThat(presenterFactory.created, is(2));
  }

  @Test
  public void oldNamesAreRemovedFromPanel() {
    binder.bind(names).to(panel, viewFactory);
    names.add("one");
    names.add("two");
    assertThat(panel.getWidgetCount(), is(2));
    names.remove("one");
    assertThat(panel.getWidgetCount(), is(1));
    assertLabel(panel.getIsWidget(0), "two");
  }

  @Test
  public void initialPresentersAreAddedToPanel() {
    names.add("one");
    names.add("two");
    binder.bind(names).to(parent, panel, presenterFactory);
    assertLabel(panel.getIsWidget(0), "one");
    assertLabel(panel.getIsWidget(1), "two");
    // Bound presenters must also be registered as children of the parent.
    assertThat(parent.getChildren().size(), is(2));
  }

  @Test
  public void newPresentersAreAddedToPanel() {
    binder.bind(names).to(parent, panel, presenterFactory);
    assertThat(panel.getWidgetCount(), is(0));
    names.add("one");
    assertLabel(panel.getIsWidget(0), "one");
    assertThat(parent.getChildren().size(), is(1));
  }

  @Test
  public void oldPresentersAreRemovedFromPanel() {
    binder.bind(names).to(parent, panel, presenterFactory);
    names.add("one");
    names.add("two");
    assertThat(panel.getWidgetCount(), is(2));
    assertThat(parent.getChildren().size(), is(2));
    names.remove("one");
    assertThat(panel.getWidgetCount(), is(1));
    assertLabel(panel.getIsWidget(0), "two");
    // Removing the list element must also unregister its child presenter.
    assertThat(parent.getChildren().size(), is(1));
  }

  @Test
  public void bindDoesNotNullPointerExceptionForViews() {
    // Binding while the list property's value is null must not NPE.
    names.set(null);
    binder.bind(names).to(panel, viewFactory);
    names.set(Copy.list("one"));
    assertThat(panel.getWidgetCount(), is(1));
  }

  @Test
  public void bindDoesNotNullPointerExceptionForPresenters() {
    // Same null-tolerance guarantee for the presenter-based binding.
    names.set(null);
    ParentPresenter parent = bind(new ParentPresenter());
    binder.bind(names).to(parent, panel, presenterFactory);
    names.set(Copy.list("one"));
    assertThat(panel.getWidgetCount(), is(1));
  }

  /** Asserts the widget is a StubLabel showing {@code text}. */
  private static void assertLabel(IsWidget label, String text) {
    assertThat(((StubLabel) label).getText(), is(text));
  }

  /** Presenter factory that counts creations and wraps each value in a label-backed presenter. */
  private final class StubPresenterFactory implements ListPresenterFactory<String> {
    int created = 0;

    public Presenter create(String value) {
      created++;
      StubLabel label = new StubLabel();
      label.setText(value);
      return new BasicPresenter<IsWidget>(label) {
      };
    }
  }

  /** View factory that counts creations and renders each value as a StubLabel. */
  private final class StubViewFactory implements ListViewFactory<String> {
    int created = 0;

    public IsWidget create(String value) {
      created++;
      StubLabel label = new StubLabel();
      label.setText(value);
      return label;
    }
  }

  /** Minimal parent presenter exposing its children for assertions. */
  private static final class ParentPresenter extends BasicPresenter<IsWidget> {
    public ParentPresenter() {
      super(new StubFlowPanel());
    }

    private ArrayList<Presenter> getChildren() {
      return children();
    }
  }

  /** Binds a presenter and returns it, for inline fixture setup. */
  private static <P extends Presenter> P bind(P p) {
    p.bind();
    return p;
  }
}
|
package krami;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A tiny hand-rolled rendezvous cell for passing a single "merit" value between
 * threads, coordinated by a small integer state machine and polling (10 ms
 * busy-wait). The public interface and protocol are unchanged:
 * <pre>
 *  -1  locked
 *   0  ready and up to date
 *   1  awaiting next merit
 *   2  requesting update
 * </pre>
 * Fixes over the original: {@link InterruptedException} is no longer swallowed —
 * the interrupt flag is restored so callers can observe it — and only
 * InterruptedException is caught (not a blanket {@code Exception}).
 */
public class SharedMerit
{
    /** Most recently published merit; NaN until the first set_merit succeeds. */
    private double merit = Double.NaN;

    /** Protocol state (see class comment for the meaning of each value). */
    private final AtomicInteger state;

    /** Starts in state 1: awaiting the first merit value. */
    public SharedMerit()
    {
        state = new AtomicInteger(1);
    }

    /**
     * Announces that a new merit value is wanted. Busy-waits while the cell is
     * locked (-1), then moves to state 1. If interrupted while waiting, the
     * interrupt flag is restored and the state is left unchanged.
     */
    public void promise_merit( )
    {
        if (!waitWhileState(-1))
        {
            return;
        }
        state.set(1);
    }

    /**
     * Busy-waits until a fresh merit is available (state 0) and returns it.
     *
     * @return the published merit, or NaN if the wait was interrupted
     */
    public double get_merit()
    {
        if (!waitUntilState(0))
        {
            return Double.NaN;
        }
        return merit;
    }

    /**
     * Publishes a merit value. Busy-waits while locked (-1); the value is only
     * stored if the cell is actually awaiting one (state 1), in which case the
     * state becomes 0 (ready). A no-op in any other state, or if interrupted.
     */
    public void set_merit(double nmerit)
    {
        if (!waitWhileState(-1))
        {
            return;
        }
        if (state.get() == 1)
        {
            merit = nmerit;
            state.set(0);
        }
    }

    /**
     * Forces the state to {@code nstate} once the cell is not locked.
     *
     * @return the previous state; {@code nstate} itself if it already matched;
     *         -1 immediately when asking to lock an already-locked cell;
     *         -2 if the wait was interrupted
     */
    public int set_state(int nstate)
    {
        if (nstate == state.get())
        {
            return nstate;
        }
        while (state.get() == -1)
        {
            if (nstate == -1)
            {
                return -1;
            }
            if (!sleepQuietly())
            {
                return -2;
            }
        }
        return state.getAndSet(nstate);
    }

    /** @return the current protocol state. */
    public int get_state()
    {
        return state.get();
    }

    /**
     * Busy-waits while the state equals {@code avoided}.
     *
     * @return false if interrupted (interrupt flag restored), true otherwise
     */
    private boolean waitWhileState(int avoided)
    {
        while (state.get() == avoided)
        {
            if (!sleepQuietly())
            {
                return false;
            }
        }
        return true;
    }

    /**
     * Busy-waits until the state equals {@code wanted}.
     *
     * @return false if interrupted (interrupt flag restored), true otherwise
     */
    private boolean waitUntilState(int wanted)
    {
        while (state.get() != wanted)
        {
            if (!sleepQuietly())
            {
                return false;
            }
        }
        return true;
    }

    /**
     * Sleeps one 10 ms polling interval.
     *
     * @return false if interrupted; the interrupt flag is re-set so the owner
     *         thread (or a caller) can still observe the interruption
     */
    private boolean sleepQuietly()
    {
        try
        {
            Thread.sleep(10);
            return true;
        }
        catch (InterruptedException e)
        {
            Thread.currentThread().interrupt();
            return false;
        }
    }
}
|
package com.mit.campus.rest.algorithm.model;
/**
 * Matrix transposition utility (矩阵转置).
 *
 * @author :lw
 * @since : 2018-9-7
 */
public class Transpose {

    private Transpose() {
        // Utility class: no instances.
    }

    /**
     * Historical public field; it is never written by this class and is kept
     * only for source/binary compatibility — do not rely on it.
     *
     * @deprecated unused; {@link #tranpose(double[][])} returns its result directly
     */
    @Deprecated
    public static double[][] MatrixC;

    /**
     * Returns the transpose of the given matrix.
     * (Method name keeps the historical "tranpose" spelling so existing callers
     * keep compiling.)
     *
     * @param matrix a rectangular matrix; may be null or empty
     * @return a new {@code cols x rows} matrix with {@code result[j][i] == matrix[i][j]};
     *         an empty {@code 0 x 0} matrix when the input is null or has no rows
     */
    public static double[][] tranpose(double[][] matrix) {
        // Guard: the original threw NPE / ArrayIndexOutOfBoundsException here.
        if (matrix == null || matrix.length == 0) {
            return new double[0][0];
        }
        int rows = matrix.length;
        int cols = matrix[0].length;
        double[][] result = new double[cols][rows];
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                result[j][i] = matrix[i][j];
            }
        }
        return result;
    }
}
|
/**
* .
*
* GoOut
*
* !
*/
package io.renren.modules.sys.controller;
import io.renren.common.utils.PageUtils;
import io.renren.common.utils.R;
import io.renren.modules.sys.entity.GoOut;
import io.renren.modules.sys.service.GoOutService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.Map;
/**
 * REST controller for "go out" (leave/exit) records under {@code /sys/goOut}.
 * Provides paged listing, detail lookup, create, update and batch delete.
 */
@RestController
@RequestMapping("/sys/goOut")
public class GoOutController extends AbstractController {

    // Renamed from "GoOutService" (which shadowed the type name and violated
    // lowerCamelCase); private field, so callers are unaffected.
    @Autowired
    private GoOutService goOutService;

    /** Returns a page of records according to the given query parameters. */
    @GetMapping("/list")
    public R list(@RequestParam Map<String, Object> params){
        PageUtils page = goOutService.queryPage(params);
        return R.ok().put("page", page);
    }

    /** Returns the single record with the given id. */
    @GetMapping("/info/{id}")
    public R info(@PathVariable("id") Long id){
        GoOut goOut = goOutService.getById(id);
        return R.ok().put("goOut", goOut);
    }

    /** Creates a record; new records always start in the "未审核" (not yet reviewed) status. */
    @PostMapping("/save")
    public R save(@RequestBody GoOut goOut){
        goOut.setStu("未审核");
        goOutService.saveOrUpdate(goOut);
        return R.ok();
    }

    /** Updates an existing record by its id. */
    @PostMapping("/update")
    public R update(@RequestBody GoOut goOut){
        goOutService.updateById(goOut);
        return R.ok();
    }

    /**
     * Deletes the records with the given ids, one at a time.
     * NOTE(review): the misleading overloaded name "update" is kept so the
     * class interface is unchanged; the mapped path "/delete" states the intent.
     */
    @PostMapping("/delete")
    public R update(@RequestBody Long[] ids){
        for (Long id : ids) {
            goOutService.removeById(id);
        }
        return R.ok();
    }
}
|
/*
* Phylontal - a tool for phylogenetic alignment of ontologies
* Copyright 2009-2011 Peter E. Midford
* This file is part of Phylontal.
*
* Phylontal is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Phylontal is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Phylontal. If not, see <http://www.gnu.org/licenses/>.
*
* Created on Jun 14, 2010
* Last updated on April 27, 2011
*
*/
package org.ethontos.phylontal.project.phylo.impl;
/**
 * Marker interface for a block of a phylogenetic data matrix.
 * NOTE(review): declares no members — presumably a placeholder for a future
 * matrix-block abstraction; confirm the intended contract before implementing.
 */
public interface MatrixBlock {
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.