text
stringlengths 7
1.01M
|
|---|
package modern.challenge.repository;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;

import org.hibernate.Session;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
@Repository
@Transactional
public class NumberRepository implements Serializable, Runnable {

    private static final Logger logger = Logger.getLogger(NumberRepository.class.getName());

    /** Parameterized insert; the single placeholder keeps the statement injection-safe. */
    private static final String SQL_INSERT = "INSERT INTO ints (nr) VALUES (?)";

    @PersistenceContext
    private EntityManager entityManager;

    /**
     * Entry point when this repository is executed on a worker thread:
     * unwraps the Hibernate {@link Session} behind the JPA
     * {@link EntityManager} and runs {@link #insertSample(Connection)}
     * against the session's underlying JDBC connection.
     */
    @Override
    public void run() {
        Session hibernateSession = entityManager.unwrap(Session.class);
        hibernateSession.doWork(this::insertSample);
    }

    /**
     * Inserts a single random integer into the {@code ints} table, then
     * sleeps for up to one second (simulated processing delay).
     *
     * @param connection the JDBC connection handed over by Hibernate's doWork
     */
    public void insertSample(Connection connection) {
        try (PreparedStatement preparedStatement = connection.prepareStatement(SQL_INSERT)) {
            // ThreadLocalRandom avoids allocating a fresh Random (and contending
            // on a shared seed) for every insert on every worker thread.
            preparedStatement.setInt(1, ThreadLocalRandom.current().nextInt());
            preparedStatement.execute();
            // Simulated processing delay of 0..999 ms.
            Thread.sleep(ThreadLocalRandom.current().nextInt(1000));
            logger.log(Level.INFO, "Processed by {0}", Thread.currentThread().getName());
        } catch (SQLException e) {
            logger.log(Level.SEVERE, "SQL exception", e);
        } catch (InterruptedException ex) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
        }
    }
}
|
/*
* Copyright 2014 Alexey Andreev.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teavm.platform.plugin;
import org.teavm.ast.InvocationExpr;
import org.teavm.backend.c.TeaVMCHost;
import org.teavm.backend.c.intrinsic.Intrinsic;
import org.teavm.backend.c.intrinsic.IntrinsicContext;
import org.teavm.backend.javascript.TeaVMJavaScriptHost;
import org.teavm.backend.wasm.TeaVMWasmHost;
import org.teavm.backend.wasm.intrinsics.WasmIntrinsic;
import org.teavm.backend.wasm.intrinsics.WasmIntrinsicManager;
import org.teavm.backend.wasm.model.expression.WasmExpression;
import org.teavm.interop.Async;
import org.teavm.interop.PlatformMarker;
import org.teavm.model.ClassReader;
import org.teavm.model.MethodReader;
import org.teavm.model.MethodReference;
import org.teavm.platform.Platform;
import org.teavm.platform.PlatformQueue;
import org.teavm.vm.TeaVMPluginUtil;
import org.teavm.vm.spi.TeaVMHost;
import org.teavm.vm.spi.TeaVMPlugin;
import org.teavm.platform.plugin.MetadataRegistration;
import org.teavm.platform.metadata.*;
import java.util.ArrayList;
import java.util.List;
/**
 * TeaVM plugin that wires platform support (metadata resources, async methods,
 * enum/class-lookup dependencies) into whichever backend host is present:
 * JavaScript, WebAssembly, or C. Also exposes itself as the
 * {@link MetadataRegistration} service.
 */
public class PlatformPlugin implements TeaVMPlugin, MetadataRegistration {

    /** Rewrites metadata-provider methods; also registered as a class transformer below. */
    private MetadataProviderTransformer metadataTransformer = new MetadataProviderTransformer();

    // NOTE(review): nothing in this class ever adds to this list, so the loop in
    // register() iterates an empty collection unless a subclass or reflective
    // caller populates it — confirm whether this is dead wiring.
    private List<MetadataGeneratorConsumer> metadataGeneratorConsumers = new ArrayList<>();

    /**
     * Installs backend-specific transformers and intrinsics. The registration
     * order is significant: transformers added first run first.
     */
    @Override
    public void install(TeaVMHost host) {
        if (host.getExtension(TeaVMJavaScriptHost.class) != null) {
            host.add(metadataTransformer);
            host.add(new ResourceTransformer());
            host.add(new ResourceAccessorTransformer(host));
            host.add(new ResourceAccessorDependencyListener());
            TeaVMJavaScriptHost jsHost = host.getExtension(TeaVMJavaScriptHost.class);
            // Generate async glue only for methods annotated with @Async.
            jsHost.addGeneratorProvider(context -> {
                ClassReader cls = context.getClassSource().get(context.getMethod().getClassName());
                if (cls == null) {
                    return null;
                }
                MethodReader method = cls.getMethod(context.getMethod().getDescriptor());
                if (method == null) {
                    return null;
                }
                return method.getAnnotations().get(Async.class.getName()) != null
                        ? new AsyncMethodGenerator() : null;
            });
            jsHost.addVirtualMethods(new AsyncMethodGenerator());
        } else if (!isBootstrap()) {
            host.add(new StringAmplifierTransformer());
        }
        if (!isBootstrap()) {
            // WebAssembly backend: metadata + resource-read intrinsics, and a
            // pass-through intrinsic for StringAmplifier calls.
            TeaVMWasmHost wasmHost = host.getExtension(TeaVMWasmHost.class);
            if (wasmHost != null) {
                wasmHost.add(ctx -> new MetadataIntrinsic(ctx.getClassSource(), ctx.getClassLoader(), ctx.getServices(),
                        ctx.getProperties()));
                wasmHost.add(ctx -> new ResourceReadIntrinsic(ctx.getClassSource(), ctx.getClassLoader()));
                wasmHost.add(ctx -> new WasmIntrinsic() {
                    @Override
                    public boolean isApplicable(MethodReference methodReference) {
                        return methodReference.getClassName().equals(StringAmplifier.class.getName());
                    }

                    @Override
                    public WasmExpression apply(InvocationExpr invocation, WasmIntrinsicManager manager) {
                        // Amplification is a no-op in Wasm: just emit the argument.
                        return manager.generate(invocation.getArguments().get(0));
                    }
                });
            }
            // C backend: same trio of intrinsics, C flavored.
            TeaVMCHost cHost = host.getExtension(TeaVMCHost.class);
            if (cHost != null) {
                cHost.addIntrinsic(ctx -> new MetadataCIntrinsic(ctx.getClassSource(), ctx.getClassLoader(),
                        ctx.getServices(), ctx.getProperties(), ctx.getStructureCodeWriter(),
                        ctx.getStaticFieldsInitWriter()));
                cHost.addIntrinsic(ctx -> new ResourceReadCIntrinsic(ctx.getClassSource()));
                cHost.addIntrinsic(ctx -> new Intrinsic() {
                    @Override
                    public boolean canHandle(MethodReference method) {
                        return method.getClassName().equals(StringAmplifier.class.getName());
                    }

                    @Override
                    public void apply(IntrinsicContext context, InvocationExpr invocation) {
                        // Amplification is a no-op in C: just emit the argument.
                        context.emit(invocation.getArguments().get(0));
                    }
                });
            }
        }
        // Backend-independent processors and dependency listeners.
        host.add(new AsyncMethodProcessor());
        host.add(new NewInstanceDependencySupport());
        host.add(new ClassLookupDependencySupport());
        host.add(new EnumDependencySupport());
        host.add(new PlatformDependencyListener());
        host.add(new AsyncDependencyListener());
        TeaVMPluginUtil.handleNatives(host, Platform.class);
        TeaVMPluginUtil.handleNatives(host, PlatformQueue.class);
        host.registerService(MetadataRegistration.class, this);
    }

    /**
     * Registers a metadata generator for {@code method}. A synthetic
     * constructor reference named {@code <method>$$create} (same signature)
     * is offered to every registered consumer, then the method is marked as a
     * metadata method on the transformer.
     */
    @Override
    public void register(MethodReference method, MetadataGenerator generator) {
        MethodReference constructor = new MethodReference(method.getClassName(), method.getName() + "$$create",
                method.getSignature());
        for (MetadataGeneratorConsumer consumer : metadataGeneratorConsumers) {
            consumer.consume(constructor, method, generator);
        }
        metadataTransformer.addMetadataMethod(method);
    }

    /** Callback receiving the synthetic constructor, the target method, and its generator. */
    interface MetadataGeneratorConsumer {
        void consume(MethodReference constructor, MethodReference target, MetadataGenerator generator);
    }

    // @PlatformMarker makes the compiler substitute the real value per platform;
    // 'false' here is only the default for the non-bootstrap build.
    @PlatformMarker
    private static boolean isBootstrap() {
        return false;
    }
}
|
/*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.expression;
import java.util.Map;
import org.springframework.expression.Expression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
/**
 * Shared utility class used to evaluate and cache SpEL expressions that
 * are defined on {@link java.lang.reflect.AnnotatedElement}.
 *
 * @author Stephane Nicoll
 * @since 4.2
 * @see AnnotatedElementKey
 */
public abstract class CachedExpressionEvaluator {

    private final SpelExpressionParser parser;

    /**
     * Create a new instance with the specified {@link SpelExpressionParser}.
     */
    protected CachedExpressionEvaluator(SpelExpressionParser parser) {
        Assert.notNull(parser, "Parser must not be null");
        this.parser = parser;
    }

    /**
     * Create a new instance with a default {@link SpelExpressionParser}.
     */
    protected CachedExpressionEvaluator() {
        this(new SpelExpressionParser());
    }

    /**
     * Return the {@link SpelExpressionParser} to use.
     */
    protected SpelExpressionParser getParser() {
        return this.parser;
    }

    /**
     * Return the {@link Expression} for the specified SpEL value.
     * <p>Parse the expression if it hasn't been parsed already.
     * @param cache the cache to use
     * @param elementKey the element on which the expression is defined
     * @param expression the expression to parse
     */
    protected Expression getExpression(Map<ExpressionKey, Expression> cache,
            AnnotatedElementKey elementKey, String expression) {

        // computeIfAbsent makes lookup-parse-store atomic on concurrent maps,
        // avoiding the check-then-act race (and duplicate parses) that a plain
        // get()/put() sequence allows.
        return cache.computeIfAbsent(createKey(elementKey, expression),
                key -> getParser().parseExpression(expression));
    }

    private ExpressionKey createKey(AnnotatedElementKey elementKey, String expression) {
        return new ExpressionKey(elementKey, expression);
    }

    /**
     * Cache key combining the annotated element and the raw expression text.
     * Immutable; safe for use as a map key.
     */
    protected static class ExpressionKey {

        private final AnnotatedElementKey key;

        private final String expression;

        protected ExpressionKey(AnnotatedElementKey key, String expression) {
            this.key = key;
            this.expression = expression;
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (!(other instanceof ExpressionKey)) {
                return false;
            }
            ExpressionKey otherKey = (ExpressionKey) other;
            // expression may be null, hence the null-safe comparison.
            return (this.key.equals(otherKey.key) &&
                    ObjectUtils.nullSafeEquals(this.expression, otherKey.expression));
        }

        @Override
        public int hashCode() {
            return this.key.hashCode() + (this.expression != null ? this.expression.hashCode() * 29 : 0);
        }
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner.task;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.queue.TaskPriority;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;
import org.apache.commons.lang.StringUtils;
import java.util.Date;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
 * Common task processor: persists ordinary task instances, pushes them onto the
 * task priority queue for a worker to consume, and supports killing a running
 * task on its worker host via Netty.
 */
@Service
public class CommonTaskProcessor extends BaseTaskProcessor {

    /** Dispatch queue; may be null until injection completes, see initQueue(). */
    @Autowired
    private TaskPriorityQueue taskUpdateQueue;

    @Autowired
    NettyExecutorManager nettyExecutorManager;

    /**
     * Persists the task instance (with retry) and, when the task belongs to a
     * task group, tries to acquire a group slot before dispatching.
     *
     * @return false only when the task instance could not be persisted;
     *         true otherwise (including the case where the group slot was not
     *         yet available and dispatch is deferred)
     */
    @Override
    public boolean submit(TaskInstance task, ProcessInstance processInstance, int maxRetryTimes, int commitInterval, boolean isTaskLogger) {
        this.processInstance = processInstance;
        this.taskInstance = processService.submitTaskWithRetry(processInstance, task, maxRetryTimes, commitInterval);

        if (this.taskInstance == null) {
            return false;
        }
        setTaskExecutionLogger(isTaskLogger);
        int taskGroupId = task.getTaskGroupId();
        if (taskGroupId > 0) {
            boolean acquireTaskGroup = processService.acquireTaskGroup(task.getId(),
                    task.getName(),
                    taskGroupId,
                    task.getProcessInstanceId(),
                    task.getTaskInstancePriority().getCode());
            if (!acquireTaskGroup) {
                // Task stays submitted; it will be dispatched later when the
                // group releases a slot.
                logger.info("submit task name :{}, but the first time to try to acquire task group failed", taskInstance.getName());
                return true;
            }
        }
        dispatchTask(taskInstance, processInstance);
        return true;
    }

    @Override
    public ExecutionStatus taskState() {
        return this.taskInstance.getState();
    }

    @Override
    public void dispatch(TaskInstance taskInstance, ProcessInstance processInstance) {
        this.dispatchTask(taskInstance, processInstance);
    }

    /** Nothing to do: common tasks are executed by workers, not by this processor. */
    @Override
    public void run() {
    }

    @Override
    protected boolean taskTimeout() {
        return true;
    }

    /**
     * common task cannot be paused
     */
    @Override
    protected boolean pauseTask() {
        return true;
    }

    @Override
    public String getType() {
        return Constants.COMMON_TASK_TYPE;
    }

    /**
     * Puts the task on the priority queue unless it is already finished,
     * running, or delayed.
     *
     * @return true when the task was enqueued or needed no enqueueing;
     *         false when enqueueing failed with an exception
     */
    private boolean dispatchTask(TaskInstance taskInstance, ProcessInstance processInstance) {
        try {
            if (taskUpdateQueue == null) {
                this.initQueue();
            }
            if (taskInstance.getState().typeIsFinished()) {
                // Parameterized logging instead of String.format: the message is
                // only rendered when INFO is enabled.
                logger.info("submit task , but task [{}] state [{}] is already finished. ", taskInstance.getName(), taskInstance.getState());
                return true;
            }
            // task cannot be submitted because its execution state is RUNNING or DELAY.
            if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
                    || taskInstance.getState() == ExecutionStatus.DELAY_EXECUTION) {
                logger.info("submit task, but the status of the task {} is already running or delayed.", taskInstance.getName());
                return true;
            }
            logger.info("task ready to submit: {}", taskInstance);

            TaskPriority taskPriority = new TaskPriority(processInstance.getProcessInstancePriority().getCode(),
                    processInstance.getId(), taskInstance.getProcessInstancePriority().getCode(),
                    taskInstance.getId(), Constants.DEFAULT_WORKER_GROUP);
            TaskExecutionContext taskExecutionContext = getTaskExecutionContext(taskInstance);
            taskPriority.setTaskExecutionContext(taskExecutionContext);
            taskUpdateQueue.put(taskPriority);
            logger.info("master submit success, task : {}", taskInstance.getName());
            return true;
        } catch (Exception e) {
            logger.error("submit task Exception: ", e);
            logger.error("task error : {}", JSONUtils.toJsonString(taskInstance));
            return false;
        }
    }

    /** Fallback lookup when field injection has not populated the queue yet. */
    public void initQueue() {
        this.taskUpdateQueue = SpringApplicationContext.getBean(TaskPriorityQueueImpl.class);
    }

    /**
     * Kills the task: marks it KILL locally when it has no host yet, otherwise
     * sends a kill command to the worker that runs it.
     *
     * @return true when the task is already gone/finished or the kill was
     *         issued; false when sending the kill command failed
     */
    @Override
    public boolean killTask() {
        try {
            taskInstance = processService.findTaskInstanceById(taskInstance.getId());
            if (taskInstance == null) {
                return true;
            }
            if (taskInstance.getState().typeIsFinished()) {
                return true;
            }
            if (StringUtils.isBlank(taskInstance.getHost())) {
                // Never dispatched to a worker: just mark it killed in the DB.
                taskInstance.setState(ExecutionStatus.KILL);
                taskInstance.setEndTime(new Date());
                processService.updateTaskInstance(taskInstance);
                return true;
            }
            TaskKillRequestCommand killCommand = new TaskKillRequestCommand();
            killCommand.setTaskInstanceId(taskInstance.getId());

            ExecutionContext executionContext = new ExecutionContext(killCommand.convert2Command(), ExecutorType.WORKER);

            Host host = Host.of(taskInstance.getHost());
            executionContext.setHost(host);

            nettyExecutorManager.executeDirectly(executionContext);
        } catch (ExecuteException e) {
            logger.error("kill task error:", e);
            return false;
        }

        logger.info("master kill taskInstance name :{} taskInstance id:{}",
                taskInstance.getName(), taskInstance.getId());
        return true;
    }
}
|
package org.asynchttpclient.ws;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
/**
 * A listener for the result of WebSocket write operations.
 */
public interface WebSocketWriteCompleteListener extends FutureListener<Void> {

    /**
     * Called when a write operation completes, either successfully or failing with an exception.
     *
     * @param result contains the result of the write operation
     */
    void onComplete(WriteCompleteResult result);

    /**
     * Adapts the Netty future callback: maps a successful future to the shared
     * {@link WriteCompleteResult#SUCCEEDED} constant and a failed one to a
     * result wrapping the future's cause.
     */
    @Override
    default void operationComplete(Future<Void> future) throws Exception {
        if (future.isSuccess()) {
            onComplete(WriteCompleteResult.SUCCEEDED);
        } else {
            onComplete(WriteCompleteResult.failed(future.cause()));
        }
    }

    /**
     * The result of a write operation.
     */
    interface WriteCompleteResult {

        /**
         * Constant for a succeeded result; shared because success carries no state.
         */
        WriteCompleteResult SUCCEEDED = new WriteCompleteResult() {
            @Override public Throwable getFailure() {
                return null;
            }

            @Override public boolean isSuccess() {
                return true;
            }

            @Override public boolean isFailed() {
                return false;
            }
        };

        /**
         * @param t the exception that caused the failure.
         * @return a failed result
         */
        static WriteCompleteResult failed(Throwable t)
        {
            return new WriteCompleteResult() {
                @Override public Throwable getFailure() {
                    return t;
                }

                @Override public boolean isSuccess() {
                    return false;
                }

                @Override public boolean isFailed() {
                    return true;
                }
            };
        }

        /**
         * Return the exception in case the write operation failed, {@code null} otherwise.
         *
         * @return the exception
         */
        Throwable getFailure();

        /**
         * Return {@code true} if the operation succeeded, {@code false} otherwise.
         *
         * @return true if success.
         */
        boolean isSuccess();

        /**
         * Return {@code true} if the operation failed, {@code false} otherwise.
         *
         * @return true if failed.
         */
        boolean isFailed();
    }
}
|
package coffeeMachine;
/**
 * Available coffee sizes with their dosage and price.
 */
public enum CoffeeSize {

    SMALL(50, 50), NORMAL(100, 75), DOUBLE(200, 100);

    // Enum constants are shared singletons: make their state immutable.
    // Units are not stated in SOURCE — presumably millilitres and cents; confirm.
    private final int dosage;
    private final int price;

    CoffeeSize(int dosage, int price) {
        this.dosage = dosage;
        this.price = price;
    }

    /** @return the dosage associated with this size */
    public int getDosage() {
        return this.dosage;
    }

    /** @return the price associated with this size */
    public int getPrice() {
        return this.price;
    }

    /**
     * Human-readable name: constant name with only the first letter capitalized,
     * e.g. {@code SMALL -> "Small"}.
     */
    @Override
    public String toString() {
        return name().charAt(0) + name().substring(1).toLowerCase();
    }
}
|
package io.swagger.client.api;
import io.swagger.client.CollectionFormats.*;
import retrofit.Callback;
import retrofit.http.*;
import retrofit.mime.*;
import io.swagger.client.model.Order;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Retrofit API for the pet-store order endpoints. Each operation is declared
 * twice: a synchronous variant returning the value directly, and an
 * asynchronous variant taking a Retrofit {@code Callback}.
 */
public interface StoreApi {
  /**
   * Delete purchase order by ID
   * Sync method
   * For valid response try integer IDs with value {@code < 1000}. Anything above 1000 or nonintegers will generate API errors
   * @param orderId ID of the order that needs to be deleted (required)
   * @return Void
   */
  @DELETE("/store/order/{order_id}")
  Void deleteOrder(
    @retrofit.http.Path("order_id") String orderId
  );

  /**
   * Delete purchase order by ID
   * Async method
   * @param orderId ID of the order that needs to be deleted (required)
   * @param cb callback method
   */
  @DELETE("/store/order/{order_id}")
  void deleteOrder(
    @retrofit.http.Path("order_id") String orderId, Callback<Void> cb
  );

  /**
   * Returns pet inventories by status
   * Sync method
   * Returns a map of status codes to quantities
   * @return {@code Map<String, Integer>}
   */
  @GET("/store/inventory")
  Map<String, Integer> getInventory();

  /**
   * Returns pet inventories by status
   * Async method
   * @param cb callback method
   */
  @GET("/store/inventory")
  void getInventory(
    Callback<Map<String, Integer>> cb
  );

  /**
   * Find purchase order by ID
   * Sync method
   * For valid response try integer IDs with value {@code <= 5} or {@code > 10}. Other values will generated exceptions
   * @param orderId ID of pet that needs to be fetched (required)
   * @return Order
   */
  @GET("/store/order/{order_id}")
  Order getOrderById(
    @retrofit.http.Path("order_id") Long orderId
  );

  /**
   * Find purchase order by ID
   * Async method
   * @param orderId ID of pet that needs to be fetched (required)
   * @param cb callback method
   */
  @GET("/store/order/{order_id}")
  void getOrderById(
    @retrofit.http.Path("order_id") Long orderId, Callback<Order> cb
  );

  /**
   * Place an order for a pet
   * Sync method
   *
   * @param body order placed for purchasing the pet (required)
   * @return Order
   */
  @POST("/store/order")
  Order placeOrder(
    @retrofit.http.Body Order body
  );

  /**
   * Place an order for a pet
   * Async method
   * @param body order placed for purchasing the pet (required)
   * @param cb callback method
   */
  @POST("/store/order")
  void placeOrder(
    @retrofit.http.Body Order body, Callback<Order> cb
  );
}
|
package de.unihamburg.informatik.nlp4web.tutorial.tut5.feature;
import de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token;
import org.cleartk.ml.feature.extractor.CleartkExtractor;
import org.cleartk.ml.feature.extractor.CoveredTextExtractor;
import org.cleartk.ml.feature.extractor.FeatureExtractor1;
import org.cleartk.ml.feature.extractor.TypePathExtractor;
import org.cleartk.ml.feature.function.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Class to instantiate the selected Feature Extractors
* (I know this is not a 'real Factory' in terms of GoF)
*/
public class FeatureExtractorFactory {
/**
* Creates all the features extractors that will be used. To remove code redundancy the method is public static and
* therefore accessible in the Features2Xml class
*
* @return the list of feature extractors
*/
public static List<FeatureExtractor1<Token>> createAllFeatureExtractors() throws IOException {
//create all feature extractors
List<FeatureExtractor1<Token>> allFeatureExtractors = new ArrayList<>();
TypePathExtractor<Token> stemExtractor = FeatureExtractorFactory.createTokenTypePathExtractors();
FeatureExtractor1<Token> tokenFeatureExtractor = FeatureExtractorFactory.createTokenFeatureExtractors();
CleartkExtractor<Token, Token> contextFeatureExtractor = FeatureExtractorFactory.createTokenContextExtractors();
FeatureFunctionExtractor nameListExtractors = FeatureExtractorFactory.createNameListExtractors();
FeatureFunctionExtractor cityListExtractors = FeatureExtractorFactory.createCityListExtractors();
FeatureFunctionExtractor countryListExtractors = FeatureExtractorFactory.createCountryListExtractors();
FeatureFunctionExtractor miscListExtractors = FeatureExtractorFactory.createMiscListExtractors();
FeatureFunctionExtractor orgListExtractors = FeatureExtractorFactory.createOrgListExtractors();
FeatureFunctionExtractor locListExtractors = FeatureExtractorFactory.createLocListExtractors();
allFeatureExtractors.add(stemExtractor);
allFeatureExtractors.add(tokenFeatureExtractor);
allFeatureExtractors.add(contextFeatureExtractor);
allFeatureExtractors.add(nameListExtractors);
allFeatureExtractors.add(cityListExtractors);
allFeatureExtractors.add(countryListExtractors);
allFeatureExtractors.add(miscListExtractors);
allFeatureExtractors.add(orgListExtractors);
allFeatureExtractors.add(locListExtractors);
return allFeatureExtractors;
}
public static FeatureFunctionExtractor createLocListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/eng_LOC.txt", "eng_LOC"),
new NEListExtractor("src/main/resources/ner/deu_LOC.txt", "deu_LOC"));
}
public static FeatureFunctionExtractor createOrgListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/eng_ORG.txt", "eng_ORG"),
new NEListExtractor("src/main/resources/ner/deu_ORG.txt", "deu_ORG"));
}
public static FeatureFunctionExtractor createMiscListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/eng_MISC.txt", "eng_MISC"),
new NEListExtractor("src/main/resources/ner/deu_MISC.txt", "deu_MISC"));
}
public static FeatureFunctionExtractor createCountryListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/germanCountryNames.txt", "gerCountry_LOC"),
new NEListExtractor("src/main/resources/ner/englishCountryNames.txt", "engCountry_LOC"));
}
public static FeatureFunctionExtractor createCityListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/germanCityNames.txt", "gerCity_LOC"),
new NEListExtractor("src/main/resources/ner/englishCityNames.txt", "engCity_LOC"));
}
public static FeatureFunctionExtractor createNameListExtractors() throws IOException {
return new FeatureFunctionExtractor<>(
new CoveredTextExtractor<Token>(),
FeatureFunctionExtractor.BaseFeatures.EXCLUDE,
new NEListExtractor("src/main/resources/ner/firstNames.txt", "firstName_PER"),
new NEListExtractor("src/main/resources/ner/lastNames.txt", "lastName_PER"));
}
public static CleartkExtractor<Token, Token> createTokenContextExtractors() {
// create a feature extractor that extracts the surrounding token texts (within the same sentence)
return new CleartkExtractor<>(Token.class,
// the FeatureExtractor that takes the token annotation from the JCas and produces the covered text
new CoveredTextExtractor<>(),
// also include the two preceding words
new CleartkExtractor.Preceding(2),
// and the two following words
new CleartkExtractor.Following(2));
}
public static FeatureExtractor1<Token> createTokenFeatureExtractors() {
// create a function feature extractor that creates features corresponding to the token
// Note the difference between feature extractors and feature functions here. Feature extractors take an Annotation
// from the JCas and extract features from it. Feature functions take the features produced by the feature extractor
// and generate new features from the old ones. Since feature functions don’t need to look up information in the JCas,
// they may be more efficient than feature extractors. So, the e.g. the CharacterNgramFeatureFunction simply extract
// suffixes from the text returned by the CoveredTextExtractor.
return new FeatureFunctionExtractor<>(
// the FeatureExtractor that takes the token annotation from the JCas and produces the covered text
new CoveredTextExtractor<Token>(),
// feature function that produces the lower cased word (based on the output of the CoveredTextExtractor)
new LowerCaseFeatureFunction(),
// feature function that produces the capitalization type of the word (e.g. all uppercase, all lowercase...)
new CapitalTypeFeatureFunction(),
// feature function that produces the numeric type of the word (numeric, alphanumeric...)
new NumericTypeFeatureFunction(),
// feature function that produces the suffix of the word as character bigram (last two chars of the word)
new CharacterNgramFeatureFunction(CharacterNgramFeatureFunction.Orientation.RIGHT_TO_LEFT, 0, 2),
// feature function that produces the suffix of the word as character trigram (last three chars of the word)
new CharacterNgramFeatureFunction(CharacterNgramFeatureFunction.Orientation.RIGHT_TO_LEFT, 0, 3),
// feature function that produces the Character Category Pattern (based on the Unicode Categories) for the Token
new CharacterCategoryPatternFunction());
}
public static TypePathExtractor<Token> createTokenTypePathExtractors() {
return new TypePathExtractor<>(Token.class, "stem/value");
}
}
|
package uk.gov.hmcts.reform.bulkscanccdeventhandler.caseupdate.controllers;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.ResultActions;
import uk.gov.hmcts.reform.authorisation.exceptions.InvalidTokenException;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.caseupdate.model.out.CaseUpdateDetails;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.caseupdate.model.out.SuccessfulUpdateResponse;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.caseupdate.services.CaseUpdater;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.auth.AuthService;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.auth.ForbiddenException;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.auth.UnauthenticatedException;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.model.out.Address;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.model.out.DocumentUrl;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.model.out.Item;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.model.out.SampleCase;
import uk.gov.hmcts.reform.bulkscanccdeventhandler.common.model.out.ScannedDocument;
import java.time.LocalDateTime;
import java.util.stream.Stream;
import static java.time.format.DateTimeFormatter.ISO_DATE_TIME;
import static java.util.Arrays.asList;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.BDDMockito.given;
import static org.springframework.http.HttpStatus.FORBIDDEN;
import static org.springframework.http.HttpStatus.UNAUTHORIZED;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
@SuppressWarnings("checkstyle:lineLength")
@WebMvcTest(UpdateCaseController.class)
class UpdateCaseControllerTest {
@Autowired private transient MockMvc mockMvc;
@MockBean private CaseUpdater caseUpdater;
@MockBean private AuthService authService;
@BeforeEach
public void setUp() {
Mockito.reset(authService);
}
@ParameterizedTest
@MethodSource("exceptionsAndStatuses")
public void should_return_proper_status_codes_for_auth_exceptions(RuntimeException exc, HttpStatus status) throws Exception {
given(authService.authenticate(any())).willThrow(exc);
sendRequest("{}")
.andExpect(status().is(status.value()));
}
@Test
public void should_return_updated_case_details() throws Exception {
SampleCase sampleCase = new SampleCase(
"legacy-id",
"first-name",
"last-name",
"date-of-birth",
"contact-number",
"email",
new Address(
"address-line-1",
"address-line-2",
"address-line-3",
"post-code",
"post-town",
"county",
"country"
),
asList(
new Item<>(new ScannedDocument(
"type-1",
"subtype-1",
new DocumentUrl(
"url-1",
"binary-url-1",
"file-name-1"
),
"dcn-1",
"file-name-1",
LocalDateTime.parse("2011-12-03T10:15:30.123", ISO_DATE_TIME),
LocalDateTime.parse("2011-12-04T10:15:30.123", ISO_DATE_TIME),
"ref-1"
)),
new Item<>(new ScannedDocument(
"type-2",
"subtype-2",
new DocumentUrl(
"url-2",
"binary-url-2",
"file-name-2"
),
"dcn-2",
"file-name-2",
LocalDateTime.parse("2011-12-05T10:15:30.123", ISO_DATE_TIME),
LocalDateTime.parse("2011-12-06T10:15:30.123", ISO_DATE_TIME),
"ref-2"
))
),
"er-id"
);
given(caseUpdater.update(any()))
.willReturn(
new SuccessfulUpdateResponse(
new CaseUpdateDetails(
CaseUpdater.EVENT_ID,
sampleCase
),
asList("warning-1", "warning-2")
)
);
sendRequest("{}")
.andExpect(status().isOk())
.andExpect(jsonPath("$.case_update_details.event_id").value(CaseUpdater.EVENT_ID))
.andExpect(jsonPath("$.case_update_details.case_data.legacyId").value("legacy-id"))
.andExpect(jsonPath("$.case_update_details.case_data.firstName").value("first-name"))
.andExpect(jsonPath("$.case_update_details.case_data.lastName").value("last-name"))
.andExpect(jsonPath("$.case_update_details.case_data.dateOfBirth").value("date-of-birth"))
.andExpect(jsonPath("$.case_update_details.case_data.contactNumber").value("contact-number"))
.andExpect(jsonPath("$.case_update_details.case_data.email").value("email"))
.andExpect(jsonPath("$.case_update_details.case_data.address.addressLine1").value("address-line-1"))
.andExpect(jsonPath("$.case_update_details.case_data.address.addressLine2").value("address-line-2"))
.andExpect(jsonPath("$.case_update_details.case_data.address.addressLine3").value("address-line-3"))
.andExpect(jsonPath("$.case_update_details.case_data.address.postCode").value("post-code"))
.andExpect(jsonPath("$.case_update_details.case_data.address.postTown").value("post-town"))
.andExpect(jsonPath("$.case_update_details.case_data.address.county").value("county"))
.andExpect(jsonPath("$.case_update_details.case_data.address.country").value("country"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.type").value("type-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.subtype").value("subtype-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.url.document_url").value("url-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.url.document_binary_url").value("binary-url-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.url.document_filename").value("file-name-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.controlNumber").value("dcn-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.fileName").value("file-name-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.scannedDate").value("2011-12-03T10:15:30.123"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.deliveryDate").value("2011-12-04T10:15:30.123"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[0].value.exceptionRecordReference").value("ref-1"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.type").value("type-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.subtype").value("subtype-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.url.document_url").value("url-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.url.document_binary_url").value("binary-url-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.url.document_filename").value("file-name-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.controlNumber").value("dcn-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.fileName").value("file-name-2"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.scannedDate").value("2011-12-05T10:15:30.123"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.deliveryDate").value("2011-12-06T10:15:30.123"))
.andExpect(jsonPath("$.case_update_details.case_data.scannedDocuments[1].value.exceptionRecordReference").value("ref-2"))
.andExpect(jsonPath("$.case_update_details.case_data.bulkScanCaseReference").value("er-id"))
.andExpect(jsonPath("$.warnings[0]").value("warning-1"))
.andExpect(jsonPath("$.warnings[1]").value("warning-2"));
}
// Parameterized-test source: pairs each authentication/authorisation failure thrown
// by the service with the HTTP status the /update-case endpoint must respond with.
private static Stream<Arguments> exceptionsAndStatuses() {
    return Stream.of(
        Arguments.of(new UnauthenticatedException(null), UNAUTHORIZED),
        Arguments.of(new InvalidTokenException(null, null), UNAUTHORIZED),
        Arguments.of(new ForbiddenException(null), FORBIDDEN)
    );
}
/**
 * POSTs the given JSON payload to the case-update endpoint and returns the
 * in-flight result for further andExpect() chaining.
 *
 * @param body request body, already serialized as JSON
 * @return the MockMvc result actions for the performed request
 * @throws Exception propagated from MockMvc.perform
 */
private ResultActions sendRequest(String body) throws Exception {
    return mockMvc.perform(post("/update-case")
        .contentType(MediaType.APPLICATION_JSON)
        .content(body));
}
}
|
/*
* Copyright 2007 skynamics AG
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openbp.jaspira.gui.interaction;
import java.awt.datatransfer.DataFlavor;
import java.awt.datatransfer.Transferable;
import java.awt.datatransfer.UnsupportedFlavorException;
import java.io.IOException;
import org.openbp.common.ExceptionUtil;
/**
* Transferable for a single object.
*
* @author Heiko Erhardt
*/
public class SimpleTransferable
    implements Transferable
{
    //////////////////////////////////////////////////
    // @@ Data members
    //////////////////////////////////////////////////

    /** Transferred object */
    private Object object;

    /** Data flavors supported by this transferable */
    private DataFlavor[] dataflavors;

    //////////////////////////////////////////////////
    // @@ Construction
    //////////////////////////////////////////////////

    /**
     * Constructor.
     * @param object Object to transfer
     * @param flavor Single flavor supported by this object
     */
    public SimpleTransferable(Object object, DataFlavor flavor)
    {
        // Delegate to the array constructor so both paths share initialization
        this(object, new DataFlavor[] { flavor });
    }

    /**
     * Constructor.
     * @param object Object to transfer
     * @param flavors Flavors supported by this object
     */
    public SimpleTransferable(Object object, DataFlavor[] flavors)
    {
        this.object = object;
        dataflavors = flavors;
    }

    //////////////////////////////////////////////////
    // @@ Transferable implementation
    //////////////////////////////////////////////////

    /**
     * @see java.awt.datatransfer.Transferable#getTransferDataFlavors()
     */
    public DataFlavor[] getTransferDataFlavors()
    {
        return dataflavors;
    }

    /**
     * @see java.awt.datatransfer.Transferable#isDataFlavorSupported(DataFlavor)
     */
    public boolean isDataFlavorSupported(DataFlavor flavor)
    {
        for (DataFlavor supported : dataflavors)
        {
            if (supported.equals(flavor))
            {
                return true;
            }
        }
        return false;
    }

    /**
     * @see java.awt.datatransfer.Transferable#getTransferData(DataFlavor)
     */
    public Object getTransferData(DataFlavor flavor)
        throws UnsupportedFlavorException, IOException
    {
        // Reuse the flavor check instead of duplicating the scan loop
        if (isDataFlavorSupported(flavor))
        {
            return object;
        }
        throw new UnsupportedFlavorException(flavor);
    }

    /**
     * Gets the transfer data in the desired format (convenience method that suppresses exceptions).
     * You should call this method only if have previously checked if the flavor is supported
     * by calling {@link #isDataFlavorSupported}.
     *
     * @param flavor Flavor to get
     * @return The desired object or null if the flavor is not supported or an i/o error has occurred.
     * In the latter case, the method prints a stack trace to stderr.
     */
    public Object getSafeTransferData(DataFlavor flavor)
    {
        try
        {
            return getTransferData(flavor);
        }
        catch (UnsupportedFlavorException e)
        {
            // Silently ignore; caller is expected to have checked isDataFlavorSupported first
        }
        catch (IOException e)
        {
            ExceptionUtil.printTrace(e);
        }
        return null;
    }
}
|
package com.webmvc.test.handler.beanname;
import com.webmvc.test.handler.adpater.Control;
import javax.servlet.http.HttpServletRequest;
import java.util.HashMap;
import java.util.Map;
/**
* @projectName: spring
* @package: com.webmvc.test.handler.beanname
* @className: DControl
* @description:
* @author: zhi
* @date: 2021/9/27
* @version: 1.0
*/
public class DControl implements Control<Map<String, Object>> {

    /**
     * Builds the response model for the current request.
     *
     * @param req current HTTP request; its "name" parameter is echoed back in the model
     * @return a single-entry map with key "key" holding the greeting text
     * @throws Exception declared by the Control contract; never thrown here
     */
    @Override
    public Map<String, Object> process(HttpServletRequest req) throws Exception {
        // Plain HashMap instead of double-brace initialization: the anonymous-subclass
        // idiom creates an extra class, pins a reference to the enclosing instance (and
        // transitively the request), and required the @SuppressWarnings("serial") that
        // is now no longer needed.
        Map<String, Object> model = new HashMap<>();
        model.put("key", "Customer Control => " + req.getParameter("name"));
        return model;
    }
}
|
/*
* Copyright 2016-2018 shardingsphere.io.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* </p>
*/
package io.shardingsphere.core.metadata;
import io.shardingsphere.core.constant.DatabaseType;
import io.shardingsphere.core.executor.ShardingExecuteEngine;
import io.shardingsphere.core.metadata.datasource.ShardingDataSourceMetaData;
import io.shardingsphere.core.metadata.table.ShardingTableMetaData;
import io.shardingsphere.core.metadata.table.executor.TableMetaDataConnectionManager;
import io.shardingsphere.core.metadata.table.executor.TableMetaDataInitializer;
import io.shardingsphere.core.rule.ShardingRule;
import lombok.Getter;
import java.util.Map;
/**
* Sharding meta data.
*
* @author zhangliang
*/
@Getter
public final class ShardingMetaData {

    /** Meta data describing the configured data sources. */
    private final ShardingDataSourceMetaData dataSource;

    /** Meta data of the sharded tables, loaded eagerly at construction time. */
    private final ShardingTableMetaData table;

    /**
     * Builds sharding meta data: data source meta data first, because the table
     * meta data initializer consults it while loading table information.
     */
    public ShardingMetaData(final Map<String, String> dataSourceURLs, final ShardingRule shardingRule, final DatabaseType databaseType, final ShardingExecuteEngine executeEngine,
                            final TableMetaDataConnectionManager connectionManager, final int maxConnectionsSizePerQuery, final boolean isCheckingMetaData) {
        ShardingDataSourceMetaData dataSourceMetaData = new ShardingDataSourceMetaData(dataSourceURLs, shardingRule, databaseType);
        TableMetaDataInitializer initializer = new TableMetaDataInitializer(
                dataSourceMetaData, executeEngine, connectionManager, maxConnectionsSizePerQuery, isCheckingMetaData);
        this.dataSource = dataSourceMetaData;
        this.table = new ShardingTableMetaData(initializer.load(shardingRule));
    }
}
|
/**
* This file is part of Words With Crosses.
*
* Copyright (this file) 2014 Wolfgang Groiss
*
* This file is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*
**/
package com.adamrosenfield.wordswithcrosses.net.derstandard;
import java.io.Serializable;
import java.text.DateFormat;
import java.util.Calendar;
import com.adamrosenfield.wordswithcrosses.puz.Puzzle;
/**
 * Serializable metadata for a single derStandard crossword: its id, date,
 * source URLs, and (transiently) the parsed puzzle itself.
 */
public class DerStandardPuzzleMetadata implements Serializable {
    private static final long serialVersionUID = 2L;

    /** Stable numeric identifier of the puzzle ("Nr. <id>" in the title). */
    private final int id;

    /** Publication date; may be null until {@link #setDate} is called. */
    private Calendar date;

    /** Absolute or relative URL of the puzzle data; may be null until set. */
    private String puzzleUrl;

    /** Absolute or relative URL of the date page; may be null until set. */
    private String dateUrl;

    // NOTE(review): this flag is never read or written by this class
    // (isPuzzleAvailable() derives availability from the transient puzzle field);
    // kept to preserve the serialized form under serialVersionUID 2L.
    private boolean puzzleAvailable = false;

    private boolean solutionAvailable = false;

    /** Parsed puzzle; transient, so it is not persisted with this metadata. */
    private transient Puzzle puzzle;

    public DerStandardPuzzleMetadata(int id) {
        this.id = id;
    }

    public String getPuzzleUrl(String relativeBase) {
        return getUrl(puzzleUrl, relativeBase);
    }

    public void setPuzzleUrl(String puzzleUrl) {
        this.puzzleUrl = puzzleUrl;
    }

    public String getDateUrl(String relativeBase) {
        return getUrl(dateUrl, relativeBase);
    }

    /**
     * Resolves a possibly-relative URL against the given base.
     *
     * @param url stored URL; may be null if never set
     * @param relativeBase prefix to prepend when the URL is relative
     * @return the absolute URL, or null when no URL has been set
     */
    private String getUrl(String url, String relativeBase) {
        if (url == null) {
            // Previously this threw NullPointerException when called before
            // setPuzzleUrl/setDateUrl; return null so callers can handle "not set".
            return null;
        }
        if (url.contains("://")) {
            // Already absolute.
            return url;
        }
        return relativeBase + url;
    }

    public void setDateUrl(String dateUrl) {
        this.dateUrl = dateUrl;
    }

    public void setDate(Calendar date) {
        this.date = date;
        refreshPuzzleTitle();
    }

    public Calendar getDate() {
        return date;
    }

    public int getId() {
        return id;
    }

    public void setPuzzle(Puzzle p) {
        this.puzzle = p;
        refreshPuzzleTitle();
    }

    /**
     * Rebuilds the puzzle title from id, date, and solution availability,
     * e.g. "Nr. 123 (Jan 2, 2014) [no Solution]". No-op while no puzzle is set.
     */
    private void refreshPuzzleTitle() {
        if (puzzle == null) {
            return;
        }
        StringBuilder sb = new StringBuilder("Nr. ");
        sb.append(id);
        if (date != null) {
            sb.append(" (");
            sb.append(DateFormat.getDateInstance(DateFormat.MEDIUM).format(date.getTime()));
            sb.append(")");
        }
        if (!solutionAvailable) {
            sb.append(" [no Solution]");
        }
        puzzle.setTitle(sb.toString());
    }

    public Puzzle getPuzzle() {
        return puzzle;
    }

    public boolean isPuzzleAvailable() {
        return puzzle != null;
    }

    public boolean isSolutionAvailable() {
        return solutionAvailable;
    }

    public void setSolutionAvailable(boolean solutionAvailable) {
        this.solutionAvailable = solutionAvailable;
        refreshPuzzleTitle();
    }

    @Override
    public String toString() {
        return "DerStandardPuzzleMetadata [id=" + id +
                ", date=" + date +
                ", puzzleUrl=" + puzzleUrl +
                ", dateUrl=" + dateUrl +
                ", puzzleAvailable=" + puzzleAvailable +
                ", solutionAvailable=" + solutionAvailable +
                "]";
    }
}
|
package snownee.cuisine.api.prefab;
import snownee.cuisine.api.Effect;
import snownee.cuisine.api.Form;
import snownee.cuisine.api.MaterialCategory;
import snownee.cuisine.internal.material.MaterialWithEffect;
import java.util.*;
/**
* Simple builder for a material
*/
public class MaterialBuilder {

    /** Registry identifier of the material being built. */
    private final String id;

    private int rawColor, cookedColor, waterValue, oilValue, heatValue;
    private float saturationModifier;
    private EnumSet<MaterialCategory> categories = EnumSet.noneOf(MaterialCategory.class);
    private EnumSet<Form> validForms = EnumSet.noneOf(Form.class);
    private Effect effect;

    // Boiling defaults, overridable via boilHeat/boilTime.
    private float boilHeat = 90;
    private int boilTime = 150;

    public MaterialBuilder(String id) {
        this.id = id;
    }

    /** Static factory, equivalent to {@code new MaterialBuilder(id)}. */
    public static MaterialBuilder of(String id) {
        return new MaterialBuilder(id);
    }

    public MaterialBuilder rawColor(int rawColor) {
        this.rawColor = rawColor;
        return this;
    }

    public MaterialBuilder effect(Effect effect) {
        this.effect = effect;
        return this;
    }

    public MaterialBuilder cookedColor(int cookedColor) {
        this.cookedColor = cookedColor;
        return this;
    }

    public MaterialBuilder waterValue(int waterValue) {
        this.waterValue = waterValue;
        return this;
    }

    public MaterialBuilder oilValue(int oilValue) {
        this.oilValue = oilValue;
        return this;
    }

    public MaterialBuilder heatValue(int heatValue) {
        this.heatValue = heatValue;
        return this;
    }

    public MaterialBuilder saturation(float saturationModifier) {
        this.saturationModifier = saturationModifier;
        return this;
    }

    /** Adds the given forms to the set of valid forms. */
    public MaterialBuilder form(Form... forms) {
        validForms.addAll(Arrays.asList(forms));
        return this;
    }

    /** Adds the given forms to the set of valid forms. */
    public MaterialBuilder form(Collection<? extends Form> forms) {
        validForms.addAll(forms);
        return this;
    }

    // NOTE(review): unlike the other form(...) overloads this one REPLACES the
    // accumulated set (and keeps a reference to the caller's EnumSet) instead of
    // adding to it — preserved as-is since callers may rely on it.
    public MaterialBuilder form(EnumSet<Form> forms) {
        validForms = forms;
        return this;
    }

    /** Adds the given categories to the material's category set. */
    public MaterialBuilder category(MaterialCategory... categories) {
        this.categories.addAll(Arrays.asList(categories));
        return this;
    }

    /** Adds the given categories to the material's category set. */
    public MaterialBuilder category(Collection<? extends MaterialCategory> categories) {
        this.categories.addAll(categories);
        return this;
    }

    public MaterialBuilder boilHeat(float boilHeat) {
        this.boilHeat = boilHeat;
        return this;
    }

    public MaterialBuilder boilTime(int boilTime) {
        this.boilTime = boilTime;
        return this;
    }

    /**
     * Creates the material. A {@link MaterialWithEffect} is produced when an
     * effect was configured, otherwise a plain {@link SimpleMaterialImpl}.
     */
    public SimpleMaterialImpl build() {
        MaterialCategory[] cats = categories.toArray(new MaterialCategory[0]);
        SimpleMaterialImpl material = (effect == null)
                ? new SimpleMaterialImpl(id, rawColor, cookedColor, waterValue, oilValue, heatValue, saturationModifier, boilHeat, boilTime, cats)
                : new MaterialWithEffect(id, effect, rawColor, cookedColor, waterValue, oilValue, heatValue, saturationModifier, boilHeat, boilTime, cats);
        return material.setValidForms(validForms);
    }
}
|
package cn.xpbootcamp.tdd;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class GameEngineTest {

    /** Fixed "secret" digits used by every test: 1 2 3 4. */
    private int[] randoms = {1, 2, 3, 4};

    private GameEngine gameEngine;

    @BeforeEach
    void setUp() {
        // Stub the generator so the secret number is deterministic across tests.
        RandomNumber fixedNumber = new RandomNumber();
        fixedNumber.setRandoms(randoms);
        RandomGenerator generator = mock(RandomGenerator.class);
        when(generator.generate()).thenReturn(fixedNumber);
        gameEngine = new GameEngine(generator);
    }

    @Test
    void should_return_wrong_input_when_play_game_given_guess_number_12() {
        // Guess shorter than four digits is rejected outright.
        String result = gameEngine.play(new int[]{1, 2});
        assertThat(result).isEqualTo("Wrong Input,Input again");
    }

    @Test
    void should_return_1A2B_and_can_play_when_play_game_given_guess_number_1425_and_random_1234() {
        // One digit in place (1), two digits present but misplaced (4, 2).
        String result = gameEngine.play(new int[]{1, 4, 2, 5});
        assertThat(result).isEqualTo("1A2B");
        assertThat(gameEngine.canPlay()).isTrue();
    }

    @Test
    void should_return_4A0B_and_can_not_play_when_play_game_given_guess_number_1234_and_random_1234() {
        // Exact match ends the game.
        String result = gameEngine.play(new int[]{1, 2, 3, 4});
        assertThat(result).isEqualTo("4A0B");
        assertThat(gameEngine.canPlay()).isFalse();
    }

    @Test
    void should_return_can_not_play_after_6_round_when_play_game_given_no_round_success() {
        // Six failed guesses exhaust the allowed rounds.
        int[] guessNumber = {1, 2, 4, 5};
        for (int round = 0; round < 6; round++) {
            gameEngine.play(guessNumber);
        }
        assertThat(gameEngine.canPlay()).isFalse();
    }
}
|
package gui;
import javax.swing.table.DefaultTableModel;
/**
 * Binary-tree view over a heap of Employee records, plus traversal helpers that
 * dump the visited nodes into a Swing table model for display.
 *
 * NOTE(review): all state is static, so only one tree/table can exist per JVM.
 */
public class HeapTree{
static HeapTreeNode root;
// NOTE(review): 'length' is returned by size() but never updated anywhere in
// this class — verify it is maintained by a collaborator, otherwise size() is stale.
static int length;
// Backing heap; 'ha.counter' bounds the level-wise rebuild below.
// NOTE(review): 'ha' is never assigned in this class — presumably set elsewhere; confirm.
static HeapArray ha;
// Scratch row reused for every table insert; DefaultTableModel.addRow copies the
// cell references into its own Vector, so reuse is safe.
static Object[] row=new Object[6];
public static DefaultTableModel model=new DefaultTableModel();
static Object[] columns={"Employee NO.","First Name","Last Name","Marital Status","Salary","Total Salary"};
// Rebuilds the tree from the heap array in level order. Children of index i are
// taken at 2*i and 2*i+1 (1-based heap layout); recursion stops at ha.counter.
// The 'root' parameter is overwritten and returned, so callers must reassign.
public static HeapTreeNode levelWiseInsert(Employee[] h, HeapTreeNode root, int i){ //refresh
if(i<ha.counter){
HeapTreeNode tmp = new HeapTreeNode(h[i]);
root=tmp;
root.left=levelWiseInsert(h,root.left, 2*i);
root.right=levelWiseInsert(h,root.right, 2*i+1);
}
return root;
}
// Recursive pre-order walk (node, left, right), appending each node to the table.
public static void preOrderTraverse(HeapTreeNode T){
if(T!=null){
addIntoTable(T.getItem());
preOrderTraverse(T.left);
preOrderTraverse(T.right);
} }
// Recursive in-order walk (left, node, right).
public static void inOrderTraverse(HeapTreeNode t){
if(t!=null){
inOrderTraverse(t.left);
addIntoTable(t.getItem());
inOrderTraverse(t.right);
} }
// Recursive post-order walk (left, right, node).
public static void postOrderTraverse(HeapTreeNode t){
if(t!=null){
postOrderTraverse(t.left);
postOrderTraverse(t.right);
addIntoTable(t.getItem());
} }
// Iterative pre-order walk using an explicit stack (project-local Stack class,
// not java.util.Stack); right child is pushed first so left is processed first.
public static void preOrderTraversal(HeapTreeNode t){
Stack s = new Stack();
HeapTreeNode n;
s.push(t);
while(!s.isEmpty()){
n=(HeapTreeNode)s.pop();
if(n!=null){
addIntoTable(n.getItem());
s.push(n.right);
s.push(n.left);
}}
}
// Breadth-first walk using a queue (project-local Queue class); null children are
// enqueued and skipped on dequeue.
public static void levelOrderTraversal(HeapTreeNode t){
Queue q = new Queue();
HeapTreeNode n;
q.enqueue(t);
while(!q.isEmpty()){
n=(HeapTreeNode)q.dequeue();
if(n!=null){
addIntoTable(n.getItem());
q.enqueue(n.left);
q.enqueue(n.right);
}} }
// Appends one employee as a table row. Column headers are (re)set on every call,
// which is redundant but harmless after the first insert.
public static void addIntoTable(Employee item){
model.setColumnIdentifiers(columns);
if(item!=null){
row[0]=item.getEmpNo();
row[1]=item.getFirstName();
row[2]=item.getLastName();
row[3]=item.getMaritalStatus();
row[4]=item.getBasicSalary();
row[5]=item.getTotalSalary();
model.addRow(row);
}
}
// Resets the table model by replacing it wholesale.
// NOTE(review): the empty catch silently swallows any failure, and listeners of
// the old model are detached — consider model.setRowCount(0) instead.
public static void clear(){
try{
model=new DefaultTableModel();
}catch(Exception ex){
}
}
// NOTE(review): see the 'length' field comment — this value is never maintained here.
public int size(){ return length; }
}
|
package com.google.api.ads.dfp.jaxws.v201306;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;
/**
*
* The action used for disapproving {@link Order} objects. All {@link LineItem}
* objects within the order will be disapproved as well.
*
*
* <p>Java class for DisapproveOrders complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="DisapproveOrders">
* <complexContent>
* <extension base="{https://www.google.com/apis/ads/publisher/v201306}OrderAction">
* <sequence>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "DisapproveOrders")
public class DisapproveOrders
    extends OrderAction
{
    // Generated JAXB binding class: the schema's DisapproveOrders type declares an
    // empty <sequence/>, so this action adds no members beyond OrderAction.
    // Do not hand-edit; regenerate from the WSDL/schema instead.
}
|
package com.semantalytics.stardog.kibble.geo.geohash;
import com.semantalytics.stardog.kibble.AbstractStardogTest;
import com.stardog.stark.Literal;
import com.stardog.stark.Value;
import com.stardog.stark.query.BindingSet;
import com.stardog.stark.query.SelectQueryResult;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class TestBottom extends AbstractStardogTest {

    private final String sparqlPrefix = GeoHashVocabulary.sparqlPrefix("geohash");

    @Test
    public void testOneArg() {
        // The cell directly below geohash "gbsuv7ztgzpt" is "gbsuv7ztgzps".
        final String aQuery = sparqlPrefix +
                "select ?result where { bind(geohash:bottom(\"gbsuv7ztgzpt\") AS ?result) }";

        try (final SelectQueryResult theResult = connection.select(aQuery).execute()) {
            assertThat(theResult).hasNext();
            final Value aValue = theResult.next().value("result").get();
            assertThat(aValue).isInstanceOf(Literal.class);
            assertThat(((Literal) aValue).label()).isEqualTo("gbsuv7ztgzps");
            assertThat(theResult).isExhausted();
        }
    }

    @Test
    public void testTooFewArgs() {
        assertNoBinding(sparqlPrefix +
                "select ?result where { bind(geohash:bottom() as ?result) }");
    }

    @Test
    public void testTooManyArgs() {
        assertNoBinding(sparqlPrefix +
                "select ?result where { bind(geohash:bottom(\"one\", \"two\") as ?result) }");
    }

    @Test
    public void testWrongTypeFirstArg() {
        assertNoBinding(sparqlPrefix +
                "select ?result where { bind(geohash:bottom(1) as ?result) }");
    }

    /**
     * Runs the query and asserts it yields exactly one solution in which the
     * bind failed, i.e. a solution with no bound variables.
     */
    private void assertNoBinding(final String aQuery) {
        try (final SelectQueryResult theResult = connection.select(aQuery).execute()) {
            assertThat(theResult).hasNext();
            final BindingSet aBindingSet = theResult.next();
            assertThat(aBindingSet).withFailMessage("Should have no bindings").isEmpty();
            assertThat(theResult).isExhausted();
        }
    }
}
|
package com.example.weekthree.controller.response;
import lombok.*;
@Setter
@Getter
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class MovieCreateResponse {
private Long id;
public static MovieCreateResponse convertToMovieResponse(Long id ){
return MovieCreateResponse.builder()
.id(id)
.build();
}
}
|
package org.traud.projecteuler;
import java.math.BigInteger;
/**
* Created by traud on 4/14/2017.
*/
/**
 * Project Euler 592 experiments: compare the exact factorial's low hex digits
 * with a masked computation that strips trailing zero nibbles as it goes.
 *
 * Created by traud on 4/14/2017.
 */
public class P592_FactorialDigits2 {

    /**
     * Computes n! exactly.
     *
     * @param n upper bound of the product
     * @return the exact factorial as a BigInteger (1 for n <= 0)
     */
    public static BigInteger fac(long n) {
        BigInteger f = BigInteger.ONE;
        for (long k = 1; k <= n; ++k) {
            f = f.multiply(BigInteger.valueOf(k));
        }
        return f;
    }

    /**
     * Computes n! with trailing zero hex digits (nibbles) stripped after each
     * multiplication, keeping only the low bits selected by {@code mod}.
     *
     * <p>Assumes {@code mod} has the form 2^k - 1, so {@code and(mod)} acts as a
     * reduction modulo 2^k.
     *
     * @param n   factorial argument
     * @param mod bit mask of the form 2^k - 1
     * @return the masked factorial with trailing zero nibbles removed
     */
    public static BigInteger facMod(BigInteger n, long mod) {
        final BigInteger fifteen = BigInteger.valueOf(0xf);
        final BigInteger mask = BigInteger.valueOf(mod);
        BigInteger f = BigInteger.ONE;
        for (BigInteger k = BigInteger.ONE; k.compareTo(n) <= 0; k = k.add(BigInteger.ONE)) {
            f = f.multiply(k);
            // Drop trailing zero nibbles (factors of 16) so the retained low bits
            // always reflect the last non-zero hex digits of the running product.
            while (f.and(fifteen).equals(BigInteger.ZERO)) {
                f = f.shiftRight(4);
            }
            f = f.and(mask);
        }
        return f;
    }

    public static void main(String... args) {
        // Print exact vs. masked factorials side by side for small n to eyeball
        // where (and why) the masked low hex digits start to diverge.
        for (int N = 10; N < 25; ++N) {
            BigInteger fac = fac(N);
            BigInteger fac2 = facMod(BigInteger.valueOf(N), 0xffff_ffff_ffffL);
            System.out.printf("%d! = %s (%s)\n", N, fac.toString(16), fac2.toString(16));
        }
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.io.compress.CompressionCodec;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
/**
* Provides the basics for a RpcClient implementation like configuration and Logging.
*/
@InterfaceAudience.Private
public abstract class AbstractRpcClient implements RpcClient {
  public static final Log LOG = LogFactory.getLog(AbstractRpcClient.class);

  protected final Configuration conf;
  protected String clusterId;
  protected final SocketAddress localAddr;

  protected UserProvider userProvider;
  protected final IPCUtil ipcUtil;

  protected final int minIdleTimeBeforeClose; // if the connection is idle for more than this
  // time (in ms), it will be closed at any moment.
  protected final int maxRetries; //the max. no. of retries for socket connections
  protected final long failureSleep; // Time to sleep before retry on failure.
  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
  protected final boolean tcpKeepAlive; // if T then use keepalives
  protected final Codec codec;
  protected final CompressionCodec compressor;
  protected final boolean fallbackAllowed;

  // Socket timeouts (ms) for connect, read, and write respectively.
  protected final int connectTO;
  protected final int readTO;
  protected final int writeTO;

  /**
   * Construct an IPC client for the cluster <code>clusterId</code>
   *
   * @param conf configuration
   * @param clusterId the cluster id
   * @param localAddr client socket bind address.
   */
  public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr) {
    this.userProvider = UserProvider.instantiate(conf);
    this.localAddr = localAddr;
    this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true);
    this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT;
    this.failureSleep = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0);
    this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true);
    this.ipcUtil = new IPCUtil(conf);

    this.minIdleTimeBeforeClose = conf.getInt(IDLE_TIME, 120000); // 2 minutes
    this.conf = conf;
    this.codec = getCodec();
    this.compressor = getCompressor(conf);
    this.fallbackAllowed = conf.getBoolean(IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.connectTO = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT);
    this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, DEFAULT_SOCKET_TIMEOUT_READ);
    this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, DEFAULT_SOCKET_TIMEOUT_WRITE);

    // login the server principal (if using secure Hadoop)
    if (LOG.isDebugEnabled()) {
      LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor +
          ", tcpKeepAlive=" + this.tcpKeepAlive +
          ", tcpNoDelay=" + this.tcpNoDelay +
          ", connectTO=" + this.connectTO +
          ", readTO=" + this.readTO +
          ", writeTO=" + this.writeTO +
          ", minIdleTimeBeforeClose=" + this.minIdleTimeBeforeClose +
          ", maxRetries=" + this.maxRetries +
          ", fallbackAllowed=" + this.fallbackAllowed +
          ", bind address=" + (this.localAddr != null ? this.localAddr : "null"));
    }
  }

  @VisibleForTesting
  public static String getDefaultCodec(final Configuration c) {
    // If "hbase.client.default.rpc.codec" is empty string -- you can't set it to null because
    // Configuration will complain -- then no default codec (and we'll pb everything). Else
    // default is KeyValueCodec
    return c.get(DEFAULT_CODEC_CLASS, KeyValueCodec.class.getCanonicalName());
  }

  /**
   * Encapsulate the ugly casting and RuntimeException conversion in private method.
   * @return Codec to use on this client, or null when cell-block encoding is disabled.
   */
  Codec getCodec() {
    // For NO CODEC, "hbase.client.rpc.codec" must be configured with empty string AND
    // "hbase.client.default.rpc.codec" also -- because default is to do cell block encoding.
    String className = conf.get(HConstants.RPC_CODEC_CONF_KEY, getDefaultCodec(this.conf));
    if (className == null || className.length() == 0) return null;
    try {
      return (Codec)Class.forName(className).newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Failed getting codec " + className, e);
    }
  }

  @Override
  public boolean hasCellBlockSupport() {
    // Cell blocks are usable exactly when a codec was configured (see getCodec()).
    return this.codec != null;
  }

  /**
   * Encapsulate the ugly casting and RuntimeException conversion in private method.
   * @param conf configuration
   * @return The compressor to use on this client, or null when none is configured.
   */
  private static CompressionCodec getCompressor(final Configuration conf) {
    String className = conf.get("hbase.client.rpc.compressor", null);
    if (className == null || className.isEmpty()) return null;
    try {
      return (CompressionCodec)Class.forName(className).newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Failed getting compressor " + className, e);
    }
  }

  /**
   * Return the pool type specified in the configuration, which must be set to
   * either {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   * {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal},
   * otherwise default to the former.
   *
   * For applications with many user threads, use a small round-robin pool. For
   * applications with few user threads, you may want to try using a
   * thread-local pool. In any case, the number of {@link org.apache.hadoop.hbase.ipc.RpcClient}
   * instances should not exceed the operating system's hard limit on the number of
   * connections.
   *
   * @param config configuration
   * @return either a {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   *         {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}
   */
  protected static PoolMap.PoolType getPoolType(Configuration config) {
    return PoolMap.PoolType
        .valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE), PoolMap.PoolType.RoundRobin,
            PoolMap.PoolType.ThreadLocal);
  }

  /**
   * Return the pool size specified in the configuration, which is applicable only if
   * the pool type is {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin}.
   *
   * @param config configuration
   * @return the maximum pool size
   */
  protected static int getPoolSize(Configuration config) {
    return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);
  }

  /**
   * Make a blocking call. Throws exceptions if there are network problems or if the remote code
   * threw an exception.
   *
   * @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
   *               {@link UserProvider#getCurrent()} makes a new instance of User each time so
   *               will be a
   *               new Connection each time.
   * @return A pair with the Message response and the Cell data (if any).
   */
  Message callBlockingMethod(Descriptors.MethodDescriptor md, PayloadCarryingRpcController pcrc,
      Message param, Message returnType, final User ticket, final InetSocketAddress isa)
      throws ServiceException {
    if (pcrc == null) {
      // Caller supplied no controller; create one so the cell scanner can be attached below.
      pcrc = new PayloadCarryingRpcController();
    }

    long startTime = 0;
    if (LOG.isTraceEnabled()) {
      startTime = EnvironmentEdgeManager.currentTime();
    }
    Pair<Message, CellScanner> val;
    try {
      val = call(pcrc, md, param, returnType, ticket, isa);
      // Shove the results into controller so can be carried across the proxy/pb service void.
      pcrc.setCellScanner(val.getSecond());

      if (LOG.isTraceEnabled()) {
        long callTime = EnvironmentEdgeManager.currentTime() - startTime;
        LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms");
      }
      return val.getFirst();
    } catch (Throwable e) {
      // Wrap everything (incl. InterruptedException/IOException) per BlockingRpcChannel contract.
      throw new ServiceException(e);
    }
  }

  /**
   * Make a call, passing <code>param</code>, to the IPC server running at
   * <code>address</code> which is servicing the <code>protocol</code> protocol,
   * with the <code>ticket</code> credentials, returning the value.
   * Throws exceptions if there are network problems or if the remote code
   * threw an exception.
   *
   * @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
   *               {@link UserProvider#getCurrent()} makes a new instance of User each time so
   *               will be a
   *               new Connection each time.
   * @return A pair with the Message response and the Cell data (if any).
   * @throws InterruptedException
   * @throws java.io.IOException
   */
  protected abstract Pair<Message, CellScanner> call(PayloadCarryingRpcController pcrc,
      Descriptors.MethodDescriptor md, Message param, Message returnType, User ticket,
      InetSocketAddress isa) throws IOException, InterruptedException;

  @Override
  public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket,
      int defaultOperationTimeout) {
    return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout);
  }

  /**
   * Blocking rpc channel that goes via hbase rpc.
   */
  @VisibleForTesting
  public static class BlockingRpcChannelImplementation implements BlockingRpcChannel {
    private final InetSocketAddress isa;
    private final AbstractRpcClient rpcClient;
    private final User ticket;
    private final int channelOperationTimeout;

    /**
     * @param channelOperationTimeout - the default timeout when no timeout is given
     */
    protected BlockingRpcChannelImplementation(final AbstractRpcClient rpcClient,
        final ServerName sn, final User ticket, int channelOperationTimeout) {
      this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort());
      this.rpcClient = rpcClient;
      this.ticket = ticket;
      this.channelOperationTimeout = channelOperationTimeout;
    }

    @Override
    public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller,
        Message param, Message returnType) throws ServiceException {
      PayloadCarryingRpcController pcrc;
      // Reuse the caller's controller when it is payload-carrying; apply the channel's
      // default timeout only when the caller did not set one.
      if (controller != null && controller instanceof PayloadCarryingRpcController) {
        pcrc = (PayloadCarryingRpcController) controller;
        if (!pcrc.hasCallTimeout()) {
          pcrc.setCallTimeout(channelOperationTimeout);
        }
      } else {
        pcrc = new PayloadCarryingRpcController();
        pcrc.setCallTimeout(channelOperationTimeout);
      }

      return this.rpcClient.callBlockingMethod(md, pcrc, param, returnType, this.ticket, this.isa);
    }
  }
}
|
/**
* Copyright (c) 2016-2019 人人开源 All rights reserved.
* <p>
* https://www.renren.io
* <p>
* 版权所有,侵权必究!
*/
package io.renren.common.utils;
/**
 * Shared application constants: paging/sorting request parameter keys and
 * common business enums.
 *
 * @author Mark sunlightcs@gmail.com
 */
public class Constant {
    /** Super administrator user ID. */
    public static final int SUPER_ADMIN = 1;
    /** Request parameter: current page number. */
    public static final String PAGE = "page";
    /** Request parameter: number of records per page. */
    public static final String LIMIT = "limit";
    /** Request parameter: field to sort by. */
    public static final String ORDER_FIELD = "sidx";
    /** Request parameter: sort direction. */
    public static final String ORDER = "order";
    /** Sort direction value: ascending. */
    public static final String ASC = "asc";

    /**
     * Menu entry type.
     *
     * @author chenshun
     * @email sunlightcs@gmail.com
     * @date 2016-11-15
     */
    public enum MenuType {
        /** Catalog (directory) entry. */
        CATALOG(0),
        /** Menu entry. */
        MENU(1),
        /** Button entry. */
        BUTTON(2);

        // Numeric code persisted for this type; final so enum state is immutable.
        private final int value;

        MenuType(int value) {
            this.value = value;
        }

        /** @return the numeric code for this menu type */
        public int getValue() {
            return value;
        }
    }

    /**
     * Scheduled task status.
     *
     * @author chenshun
     * @email sunlightcs@gmail.com
     * @date 2016-12-03
     */
    public enum ScheduleStatus {
        /** Running normally. */
        NORMAL(0),
        /** Paused. */
        PAUSE(1);

        // Numeric code persisted for this status; final so enum state is immutable.
        private final int value;

        ScheduleStatus(int value) {
            this.value = value;
        }

        /** @return the numeric code for this status */
        public int getValue() {
            return value;
        }
    }

    /**
     * Cloud storage service provider.
     */
    public enum CloudService {
        /** Qiniu Cloud. */
        QINIU(1),
        /** Aliyun (Alibaba Cloud). */
        ALIYUN(2),
        /** Tencent Cloud (QCloud). */
        QCLOUD(3);

        // Numeric code persisted for this provider; final so enum state is immutable.
        private final int value;

        CloudService(int value) {
            this.value = value;
        }

        /** @return the numeric code for this provider */
        public int getValue() {
            return value;
        }
    }
}
|
package com.playzone.kidszone.adaptors;
import android.app.Activity;
import android.app.AlertDialog;
import android.app.Dialog;
import android.content.DialogInterface;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Build;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.SeekBar;
import android.widget.TextView;
import com.playzone.kidszone.MainActivity;
import com.playzone.kidszone.Method;
import com.playzone.kidszone.Parent;
import com.playzone.kidszone.R;
import com.playzone.kidszone.SET_USER_DETAILS;
import com.playzone.kidszone.Swipe_home;
import com.playzone.kidszone.fragmentpackage.Educational_Goals;
import com.playzone.kidszone.fragmentpackage.List_Installed_frag;
import com.playzone.kidszone.fragmentpackage.Parent_control;
import com.playzone.kidszone.fragmentpackage.Web_settings;
import com.playzone.kidszone.fragmentpackage.statistics;
import com.playzone.kidszone.fragmentpackage.time_slot_home;
import com.playzone.kidszone.models.ChildModel;
import java.io.File;
import java.util.List;
import androidx.annotation.NonNull;
import androidx.appcompat.widget.AppCompatSeekBar;
import androidx.fragment.app.Fragment;
import androidx.fragment.app.FragmentActivity;
import androidx.fragment.app.FragmentTransaction;
import androidx.recyclerview.widget.RecyclerView;
/**
 * Adapter listing the parent's children; tapping a child routes to the screen
 * matching the pending parental-control choice (apps, web, time, statistics,
 * goals or age restriction).
 */
public class ChooseChildAdapter extends RecyclerView.Adapter<ChooseChildAdapter.ViewHolder> {

    private Activity mContext;
    Swipe_home main;
    // NOTE(review): never assigned (the constructor used to self-assign it);
    // kept only to preserve the field set — confirm whether it can be removed.
    private String type;
    String paid_data = null;
    String paid_data_name = null;
    private int lastPosition = -1;
    private int index = 0;
    Dialog dialog;
    List<ChildModel> mItems;
    private ViewGroup parent;
    private int viewType;
    View view;
    public int Volume_value = 0;
    Method method;

    /**
     * @param con host activity used for inflation and dialogs
     * @param list children to display
     */
    public ChooseChildAdapter(Activity con, List<ChildModel> list) {
        this.mContext = con;
        // Fixed: the original did "this.type = type", a no-op self-assignment;
        // `type` stays null exactly as before.
        mItems = list;
        method = new Method(con);
    }

    @NonNull
    @Override
    public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        this.parent = parent;
        this.viewType = viewType;
        // The original switch on MainActivity.screenSize inflated the same
        // layout (R.layout.myview6) in every branch, so it is collapsed here.
        view = LayoutInflater.from(mContext).inflate(R.layout.myview6, parent, false);
        return new ViewHolder(view);
    }

    @Override
    public void onBindViewHolder(ViewHolder holder, final int position) {
        final ChildModel child = mItems.get(position);
        holder.name.setText(child.getName());
        // Show the stored icon file when present, otherwise the secondary icon.
        // (The original also set the file URI unconditionally first, which the
        // if/else immediately overwrote — that redundant call is removed.)
        if (child.getIcon().equals("")) {
            holder.image.setImageURI(child.geticon2());
        } else {
            holder.image.setImageURI(Uri.parse(new File(child.getIcon()).toString()));
        }
        holder.id = child.getKid_id();
        holder.position = position;
        holder.card.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Remember the chosen kid, close the chooser, then navigate.
                Parent.kid_id = holder.id;
                Parent.kid_name = holder.name.getText().toString();
                Parent_control.alert.dismiss();
                if (Parent_control.choice.equalsIgnoreCase("apps")) {
                    showFragment(v, new List_Installed_frag(), R.anim.enter_from_left, R.anim.exit_to_right);
                } else if (Parent_control.choice.equalsIgnoreCase("age_restriction")) {
                    setAgeRestriction(holder.position);
                } else if (Parent_control.choice.equalsIgnoreCase("web_settings")) {
                    showFragment(v, new Web_settings(), R.anim.enter_from_right, R.anim.exit_to_left);
                } else if (Parent_control.choice.equalsIgnoreCase("time")) {
                    showFragment(v, new time_slot_home(), R.anim.enter_from_right, R.anim.exit_to_left);
                } else if (Parent_control.choice.equalsIgnoreCase("statistics")) {
                    showFragment(v, new statistics(), R.anim.enter_from_right, R.anim.exit_to_left);
                } else if (Parent_control.choice.equalsIgnoreCase("goal")) {
                    // NOTE(review): instantiating an Activity with `new` bypasses
                    // the Android lifecycle; user_alloted_time should likely be
                    // static or live outside MainActivity — confirm.
                    MainActivity main = new MainActivity();
                    main.user_alloted_time(Parent.kid_id);
                    showFragment(v, new Educational_Goals(), R.anim.enter_from_right, R.anim.exit_to_left);
                }
            }
        });
    }

    /**
     * Replaces the main fragment container with {@code fragment}, animating
     * with the given enter/exit animation resources.
     */
    private void showFragment(View v, Fragment fragment, int enterAnim, int exitAnim) {
        FragmentTransaction transaction = ((FragmentActivity) v.getContext())
                .getSupportFragmentManager().beginTransaction();
        transaction.setCustomAnimations(enterAnim, exitAnim);
        transaction.replace(R.id.fragmain, fragment);
        transaction.commit();
    }

    @Override
    public int getItemCount() {
        return mItems.size();
    }

    /** Row holder: the child's name, icon and the clickable card. */
    public class ViewHolder extends RecyclerView.ViewHolder {
        TextView name, pack;
        ImageView image, image_done, lockstatus;
        LinearLayout ll;
        LinearLayout card;
        TextView duration, package_name, expire, status, expiretext;
        String id;
        int position;

        public ViewHolder(View v) {
            super(v);
            name = (TextView) v.findViewById(R.id.childname);
            card = (LinearLayout) v.findViewById(R.id.cardView);
            image = (ImageView) v.findViewById(R.id.image);
        }
    }

    /**
     * Opens the "set age restriction" dialog for the child at {@code pos} and
     * persists the chosen value in memory, the local DB and (when the network
     * is available) the remote service.
     */
    public void setAgeRestriction(int pos) {
        try {
            final AlertDialog.Builder alertDialogBuilder = new AlertDialog.Builder(mContext);
            alertDialogBuilder.setIcon(R.drawable.done);
            LayoutInflater inflater = mContext.getLayoutInflater();
            final View dialog = inflater.inflate(R.layout.set_age_restriction, null);
            if (Build.VERSION.SDK_INT >= 21) {
                alertDialogBuilder.setView(dialog);
                final AlertDialog alert = alertDialogBuilder.create();
                ((TextView) dialog.findViewById(R.id.itemname)).setText("Set Age Restriction");
                AppCompatSeekBar seekbar = dialog.findViewById(R.id.seek);
                seekbar.setMax(17);
                // Fixed: the original tested getContent_Restriction().equals(null),
                // which is always false and NPEs when the value actually is null.
                String restriction = Parent.childModelList.get(pos).getContent_Restriction();
                if (restriction != null && !restriction.equals("")) {
                    seekbar.setProgress(Integer.parseInt(restriction));
                    ((TextView) dialog.findViewById(R.id.itemname)).append("\nAge 0 - " + restriction);
                }
                seekbar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
                    @Override
                    public void onProgressChanged(SeekBar seekBar, int newVolume, boolean b) {
                        ((TextView) dialog.findViewById(R.id.itemname)).setText("Age 0 - " + newVolume);
                        Volume_value = newVolume;
                    }

                    @Override
                    public void onStartTrackingTouch(SeekBar seekBar) {
                    }

                    @Override
                    public void onStopTrackingTouch(SeekBar seekBar) {
                    }
                });
                ((Button) dialog.findViewById(R.id.ve)).setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View view) {
                        // Both branches of the original did identical persistence;
                        // only the label refresh was gated on a non-zero value.
                        if (Volume_value != 0) {
                            ((TextView) dialog.findViewById(R.id.itemname)).setText("Age 0 - " + Volume_value);
                        }
                        Parent.childModelList.get(pos).setContent_Restriction(Volume_value + "");
                        Parent.dbhelper.UpdateChildContent_Restriction(Volume_value + "", Parent.kid_id);
                        if (method.isNetworkAvailable()) {
                            SET_USER_DETAILS set = new SET_USER_DETAILS();
                            set.updateContentRestriction(Parent.pemail, Parent.kid_id,
                                    String.valueOf(Volume_value), "0",
                                    new Fragment(), mContext);
                        }
                        method.alertBox("Age Restriction saved", 2);
                        alert.dismiss();
                    }
                });
                ((Button) dialog.findViewById(R.id.close)).setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View view) {
                        alert.dismiss();
                    }
                });
                alert.show();
            } else {
                // Pre-Lollipop fallback: plain dialog without the seek-bar UI.
                alertDialogBuilder.setMessage("Set Age Restriction");
                alertDialogBuilder.setTitle("Content Restriction Settings");
                alertDialogBuilder.setPositiveButton("Close", new DialogInterface.OnClickListener() {
                    public void onClick(DialogInterface arg0, int arg1) {
                        arg0.dismiss();
                    }
                });
                alertDialogBuilder.setNegativeButton("SAVE", new DialogInterface.OnClickListener() {
                    public void onClick(DialogInterface arg0, int arg1) {
                        arg0.dismiss();
                    }
                });
                alertDialogBuilder.create().show();
            }
        } catch (Exception e) {
            // Fixed: the original swallowed all exceptions silently; at minimum
            // surface them. Route to a proper logger if the project has one.
            e.printStackTrace();
        }
    }
}
|
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.example.android.samplesync.platform;
import android.content.ContentProviderOperation;
import android.content.ContentProviderResult;
import android.content.ContentResolver;
import android.content.Context;
import android.content.OperationApplicationException;
import android.net.Uri;
import android.os.RemoteException;
import android.provider.ContactsContract;
import android.util.Log;
import java.util.ArrayList;
import java.util.List;
/**
 * This class handles execution of batch operations on the Contacts provider.
 * Operations are queued with {@link #add} and applied in one transaction by
 * {@link #execute}.
 */
public final class BatchOperation {
    private static final String TAG = "BatchOperation";
    private final ContentResolver mResolver;
    // List for storing the queued batch operations.
    private final ArrayList<ContentProviderOperation> mOperations;

    /**
     * @param context unused; retained to keep the public constructor signature
     * @param resolver resolver through which the batch is applied
     */
    public BatchOperation(Context context, ContentResolver resolver) {
        mResolver = resolver;
        mOperations = new ArrayList<>();
    }

    /** @return the number of operations currently queued */
    public int size() {
        return mOperations.size();
    }

    /** Queues one operation for the next {@link #execute()}. */
    public void add(ContentProviderOperation cpo) {
        mOperations.add(cpo);
    }

    /**
     * Applies all queued operations in a single batch and clears the queue.
     *
     * @return the result URIs; empty if nothing was queued or the batch failed
     */
    public List<Uri> execute() {
        List<Uri> resultUris = new ArrayList<>();
        if (mOperations.isEmpty()) {
            return resultUris;
        }
        // Apply the operations to the content provider. The queue is cleared
        // whether or not the batch succeeds, matching the original behavior.
        try {
            ContentProviderResult[] results = mResolver.applyBatch(ContactsContract.AUTHORITY,
                    mOperations);
            if (results != null) {
                for (ContentProviderResult result : results) {
                    resultUris.add(result.uri);
                }
            }
        } catch (final OperationApplicationException | RemoteException e) {
            // Both failures are logged identically, so a multi-catch replaces
            // the two duplicated catch blocks.
            Log.e(TAG, "storing contact data failed", e);
        }
        mOperations.clear();
        return resultUris;
    }
}
|
package curso.modelo;
/**
 * Simple client entity: an identifier plus a display name.
 */
public class Cliente {

    private Long codigo;
    private String nome;

    /** @return the client's identifier, or null if not set */
    public Long getCodigo() {
        return this.codigo;
    }

    /** @param codigo the client's identifier */
    public void setCodigo(Long codigo) {
        this.codigo = codigo;
    }

    /** @return the client's name, or null if not set */
    public String getNome() {
        return this.nome;
    }

    /** @param nome the client's name */
    public void setNome(String nome) {
        this.nome = nome;
    }
}
|
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.OperateRecordDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import java.util.List;
import java.util.Map;
/**
 * Persistence operations for operate (audit) records.
 *
 * @author zhongyuankai
 * @date 2020/09/03
 */
public interface OperateRecordService {
    /**
     * Inserts a pre-built record.
     *
     * @param operateRecordDO the record to persist
     * @return the insert result (presumably rows affected — confirm MyBatis convention)
     */
    int insert(OperateRecordDO operateRecordDO);

    /**
     * Builds a record from the individual audit fields and inserts it.
     *
     * @param operator who performed the operation
     * @param module module the operation belongs to
     * @param resourceName name of the affected resource
     * @param operate kind of operation performed
     * @param content extra key/value details of the operation
     * @return the insert result (presumably rows affected — confirm MyBatis convention)
     */
    int insert(String operator, ModuleEnum module, String resourceName, OperateEnum operate, Map<String, String> content);

    /**
     * Queries records matching the given condition object.
     *
     * @param dto the filter conditions
     * @return matching records
     */
    List<OperateRecordDO> queryByCondition(OperateRecordDTO dto);
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package brooklyn.basic.internal;
import brooklyn.basic.internal.ApiObjectsFactoryInterface;
import brooklyn.entity.Entity;
import brooklyn.entity.basic.BrooklynTaskTags;
import brooklyn.management.Task;
import brooklyn.util.task.Tasks;
/**
 * Resolves the catalog item id from the currently executing task's context.
 */
public class ApiObjectsFactoryImpl implements ApiObjectsFactoryInterface {

    /**
     * Returns the catalog item id of the entity attached to the current task,
     * or null when there is no current task or no context entity.
     */
    @Override
    public String getCatalogItemIdFromContext() {
        Task<?> currentTask = Tasks.current();
        if (currentTask == null) {
            return null;
        }
        Entity contextEntity = BrooklynTaskTags.getContextEntity(currentTask);
        return (contextEntity == null) ? null : contextEntity.getCatalogItemId();
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.testing.mock.sling.loader;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import org.apache.jackrabbit.vault.util.JcrConstants;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.testing.mock.sling.ResourceResolverType;
import org.apache.sling.testing.mock.sling.junit.SlingContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
@SuppressWarnings("null")
public abstract class AbstractContentLoaderFolderJsonTest {

    @Rule
    public SlingContext context = new SlingContext(getResourceResolverType());

    // Subclasses choose which resource resolver implementation is under test.
    protected abstract ResourceResolverType getResourceResolverType();

    @Before
    public void setUp() {
        // Mount the sample JSON tree under /mount and the apps subtree under /apps.
        context.load().folderJson("src/test/resources/json-import-samples", "/mount");
        context.load().folderJson("src/test/resources/json-import-samples/apps", "/apps");
    }

    @After
    public final void tearDown() throws Exception {
        // make sure all changes from ContentLoader are committed
        assertFalse(context.resourceResolver().hasChanges());
    }

    // The imported page content carries its resource type.
    @Test
    public void testContentResourceType() {
        Resource resource = context.resourceResolver().getResource("/mount/content/jcr:content");
        assertEquals("sample/components/homepage", resource.getResourceType());
    }

    // Children are listed in the order defined by the imported JSON.
    @Test
    public void testContentListChildren() {
        Resource resource = context.resourceResolver().getResource("/mount/content");
        List<Resource> result = ImmutableList.copyOf(resource.listChildren());
        assertEquals("jcr:content", result.get(0).getName());
        assertEquals("toolbar", result.get(1).getName());
    }

    // DAM assets keep their dam:Asset primary type as resource type.
    @Test
    public void testDamResourceType() {
        Resource resource = context.resourceResolver().getResource("/mount/dam/portraits/scott_reynolds.jpg");
        assertEquals("dam:Asset", resource.getResourceType());
    }

    // Binary files are imported with their original content (size checked by helper).
    @Test
    public void testBinaryResource() throws IOException {
        Resource fileResource = context.resourceResolver().getResource("/mount/binary/sample-image.gif");
        AbstractContentLoaderBinaryTest.assertSampleImageFileSize(fileResource);
    }

    // Apps content is addressable by absolute path.
    @Test
    public void testAppsResource() {
        Resource resource = context.resourceResolver().getResource("/apps/app1/components/comp1");
        assertNotNull(resource);
        assertEquals("Component #1", resource.getValueMap().get(JcrConstants.JCR_TITLE, String.class));
    }

    // ...and by search-path-relative path.
    @Test
    public void testAppsResource_SearchPath() {
        Resource resource = context.resourceResolver().getResource("app1/components/comp1");
        assertNotNull(resource);
        assertEquals("Component #1", resource.getValueMap().get(JcrConstants.JCR_TITLE, String.class));
    }

    // Resource-type inheritance is resolved through the apps components.
    @Test
    public void testAppsResource_ParentResourceType() {
        Resource resource = context.resourceResolver().getResource("/mount/content/jcr:content/comp1-resource");
        assertNotNull(resource);
        assertEquals("app1/components/base", context.resourceResolver().getParentResourceType(resource));
        assertTrue(context.resourceResolver().isResourceType(resource, "app1/components/comp1"));
        assertTrue(context.resourceResolver().isResourceType(resource, "/apps/app1/components/comp1"));
        assertTrue(context.resourceResolver().isResourceType(resource, "app1/components/base"));
        assertTrue(context.resourceResolver().isResourceType(resource, "core/components/superResource"));
    }
}
|
package io.xunyss.commons.exec;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
/**
 * Mutable set of environment variables for launching child processes,
 * optionally seeded from the current process environment.
 *
 * @author XUNYSS
 */
public final class Environment {

    /**
     * Snapshot of the current process environment, taken at class-load time.
     */
    private static final Environment currentEnvironment = new Environment(true);

    /**
     * Backing store for the variables; never reassigned, hence final.
     */
    private final Map<String, String> environmentMap = new HashMap<>();

    /**
     * Constructor.
     *
     * @param inherit whether to inherit current process environment variables (default: false)
     */
    public Environment(boolean inherit) {
        if (inherit) {
            environmentMap.putAll(System.getenv());
        }
    }

    /**
     * Constructor. Creates an empty environment (does not inherit).
     */
    public Environment() {
        this(false);
    }

    /**
     * Get environment variable.
     *
     * @param key key
     * @return value, or null if not set
     */
    public String get(String key) {
        return environmentMap.get(key);
    }

    /**
     * Put environment variable.
     *
     * @param key key
     * @param value value
     * @return the previous value, or null if none
     */
    public String put(String key, String value) {
        return environmentMap.put(key, value);
    }

    /**
     * Remove environment variable.
     *
     * @param key key
     * @return the previous value, or null if none
     */
    public String remove(String key) {
        return environmentMap.remove(key);
    }

    /**
     * Get the variable list as an array of "key=value" strings.
     *
     * @return array of key=value assignment strings, or null when empty
     */
    public String[] toStrings() {
        // NOTE(review): returning null (not an empty array) for an empty map
        // looks deliberate — e.g. Runtime.exec treats a null envp as "inherit
        // the parent environment" while an empty array means "no variables".
        // Confirm callers before changing this to an empty array as the old
        // commented-out code suggested.
        if (environmentMap.isEmpty()) {
            return null;
        }
        final String[] result = new String[environmentMap.size()];
        int i = 0;
        for (final Entry<String, String> entry : environmentMap.entrySet()) {
            // Guard against null keys/values so the joined string never reads "null".
            final String key = entry.getKey() == null ? "" : entry.getKey();
            final String value = entry.getValue() == null ? "" : entry.getValue();
            result[i] = key + "=" + value;
            i++;
        }
        return result;
    }

    @Override
    public String toString() {
        return environmentMap.toString();
    }

    //----------------------------------------------------------------------------------------------

    /**
     * Returns current process environment.
     *
     * @return current process environment
     */
    public static Environment currentProcessEnvironment() {
        return currentEnvironment;
    }
}
|
package com.larissa.Aula54;
/**
 * Form backing object holding a name and a gender selection.
 */
public class Formulario {

    /**
     * Gender options, each carrying the single-character code used on the form.
     */
    enum Genero {
        FEMININO('F'), MASCULINO('M');

        // Single-character code; final so enum state is immutable. The original
        // stored it with no accessor, leaving the field unreachable.
        private final char valor;

        Genero(char valor) {
            this.valor = valor;
        }

        /** @return the single-character code for this gender (e.g. 'F') */
        public char getValor() {
            return valor;
        }
    }

    // Form fields; accessors not implemented yet in the original.
    private String nome;
    private Genero genero;
}
|
package challenges.utilitiesTest;
import challenges.utilities.AnimalShelter;
import challenges.utilities.Cat;
import challenges.utilities.Dog;
import org.junit.Test;
import static org.junit.Assert.*;
public class AnimalShelterTest {

    // A freshly-constructed shelter exists and has null front/back pointers.
    @Test public void newAnimalShelterTest(){
        AnimalShelter animalShelter = new AnimalShelter();
        assertNotNull("new shelter should exist", animalShelter);
        assertNull("shelter should be empty", animalShelter.front);
        assertNull("shelter should be empty", animalShelter.back);
    }

    // Animals enqueue at the back, preserving FIFO order in toString().
    @Test public void enQTest(){
        AnimalShelter animalShelter = new AnimalShelter();
        animalShelter.enqueue(new Dog("Ashton"));
        assertEquals("Ashton should be set to the front", "{Ashton dog} -> NULL", animalShelter.toString());
        animalShelter.enqueue(new Cat("Avery"));
        assertEquals("Avery should be set to the back", "{Ashton dog} -> {Avery cat} -> NULL", animalShelter.toString());
        animalShelter.enqueue(new Dog("Augustus"));
        assertEquals("Augustus should now be set to the back", "{Ashton dog} -> {Avery cat} -> {Augustus dog} -> NULL", animalShelter.toString());
    }

    // deQueue returns the oldest animal of the requested species, or null.
    @Test public void deQTest(){
        AnimalShelter animalShelter = new AnimalShelter();
        assertNull("cannot dequeue from empty shelter", animalShelter.deQueue("dog"));
        Dog ashton = new Dog("Ashton");
        animalShelter.enqueue(ashton);
        assertNull("cannot dequeue cat from shelter with only dogs", animalShelter.deQueue("cat"));
        assertEquals("should return Ashton", ashton, animalShelter.deQueue("dog"));
        assertEquals("shelter should not have any pets", "NULL", animalShelter.toString());
        animalShelter.enqueue(ashton);
        Cat avery = new Cat("Avery");
        Dog augustus = new Dog("Augustus");
        animalShelter.enqueue(avery);
        animalShelter.enqueue(augustus);
        assertEquals("shelter should have three pets", "{Ashton dog} -> {Avery cat} -> {Augustus dog} -> NULL", animalShelter.toString());
        assertEquals("should return Avery", avery, animalShelter.deQueue("cat"));
        assertEquals("shelter should not have any cats", "{Ashton dog} -> {Augustus dog} -> NULL", animalShelter.toString());
        AnimalShelter animalShelter2 = new AnimalShelter();
        animalShelter2.enqueue(ashton);
        animalShelter2.enqueue(avery);
        animalShelter2.enqueue(augustus);
        // NOTE(review): this asserts on animalShelter (which now holds only dogs)
        // rather than the freshly-populated animalShelter2; presumably
        // animalShelter2 was intended — confirm before changing, since the
        // assertion happens to hold either way for an unknown species.
        assertNull("can only return a cat or a dog", animalShelter.deQueue("panda"));
        assertEquals("should return first dog", ashton, animalShelter2.deQueue("dog"));
        assertEquals("new front should be Avery", "{Avery cat} -> {Augustus dog} -> NULL", animalShelter2.toString());
    }
}
|
package java.sql;
/*
* #%L
* Matos
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2010 - 2014 Orange SA
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/**
 * Stub declaration mirroring {@code java.sql.NClob}. The {@code @Real}
 * annotation binds this interface to the concrete fake implementation
 * named in its {@code value}; no members are declared beyond those
 * inherited from {@link Clob}.
 */
@com.francetelecom.rd.stubs.annotation.Real(value = "com.francetelecom.rd.fakeandroid.SqlNClobImpl", superClass = "")
public interface NClob
    extends Clob
{
}
|
/*
* Copyright 2016 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.gs.dmn.runtime;
import com.gs.dmn.runtime.annotation.DRGElement;
import com.gs.dmn.runtime.annotation.Rule;
import org.junit.Test;
import static org.junit.Assert.assertNull;
public class DefaultDMNBaseDecisionTest {
    private final DefaultDMNBaseDecision baseDecision = new DefaultDMNBaseDecision();

    // The base decision class carries no DRGElement annotation of its own.
    @Test
    public void testGetDRGElementAnnotation() {
        DRGElement drgElementAnnotation = this.baseDecision.getDRGElementAnnotation();
        assertNull(drgElementAnnotation);
    }

    // Likewise, asking for the annotation of rule index 0 yields null.
    @Test
    public void testGetRuleAnnotation() {
        Rule ruleAnnotation = this.baseDecision.getRuleAnnotation(0);
        assertNull(ruleAnnotation);
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.amplifybackend.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.amplifybackend.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * CreateBackendAuthResult JSON Unmarshaller.
 *
 * Generated code: walks the JSON token stream and copies the recognized
 * top-level fields into a {@link CreateBackendAuthResult}.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateBackendAuthResultJsonUnmarshaller implements Unmarshaller<CreateBackendAuthResult, JsonUnmarshallerContext> {

    /**
     * Reads one result object from the context's JSON stream.
     *
     * @param context the unmarshalling context positioned at (or before) the object
     * @return the populated result; empty if the value is JSON null
     * @throws Exception propagated from the underlying JSON parser
     */
    public CreateBackendAuthResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        CreateBackendAuthResult createBackendAuthResult = new CreateBackendAuthResult();
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return createBackendAuthResult;
        }
        // Token walk: match field names one nesting level below the object and
        // stop when the stream rises back to (or above) the starting depth.
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("appId", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setAppId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("backendEnvironmentName", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setBackendEnvironmentName(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("error", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setError(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("jobId", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setJobId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("operation", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setOperation(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("status", targetDepth)) {
                    context.nextToken();
                    createBackendAuthResult.setStatus(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return createBackendAuthResult;
    }

    private static CreateBackendAuthResultJsonUnmarshaller instance;

    // NOTE(review): lazy init is unsynchronized; presumably acceptable because
    // construction is stateless and idempotent (the usual pattern in these
    // generated unmarshallers) — confirm if stricter singleton semantics matter.
    public static CreateBackendAuthResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new CreateBackendAuthResultJsonUnmarshaller();
        return instance;
    }
}
|
package com.linkedin.android.learning_android_accessibility.activities;
import android.content.Context;
import android.content.Intent;
import android.support.annotation.IdRes;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.os.Bundle;
import android.view.View;
import com.linkedin.android.learning_android_accessibility.R;
import com.linkedin.android.learning_android_accessibility.fragments.DetailFragment;
import com.linkedin.android.learning_android_accessibility.fragments.ListFragment;
/**
 * Hosts the list fragment in a single container and swaps in the detail
 * fragment when a list item is tapped.
 */
public class SingleActivity extends BaseActivity implements ListFragment.ItemClickListener {

    /** Container that hosts both the list and detail fragments. */
    @IdRes
    private static final int mFragmentContainerId = R.id.single_fragment_container;

    /** Builds an intent that launches this activity. */
    public static Intent newIntent(Context context) {
        return new Intent(context, SingleActivity.class);
    }

    @Override
    protected int getLayoutResId() {
        return R.layout.activity_single;
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Show the list screen as the initial content of the container.
        getSupportFragmentManager()
                .beginTransaction()
                .add(mFragmentContainerId, ListFragment.newInstance())
                .commit();
    }

    @Override
    public void onListItemClicked(View view, int position) {
        // Slide the detail screen in on top of the list and put it on the back stack.
        getSupportFragmentManager()
                .beginTransaction()
                .setCustomAnimations(R.anim.slide_in_left, R.anim.slide_out_left)
                .add(mFragmentContainerId, DetailFragment.newInstance())
                .addToBackStack(null)
                .commit();
    }
}
|
// ------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
// ------------------------------------------------------------------------------
package com.microsoft.graph.extensions;
import com.microsoft.graph.concurrency.*;
import com.microsoft.graph.core.*;
import com.microsoft.graph.extensions.*;
import com.microsoft.graph.http.*;
import com.microsoft.graph.generated.*;
import com.microsoft.graph.options.*;
import com.microsoft.graph.serializer.*;
import java.util.Arrays;
import java.util.EnumSet;
// This file is available for extending, afterwards please submit a pull request.
/**
 * The class for the Workbook Functions Base Request Builder.
 * Thin generated wrapper: all arguments are forwarded verbatim to the base builder.
 */
public class WorkbookFunctionsBaseRequestBuilder extends BaseWorkbookFunctionsBaseRequestBuilder implements IWorkbookFunctionsBaseRequestBuilder {
/**
 * The request builder for this WorkbookFunctionsBase
 *
 * @param requestUrl The request url
 * @param client The service client
 * @param requestOptions The options for this request
 * @param number The number to convert (JSON-encoded workbook function argument)
 * @param radix The base of the target representation (JSON-encoded workbook function argument)
 * @param minLength The minimum length of the returned text (JSON-encoded workbook function argument)
 */
public WorkbookFunctionsBaseRequestBuilder(final String requestUrl, final IBaseClient client, final java.util.List<Option> requestOptions, final com.google.gson.JsonElement number, final com.google.gson.JsonElement radix, final com.google.gson.JsonElement minLength) {
super(requestUrl, client, requestOptions, number, radix, minLength);
}
}
|
package com.wemirr.framework.db.mybatis;
import com.baomidou.mybatisplus.extension.service.IService;
/**
 * Marker service interface extending MyBatis-Plus {@link IService}; adds no
 * members itself but gives the project a single extension point for common
 * service-layer behavior.
 *
 * @param <T> Entity type managed by the service.
 *
 * @author battcn
 */
public interface SuperService<T> extends IService<T> {
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.mihanjk.nopollenapp.di.components;
import com.mihanjk.nopollenapp.data.services.NotificationService;
import com.mihanjk.nopollenapp.di.modules.UserModule;
import com.mihanjk.nopollenapp.di.scope.UserScope;
import com.mihanjk.nopollenapp.presentation.forecast.ForecastPresenter;
import com.mihanjk.nopollenapp.presentation.main.MainPresenter;
import com.mihanjk.nopollenapp.presentation.message.MessagePresenter;
import com.mihanjk.nopollenapp.presentation.settings.SettingsPresenter;
import dagger.Subcomponent;
/**
 * Dagger subcomponent scoped to a signed-in user. Provides the bindings declared
 * in {@link UserModule} to the presenters and the notification service below.
 */
@UserScope
@Subcomponent(modules = {UserModule.class})
public interface UserComponent {
/** Injects user-scoped dependencies into the settings presenter. */
void inject(SettingsPresenter settingsPresenter);
/** Injects user-scoped dependencies into the message presenter. */
void inject(MessagePresenter messagePresenter);
/** Injects user-scoped dependencies into the main presenter. */
void inject(MainPresenter mainPresenter);
/** Injects user-scoped dependencies into the forecast presenter. */
void inject(ForecastPresenter forecastPresenter);
/** Injects user-scoped dependencies into the background notification service. */
void inject(NotificationService notificationService);
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import javax.cache.Cache;
import javax.cache.CacheException;
import javax.cache.expiry.Duration;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.expiry.TouchedExpiryPolicy;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.EntryProcessorResult;
import javax.cache.processor.MutableEntry;
import junit.framework.AssertionFailedError;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteEvents;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.IgniteTransactions;
import org.apache.ignite.cache.CacheEntry;
import org.apache.ignite.cache.CacheEntryProcessor;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.Event;
import org.apache.ignite.events.EventType;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.IgnitionEx;
import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager;
import org.apache.ignite.internal.processors.resource.GridSpringResourceContext;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.lang.GridAbsPredicateX;
import org.apache.ignite.internal.util.lang.IgnitePair;
import org.apache.ignite.internal.util.typedef.CIX1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.PA;
import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.lang.IgniteClosure;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.resources.CacheNameResource;
import org.apache.ignite.resources.IgniteInstanceResource;
import org.apache.ignite.resources.LoggerResource;
import org.apache.ignite.resources.ServiceResource;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;
import org.jetbrains.annotations.Nullable;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
import static org.apache.ignite.cache.CacheMode.LOCAL;
import static org.apache.ignite.cache.CacheMode.PARTITIONED;
import static org.apache.ignite.cache.CacheMode.REPLICATED;
import static org.apache.ignite.cache.CachePeekMode.ALL;
import static org.apache.ignite.cache.CachePeekMode.OFFHEAP;
import static org.apache.ignite.cache.CachePeekMode.ONHEAP;
import static org.apache.ignite.cache.CachePeekMode.PRIMARY;
import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_LOCKED;
import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_SWAPPED;
import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_UNLOCKED;
import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_UNSWAPPED;
import static org.apache.ignite.testframework.GridTestUtils.assertThrows;
import static org.apache.ignite.testframework.GridTestUtils.waitForCondition;
import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE;
import static org.apache.ignite.transactions.TransactionState.COMMITTED;
/**
* Full API cache test.
*/
@SuppressWarnings("TransientFieldInNonSerializableClass")
public abstract class GridCacheAbstractFullApiSelfTest extends GridCacheAbstractSelfTest {
/** Test timeout */
private static final long TEST_TIMEOUT = 60 * 1000;
/** Service name. */
private static final String SERVICE_NAME1 = "testService1";
/** Entry processor that always throws; used to test error propagation from invoke(). */
public static final CacheEntryProcessor<String, Integer, String> ERR_PROCESSOR =
new CacheEntryProcessor<String, Integer, String>() {
/** Serial version uid. */
private static final long serialVersionUID = 0L;
@Override public String process(MutableEntry<String, Integer> e, Object... args) {
throw new RuntimeException("Failed!");
}
};
/** Increment processor for invoke operations. */
public static final EntryProcessor<String, Integer, String> INCR_PROCESSOR = new IncrementEntryProcessor();
/** Increment processor for invoke operations with IgniteEntryProcessor. */
public static final CacheEntryProcessor<String, Integer, String> INCR_IGNITE_PROCESSOR =
new CacheEntryProcessor<String, Integer, String>() {
/** Serial version uid. */
private static final long serialVersionUID = 0L;
@Override public String process(MutableEntry<String, Integer> e, Object... args) {
// Delegates to the plain EntryProcessor variant so both behave identically.
return INCR_PROCESSOR.process(e, args);
}
};
/** Remove processor for invoke operations (original javadoc said "Increment" — copy/paste slip). */
public static final EntryProcessor<String, Integer, String> RMV_PROCESSOR = new RemoveEntryProcessor();
/** Remove processor for invoke operations with IgniteEntryProcessor. */
public static final CacheEntryProcessor<String, Integer, String> RMV_IGNITE_PROCESSOR =
new CacheEntryProcessor<String, Integer, String>() {
/** Serial version uid. */
private static final long serialVersionUID = 0L;
@Override public String process(MutableEntry<String, Integer> e, Object... args) {
// Delegates to the plain EntryProcessor variant so both behave identically.
return RMV_PROCESSOR.process(e, args);
}
};
/** Default grid; assigned in beforeTest(), cleared in afterTest(). */
protected transient Ignite dfltIgnite;
/** Per-node cache configurations captured by startGrid(); non-null only while caches start dynamically. */
private Map<String, CacheConfiguration[]> cacheCfgMap;
/** {@inheritDoc} */
@Override protected long getTestTimeout() {
// Hard cap for every test in this class: 60 seconds.
return TEST_TIMEOUT;
}
/** {@inheritDoc} */
@Override protected int gridCount() {
// Single node by default; subclasses override for multi-node topologies.
return 1;
}
/** {@inheritDoc} */
@Override protected boolean swapEnabled() {
return true;
}
/**
 * {@inheritDoc}
 *
 * Additionally disables shared-memory communication, forces discovery server mode
 * and ensures EVT_CACHE_OBJECT_READ is among the included event types.
 */
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);

    // Force network communication (no shared memory) and server-mode discovery.
    ((TcpCommunicationSpi)cfg.getCommunicationSpi()).setSharedMemoryPort(-1);
    ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setForceServerMode(true);

    int[] evtTypes = cfg.getIncludeEventTypes();

    if (evtTypes == null || evtTypes.length == 0)
        cfg.setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_READ);
    else {
        // Already included — nothing to do.
        for (int evtType : evtTypes) {
            if (evtType == EventType.EVT_CACHE_OBJECT_READ)
                return cfg;
        }

        int[] updatedEvtTypes = Arrays.copyOf(evtTypes, evtTypes.length + 1);

        updatedEvtTypes[updatedEvtTypes.length - 1] = EventType.EVT_CACHE_OBJECT_READ;

        // Bug fix: the extended array was previously built but never applied,
        // so EVT_CACHE_OBJECT_READ was silently dropped when other event types existed.
        cfg.setIncludeEventTypes(updatedEvtTypes);
    }

    return cfg;
}
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
initStoreStrategy();
// STATIC mode: caches come up together with the nodes via the base class.
if (cacheStartType() == CacheStartMode.STATIC)
super.beforeTestsStarted();
else {
// Dynamic modes: startGrid() (overridden below) strips cache configurations
// into cacheCfgMap so caches can be created explicitly after node start.
cacheCfgMap = Collections.synchronizedMap(new HashMap<String, CacheConfiguration[]>());
if (cacheStartType() == CacheStartMode.NODES_THEN_CACHES) {
// All nodes first, then every captured cache configuration.
super.beforeTestsStarted();
for (Map.Entry<String, CacheConfiguration[]> entry : cacheCfgMap.entrySet()) {
Ignite ignite = grid(entry.getKey());
for (CacheConfiguration cfg : entry.getValue())
ignite.getOrCreateCache(cfg);
}
awaitPartitionMapExchange();
}
else {
// Interleaved: start each node and immediately create its caches.
int cnt = gridCount();
assert cnt >= 1 : "At least one grid must be started";
for (int i = 0; i < cnt; i++) {
Ignite ignite = startGrid(i);
CacheConfiguration[] cacheCfgs = cacheCfgMap.get(ignite.name());
for (CacheConfiguration cfg : cacheCfgs)
ignite.createCache(cfg);
}
if (cnt > 1)
checkTopology(cnt);
awaitPartitionMapExchange();
}
// Capture phase is over; startGrid() reverts to default behavior.
cacheCfgMap = null;
}
for (int i = 0; i < gridCount(); i++)
info("Grid " + i + ": " + grid(i).localNode().id());
}
/**
 * Checks that any invoke returns result.
 *
 * @throws Exception if something goes bad.
 *
 * TODO https://issues.apache.org/jira/browse/IGNITE-4380.
 */
public void _testInvokeAllMultithreaded() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();

    final int threadCnt = 4;
    final int cnt = 5000;

    // NOTE(review): the original early-exit guard here ended with '&& false' and
    // could never fire (dead code, apparently left over from a removed ATOMIC/CLOCK
    // mode check); the guard has been deleted with behavior unchanged.

    final Set<String> keys = Collections.singleton("myKey");

    // Hammer a single key from several threads; every invokeAll must return one result.
    GridTestUtils.runMultiThreaded(new Runnable() {
        @Override public void run() {
            for (int i = 0; i < cnt; i++) {
                final Map<String, EntryProcessorResult<String>> res = cache.invokeAll(keys, INCR_PROCESSOR);

                assertEquals(1, res.size());
            }
        }
    }, threadCnt, "testInvokeAllMultithreaded");

    // Each of the threadCnt threads incremented the key cnt times.
    assertEquals(cnt * threadCnt, (int)cache.get("myKey"));
}
/**
 * Verifies that the skipStore flag is overridden inside a transaction:
 * a value put through the cache within a tx must still reach the underlying store.
 */
public void testWriteThroughTx() {
    final String storeKey = "writeThroughKey";

    storeStgy.removeFromStore(storeKey);

    try (Transaction tx = grid(0).transactions().txStart()) {
        IgniteCache<String, Integer> txCache = jcache(0);

        // Read with skipStore: nothing should be present yet.
        assertNull(txCache.withSkipStore().get(storeKey));

        // Write through the cache, then commit.
        txCache.put(storeKey, 2);

        tx.commit();
    }

    // Despite skipStore used for the read, the committed write must hit the store.
    assertEquals(2, storeStgy.getFromStore(storeKey));
}
/**
 * Checks that skipStore flag gets overridden inside a transaction.
 * Asserts that no read-through to the store happens: the transactional get
 * must return the cached value (1), not the newer store value (2).
 */
public void testNoReadThroughTx() {
String key = "writeThroughKey";
IgniteCache<String, Integer> cache = jcache(0);
storeStgy.resetStore();
cache.put(key, 1);
// Store now disagrees with the cache on purpose.
storeStgy.putToStore(key, 2);
try (final Transaction transaction = grid(0).transactions().txStart()) {
Integer old = cache.get(key);
// Cached value wins — the store must not be consulted.
assertEquals((Integer)1, old);
// update the grid
cache.put(key, 2);
// finally commit the transaction
transaction.commit();
}
// Zero read-throughs recorded by the store strategy.
assertEquals(0, storeStgy.getReads());
}
/** {@inheritDoc} */
@Override protected Ignite startGrid(String igniteInstanceName, GridSpringResourceContext ctx) throws Exception {
// Outside the dynamic-start capture phase behave exactly like the base class.
if (cacheCfgMap == null)
return super.startGrid(igniteInstanceName, ctx);
IgniteConfiguration cfg = getConfiguration(igniteInstanceName);
// Capture cache configurations for later explicit creation and start the node without them.
cacheCfgMap.put(igniteInstanceName, cfg.getCacheConfiguration());
cfg.setCacheConfiguration();
if (!isRemoteJvm(igniteInstanceName))
return IgnitionEx.start(optimize(cfg), ctx);
else
return startRemoteGrid(igniteInstanceName, optimize(cfg), ctx);
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
IgniteCache<String, Integer> cache = jcache();
// Sanity: the cache must be empty both before and after base-class setup.
assertEquals(0, cache.localSize());
assertEquals(0, cache.size());
super.beforeTest();
assertEquals(0, cache.localSize());
assertEquals(0, cache.size());
dfltIgnite = grid(0);
}
/** {@inheritDoc} */
@Override protected void afterTest() throws Exception {
super.afterTest();
IgniteCache<String, Integer> cache = jcache();
// Every test must leave the cache fully empty, including on-heap entries.
assertEquals(0, cache.localSize());
assertEquals(0, cache.size());
assertEquals(0, cache.size(ONHEAP));
dfltIgnite = null;
}
/**
 * @return A not near-only cache. Subclasses with near caches override this
 * to return the full (backing) cache instead of the near projection.
 */
protected IgniteCache<String, Integer> fullCache() {
return jcache();
}
/**
 * Verifies localSize()/size() accounting across all grids after puts,
 * an invalidation and for PRIMARY/ALL peek modes.
 *
 * @throws Exception In case of error.
 */
public void testSize() throws Exception {
assert jcache().localSize() == 0;
int size = 10;
final Map<String, Integer> map = new HashMap<>();
for (int i = 0; i < size; i++)
map.put("key" + i, i);
// Put in primary nodes to avoid near readers which will prevent entry from being cleared.
Map<ClusterNode, Collection<String>> mapped = grid(0).<String>affinity(null).mapKeysToNodes(map.keySet());
for (int i = 0; i < gridCount(); i++) {
Collection<String> keys = mapped.get(grid(i).localNode());
if (!F.isEmpty(keys)) {
for (String key : keys)
jcache(i).put(key, map.get(key));
}
}
// Drop key0 from the expectation map and invalidate it everywhere.
map.remove("key0");
mapped = grid(0).<String>affinity(null).mapKeysToNodes(map.keySet());
for (int i = 0; i < gridCount(); i++) {
// Will actually delete entry from map.
CU.invalidate(jcache(i), "key0");
assertNull("Failed check for grid: " + i, jcache(i).localPeek("key0", ONHEAP));
Collection<String> keysCol = mapped.get(grid(i).localNode());
assert jcache(i).localSize() != 0 || F.isEmpty(keysCol);
}
// Cross-check sizes from inside each JVM (local or remote).
for (int i = 0; i < gridCount(); i++)
executeOnLocalOrRemoteJvm(i, new CheckCacheSizeTask(map));
// Per-node PRIMARY size equals the number of keys mapped to that node.
for (int i = 0; i < gridCount(); i++) {
Collection<String> keysCol = mapped.get(grid(i).localNode());
assertEquals("Failed check for grid: " + i, !F.isEmpty(keysCol) ? keysCol.size() : 0,
jcache(i).localSize(PRIMARY));
}
int globalPrimarySize = map.size();
for (int i = 0; i < gridCount(); i++)
assertEquals(globalPrimarySize, jcache(i).size(PRIMARY));
// ALL size counts each entry once per copy: every node for REPLICATED,
// primary + backups (capped by grid count) for PARTITIONED.
int times = 1;
if (cacheMode() == REPLICATED)
times = gridCount();
else if (cacheMode() == PARTITIONED)
times = Math.min(gridCount(), jcache().getConfiguration(CacheConfiguration.class).getBackups() + 1);
int globalSize = globalPrimarySize * times;
for (int i = 0; i < gridCount(); i++)
assertEquals(globalSize, jcache(i).size(ALL));
}
/**
 * Verifies containsKey() for a stored key and for an absent key.
 *
 * @throws Exception In case of error.
 */
public void testContainsKey() throws Exception {
    final String presentKey = "testContainsKey";

    jcache().put(presentKey, 1);

    checkContainsKey(true, presentKey);
    checkContainsKey(false, "testContainsKeyWrongKey");
}
/**
 * Verifies containsKey() semantics inside transactions: false for absent keys,
 * true immediately after an uncommitted put within the same tx.
 *
 * @throws Exception If failed.
 */
public void testContainsKeyTx() throws Exception {
if (!txEnabled())
return;
IgniteCache<String, Integer> cache = jcache();
IgniteTransactions txs = ignite(0).transactions();
for (int i = 0; i < 10; i++) {
String key = String.valueOf(i);
// First tx: key absent, containsKey must be false.
try (Transaction tx = txs.txStart()) {
assertNull(key, cache.get(key));
assertFalse(cache.containsKey(key));
tx.commit();
}
// Second tx: put inside the tx makes containsKey true before commit.
try (Transaction tx = txs.txStart()) {
assertNull(key, cache.get(key));
cache.put(key, i);
assertTrue(cache.containsKey(key));
tx.commit();
}
}
}
/**
 * Same as {@code testContainsKeyTx} but for the bulk containsKeys() operation.
 *
 * @throws Exception If failed.
 */
public void testContainsKeysTx() throws Exception {
if (!txEnabled())
return;
IgniteCache<String, Integer> cache = jcache();
IgniteTransactions txs = ignite(0).transactions();
Set<String> keys = new HashSet<>();
for (int i = 0; i < 10; i++) {
String key = String.valueOf(i);
keys.add(key);
}
// All keys absent: containsKeys must be false.
try (Transaction tx = txs.txStart()) {
for (String key : keys)
assertNull(key, cache.get(key));
assertFalse(cache.containsKeys(keys));
tx.commit();
}
// Puts inside the tx make containsKeys true before commit.
try (Transaction tx = txs.txStart()) {
for (String key : keys)
assertNull(key, cache.get(key));
for (String key : keys)
cache.put(key, 0);
assertTrue(cache.containsKeys(keys));
tx.commit();
}
}
/**
 * Verifies that removing a key while holding an explicit multi-key lock
 * does not release the lock for the remaining keys.
 *
 * @throws Exception If failed.
 */
public void testRemoveInExplicitLocks() throws Exception {
if (lockingEnabled()) {
IgniteCache<String, Integer> cache = jcache();
cache.put("a", 1);
Lock lock = cache.lockAll(ImmutableSet.of("a", "b", "c", "d"));
lock.lock();
try {
cache.remove("a");
// Make sure single-key operation did not remove lock.
cache.putAll(F.asMap("b", 2, "c", 3, "d", 4));
}
finally {
lock.unlock();
}
}
}
/**
 * Verifies that removeAll() with skipStore removes entries only from the cache:
 * subsequent gets read-through the untouched store and find the values again.
 *
 * @throws Exception If failed.
 */
public void testRemoveAllSkipStore() throws Exception {
IgniteCache<String, Integer> jcache = jcache();
jcache.putAll(F.asMap("1", 1, "2", 2, "3", 3));
jcache.withSkipStore().removeAll();
// Values survive in the store and are reloaded on get.
assertEquals((Integer)1, jcache.get("1"));
assertEquals((Integer)2, jcache.get("2"));
assertEquals((Integer)3, jcache.get("3"));
}
/**
 * Exercises atomic primitives: getAndPutIfAbsent, conditional replace and
 * conditional remove.
 *
 * @throws IgniteCheckedException If failed.
 */
public void testAtomicOps() throws IgniteCheckedException {
    IgniteCache<String, Integer> cache = jcache();

    final int keyCnt = 10;

    // Initial population: every key absent, so null must be returned.
    for (int idx = 0; idx < keyCnt; idx++)
        assertNull(cache.getAndPutIfAbsent("k" + idx, idx));

    // Conditional replace: succeeds only when expected value matches (odd indexes).
    for (int idx = 0; idx < keyCnt; idx++) {
        boolean mismatch = idx % 2 == 0;

        int expOld = mismatch ? idx + 1 : idx;

        boolean replaced = cache.replace("k" + idx, expOld, -1);

        assertEquals(mismatch, !replaced);
    }

    // Conditional remove: only keys replaced above now hold -1.
    for (int idx = 0; idx < keyCnt; idx++) {
        boolean expRemoved = idx % 2 != 0;

        assertTrue(expRemoved == cache.remove("k" + idx, -1));
    }
}
/**
 * Reads back two stored values and confirms a missing key yields null.
 *
 * @throws Exception In case of error.
 */
public void testGet() throws Exception {
    IgniteCache<String, Integer> c = jcache();

    c.put("key1", 1);
    c.put("key2", 2);

    assert c.get("key1") == 1;
    assert c.get("key2") == 2;

    // Unknown key: null.
    assert c.get("wrongKey") == null;
}
/**
 * Verifies getEntry(): key, value and a non-null version for present keys,
 * null entry for an absent key.
 *
 * @throws Exception In case of error.
 */
public void testGetEntry() throws Exception {
IgniteCache<String, Integer> cache = jcache();
cache.put("key1", 1);
cache.put("key2", 2);
CacheEntry<String, Integer> key1e = cache.getEntry("key1");
CacheEntry<String, Integer> key2e = cache.getEntry("key2");
CacheEntry<String, Integer> wrongKeye = cache.getEntry("wrongKey");
assert key1e.getValue() == 1;
assert key1e.getKey().equals("key1");
assert key1e.version() != null;
assert key2e.getValue() == 2;
assert key2e.getKey().equals("key2");
assert key2e.version() != null;
// Absent key yields a null entry, not an entry with a null value.
assert wrongKeye == null;
}
/**
 * Same checks as testGet() via the getAsync() API.
 *
 * @throws Exception In case of error.
 */
public void testGetAsync() throws Exception {
    IgniteCache<String, Integer> c = jcache();

    c.put("key1", 1);
    c.put("key2", 2);

    IgniteFuture<Integer> f1 = c.getAsync("key1");
    IgniteFuture<Integer> f2 = c.getAsync("key2");
    IgniteFuture<Integer> fMissing = c.getAsync("wrongKey");

    assert f1.get() == 1;
    assert f2.get() == 2;

    // Missing key resolves to null.
    assert fMissing.get() == null;
}
/**
 * Same checks as testGetAsync() via the legacy withAsync()/future() API.
 *
 * @throws Exception In case of error.
 */
public void testGetAsyncOld() throws Exception {
IgniteCache<String, Integer> cache = jcache();
cache.put("key1", 1);
cache.put("key2", 2);
IgniteCache<String, Integer> cacheAsync = cache.withAsync();
// Each get() call arms the async cache; future() must be read before the next call.
cacheAsync.get("key1");
IgniteFuture<Integer> fut1 = cacheAsync.future();
cacheAsync.get("key2");
IgniteFuture<Integer> fut2 = cacheAsync.future();
cacheAsync.get("wrongKey");
IgniteFuture<Integer> fut3 = cacheAsync.future();
assert fut1.get() == 1;
assert fut2.get() == 2;
assert fut3.get() == null;
}
/**
 * Verifies getAll(): NPE on null key set, empty result for empty set,
 * missing keys absent from the result — both outside and inside a transaction.
 *
 * @throws Exception In case of error.
 */
public void testGetAll() throws Exception {
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
final IgniteCache<String, Integer> cache = jcache();
try {
cache.put("key1", 1);
cache.put("key2", 2);
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
// Null key set must be rejected with NPE.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.getAll(null).isEmpty();
return null;
}
}, NullPointerException.class, null);
assert cache.getAll(Collections.<String>emptySet()).isEmpty();
// Missing key "key9999" is simply absent from the result map.
Map<String, Integer> map1 = cache.getAll(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved map1: " + map1);
assert 2 == map1.size() : "Invalid map: " + map1;
assertEquals(1, (int)map1.get("key1"));
assertEquals(2, (int)map1.get("key2"));
assertNull(map1.get("key9999"));
// Repeat to verify a second read returns the same result.
Map<String, Integer> map2 = cache.getAll(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved map2: " + map2);
assert 2 == map2.size() : "Invalid map: " + map2;
assertEquals(1, (int)map2.get("key1"));
assertEquals(2, (int)map2.get("key2"));
assertNull(map2.get("key9999"));
// Now do the same checks but within transaction.
if (txShouldBeUsed()) {
try (Transaction tx0 = transactions().txStart()) {
assert cache.getAll(Collections.<String>emptySet()).isEmpty();
map1 = cache.getAll(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved map1: " + map1);
assert 2 == map1.size() : "Invalid map: " + map1;
assertEquals(1, (int)map1.get("key1"));
assertEquals(2, (int)map1.get("key2"));
assertNull(map1.get("key9999"));
map2 = cache.getAll(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved map2: " + map2);
assert 2 == map2.size() : "Invalid map: " + map2;
assertEquals(1, (int)map2.get("key1"));
assertEquals(2, (int)map2.get("key2"));
assertNull(map2.get("key9999"));
tx0.commit();
}
}
}
/**
 * Verifies getEntries(): NPE on null key set, empty result for empty set,
 * missing keys absent from the result — both outside and inside a transaction.
 *
 * @throws Exception In case of error.
 */
public void testGetEntries() throws Exception {
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
final IgniteCache<String, Integer> cache = jcache();
try {
cache.put("key1", 1);
cache.put("key2", 2);
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
// Null key set must be rejected with NPE.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.getEntries(null).isEmpty();
return null;
}
}, NullPointerException.class, null);
assert cache.getEntries(Collections.<String>emptySet()).isEmpty();
// The collection is unordered, so presence of both entries is tracked with flags.
Collection<CacheEntry<String, Integer>> c1 = cache.getEntries(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved c1: " + c1);
assert 2 == c1.size() : "Invalid collection: " + c1;
boolean b1 = false;
boolean b2 = false;
for (CacheEntry<String, Integer> e : c1) {
if (e.getKey().equals("key1") && e.getValue().equals(1))
b1 = true;
if (e.getKey().equals("key2") && e.getValue().equals(2))
b2 = true;
}
assertTrue(b1 && b2);
// Repeat to verify a second read returns the same result.
Collection<CacheEntry<String, Integer>> c2 = cache.getEntries(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved c2: " + c2);
assert 2 == c2.size() : "Invalid collection: " + c2;
b1 = false;
b2 = false;
for (CacheEntry<String, Integer> e : c2) {
if (e.getKey().equals("key1") && e.getValue().equals(1))
b1 = true;
if (e.getKey().equals("key2") && e.getValue().equals(2))
b2 = true;
}
assertTrue(b1 && b2);
// Now do the same checks but within transaction.
if (txShouldBeUsed()) {
try (Transaction tx0 = transactions().txStart()) {
assert cache.getEntries(Collections.<String>emptySet()).isEmpty();
c1 = cache.getEntries(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved c1: " + c1);
assert 2 == c1.size() : "Invalid collection: " + c1;
b1 = false;
b2 = false;
for (CacheEntry<String, Integer> e : c1) {
if (e.getKey().equals("key1") && e.getValue().equals(1))
b1 = true;
if (e.getKey().equals("key2") && e.getValue().equals(2))
b2 = true;
}
assertTrue(b1 && b2);
c2 = cache.getEntries(ImmutableSet.of("key1", "key2", "key9999"));
info("Retrieved c2: " + c2);
assert 2 == c2.size() : "Invalid collection: " + c2;
b1 = false;
b2 = false;
for (CacheEntry<String, Integer> e : c2) {
if (e.getKey().equals("key1") && e.getValue().equals(1))
b1 = true;
if (e.getKey().equals("key2") && e.getValue().equals(2))
b2 = true;
}
assertTrue(b1 && b2);
tx0.commit();
}
}
}
/**
 * Verifies that getAll() rejects a key set containing a null element with NPE.
 *
 * @throws Exception In case of error.
 */
public void testGetAllWithNulls() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();

    final Set<String> keys = new HashSet<>();

    keys.add("key1");
    keys.add(null);

    // A null element in the key set must trigger NullPointerException.
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.getAll(keys);

            return null;
        }
    }, NullPointerException.class, null);
}
/**
 * Verifies that a transactional get of a non-existing key returns null.
 *
 * @throws Exception If failed.
 */
public void testGetTxNonExistingKey() throws Exception {
if (txShouldBeUsed()) {
try (Transaction ignored = transactions().txStart()) {
assert jcache().get("key999123") == null;
}
}
}
/**
 * Same checks as testGetAll() via the getAllAsync() API.
 *
 * @throws Exception In case of error.
 */
public void testGetAllAsync() throws Exception {
final IgniteCache<String, Integer> cache = jcache();
cache.put("key1", 1);
cache.put("key2", 2);
// Null key set must be rejected with NPE (synchronously, before a future is created).
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.getAllAsync(null);
return null;
}
}, NullPointerException.class, null);
IgniteFuture<Map<String, Integer>> fut2 = cache.getAllAsync(Collections.<String>emptySet());
IgniteFuture<Map<String, Integer>> fut3 = cache.getAllAsync(ImmutableSet.of("key1", "key2"));
assert fut2.get().isEmpty();
assert fut3.get().size() == 2 : "Invalid map: " + fut3.get();
assert fut3.get().get("key1") == 1;
assert fut3.get().get("key2") == 2;
}
/**
 * Same checks as testGetAllAsync() via the legacy withAsync()/future() API.
 *
 * @throws Exception In case of error.
 */
public void testGetAllAsyncOld() throws Exception {
final IgniteCache<String, Integer> cache = jcache();
final IgniteCache<String, Integer> cacheAsync = cache.withAsync();
cache.put("key1", 1);
cache.put("key2", 2);
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cacheAsync.getAll(null);
return null;
}
}, NullPointerException.class, null);
// Each getAll() arms the async cache; future() must be read before the next call.
cacheAsync.getAll(Collections.<String>emptySet());
IgniteFuture<Map<String, Integer>> fut2 = cacheAsync.future();
cacheAsync.getAll(ImmutableSet.of("key1", "key2"));
IgniteFuture<Map<String, Integer>> fut3 = cacheAsync.future();
assert fut2.get().isEmpty();
assert fut3.get().size() == 2 : "Invalid map: " + fut3.get();
assert fut3.get().get("key1") == 1;
assert fut3.get().get("key2") == 2;
}
/**
 * Verifies getAndPut(): returns null on first insert and the previous value
 * on subsequent puts; containsKey/get agree before and after.
 *
 * @throws Exception In case of error.
 */
public void testPut() throws Exception {
IgniteCache<String, Integer> cache = jcache();
// First put of each key returns null (no previous value).
assert cache.getAndPut("key1", 1) == null;
assert cache.getAndPut("key2", 2) == null;
// Check inside transaction.
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
// Put again to check returned values.
assert cache.getAndPut("key1", 1) == 1;
assert cache.getAndPut("key2", 2) == 2;
checkContainsKey(true, "key1");
checkContainsKey(true, "key2");
assert cache.get("key1") != null;
assert cache.get("key2") != null;
assert cache.get("wrong") == null;
// Check outside transaction.
checkContainsKey(true, "key1");
checkContainsKey(true, "key2");
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
assert cache.get("wrong") == null;
assertEquals((Integer)1, cache.getAndPut("key1", 10));
assertEquals((Integer)2, cache.getAndPut("key2", 11));
}
/**
 * Same checks as testPut() but with all puts performed inside an explicit
 * transaction, then re-verified after commit.
 *
 * @throws Exception In case of error.
 */
public void testPutTx() throws Exception {
if (txShouldBeUsed()) {
IgniteCache<String, Integer> cache = jcache();
try (Transaction tx = transactions().txStart()) {
assert cache.getAndPut("key1", 1) == null;
assert cache.getAndPut("key2", 2) == null;
// Check inside transaction.
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
// Put again to check returned values.
assert cache.getAndPut("key1", 1) == 1;
assert cache.getAndPut("key2", 2) == 2;
assert cache.get("key1") != null;
assert cache.get("key2") != null;
assert cache.get("wrong") == null;
tx.commit();
}
// Check outside transaction.
checkContainsKey(true, "key1");
checkContainsKey(true, "key2");
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
assert cache.get("wrong") == null;
assertEquals((Integer)1, cache.getAndPut("key1", 10));
assertEquals((Integer)2, cache.getAndPut("key2", 11));
}
}
/**
* @throws Exception If failed.
*/
public void testTransformOptimisticReadCommitted() throws Exception {
checkTransform(OPTIMISTIC, READ_COMMITTED);
}
/**
* @throws Exception If failed.
*/
public void testTransformOptimisticRepeatableRead() throws Exception {
checkTransform(OPTIMISTIC, REPEATABLE_READ);
}
/**
* @throws Exception If failed.
*/
public void testTransformPessimisticReadCommitted() throws Exception {
checkTransform(PESSIMISTIC, READ_COMMITTED);
}
/**
* @throws Exception If failed.
*/
public void testTransformPessimisticRepeatableRead() throws Exception {
checkTransform(PESSIMISTIC, REPEATABLE_READ);
}
/**
* @throws Exception If failed.
*/
public void testIgniteTransformOptimisticReadCommitted() throws Exception {
checkIgniteTransform(OPTIMISTIC, READ_COMMITTED);
}
/**
 * Tests Ignite entry-processor invocation with OPTIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testIgniteTransformOptimisticRepeatableRead() throws Exception {
checkIgniteTransform(OPTIMISTIC, REPEATABLE_READ);
}
/**
 * Tests Ignite entry-processor invocation with PESSIMISTIC / READ_COMMITTED.
 *
 * @throws Exception If failed.
 */
public void testIgniteTransformPessimisticReadCommitted() throws Exception {
checkIgniteTransform(PESSIMISTIC, READ_COMMITTED);
}
/**
 * Tests Ignite entry-processor invocation with PESSIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testIgniteTransformPessimisticRepeatableRead() throws Exception {
checkIgniteTransform(PESSIMISTIC, REPEATABLE_READ);
}
/**
 * Checks Ignite entry-processor invocation with the given transaction
 * concurrency and isolation, first inside an explicit transaction (when
 * transactions are applicable) and then without one. Processors return the
 * string form of the previous value ({@code "null"} for an absent entry).
 *
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If failed.
 */
private void checkIgniteTransform(TransactionConcurrency concurrency, TransactionIsolation isolation)
    throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key2", 1);
    cache.put("key3", 3);

    Transaction tx = txShouldBeUsed() ? ignite(0).transactions().txStart(concurrency, isolation) : null;

    try {
        assertEquals("null", cache.invoke("key1", INCR_IGNITE_PROCESSOR));
        assertEquals("1", cache.invoke("key2", INCR_IGNITE_PROCESSOR));
        assertEquals("3", cache.invoke("key3", RMV_IGNITE_PROCESSOR));

        if (tx != null)
            tx.commit();
    }
    finally {
        // NOTE(review): the former 'catch (Exception e) { e.printStackTrace(); throw e; }'
        // block was removed - it only duplicated the stack trace on stderr before
        // rethrowing; the exception still propagates to the test framework intact.
        if (tx != null)
            tx.close();
    }

    assertEquals((Integer)1, cache.get("key1"));
    assertEquals((Integer)2, cache.get("key2"));
    assertNull(cache.get("key3"));

    for (int i = 0; i < gridCount(); i++)
        assertNull("Failed for cache: " + i, jcache(i).localPeek("key3", ONHEAP));

    // Repeat the same sequence outside of any explicit transaction.
    cache.remove("key1");
    cache.put("key2", 1);
    cache.put("key3", 3);

    assertEquals("null", cache.invoke("key1", INCR_IGNITE_PROCESSOR));
    assertEquals("1", cache.invoke("key2", INCR_IGNITE_PROCESSOR));
    assertEquals("3", cache.invoke("key3", RMV_IGNITE_PROCESSOR));

    assertEquals((Integer)1, cache.get("key1"));
    assertEquals((Integer)2, cache.get("key2"));
    assertNull(cache.get("key3"));

    for (int i = 0; i < gridCount(); i++)
        assertNull(jcache(i).localPeek("key3", ONHEAP));
}
/**
 * Checks entry-processor invocation with the given transaction concurrency
 * and isolation, first inside an explicit transaction (when transactions
 * are applicable) and then without one. Processors return the string form
 * of the previous value ({@code "null"} for an absent entry).
 *
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If failed.
 */
private void checkTransform(TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key2", 1);
    cache.put("key3", 3);

    Transaction tx = txShouldBeUsed() ? ignite(0).transactions().txStart(concurrency, isolation) : null;

    try {
        assertEquals("null", cache.invoke("key1", INCR_PROCESSOR));
        assertEquals("1", cache.invoke("key2", INCR_PROCESSOR));
        assertEquals("3", cache.invoke("key3", RMV_PROCESSOR));

        if (tx != null)
            tx.commit();
    }
    finally {
        // NOTE(review): the former 'catch (Exception e) { e.printStackTrace(); throw e; }'
        // block was removed - it only duplicated the stack trace on stderr before
        // rethrowing; the exception still propagates to the test framework intact.
        if (tx != null)
            tx.close();
    }

    assertEquals((Integer)1, cache.get("key1"));
    assertEquals((Integer)2, cache.get("key2"));
    assertNull(cache.get("key3"));

    for (int i = 0; i < gridCount(); i++)
        assertNull("Failed for cache: " + i, jcache(i).localPeek("key3", ONHEAP));

    // Repeat the same sequence outside of any explicit transaction.
    cache.remove("key1");
    cache.put("key2", 1);
    cache.put("key3", 3);

    assertEquals("null", cache.invoke("key1", INCR_PROCESSOR));
    assertEquals("1", cache.invoke("key2", INCR_PROCESSOR));
    assertEquals("3", cache.invoke("key3", RMV_PROCESSOR));

    assertEquals((Integer)1, cache.get("key1"));
    assertEquals((Integer)2, cache.get("key2"));
    assertNull(cache.get("key3"));

    for (int i = 0; i < gridCount(); i++)
        assertNull(jcache(i).localPeek("key3", ONHEAP));
}
/**
 * Tests batch entry-processor invocation with OPTIMISTIC / READ_COMMITTED.
 *
 * @throws Exception If failed.
 */
public void testTransformAllOptimisticReadCommitted() throws Exception {
checkTransformAll(OPTIMISTIC, READ_COMMITTED);
}
/**
 * Tests batch entry-processor invocation with OPTIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testTransformAllOptimisticRepeatableRead() throws Exception {
checkTransformAll(OPTIMISTIC, REPEATABLE_READ);
}
/**
 * Tests batch entry-processor invocation with PESSIMISTIC / READ_COMMITTED.
 *
 * @throws Exception If failed.
 */
public void testTransformAllPessimisticReadCommitted() throws Exception {
checkTransformAll(PESSIMISTIC, READ_COMMITTED);
}
/**
 * Tests batch entry-processor invocation with PESSIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testTransformAllPessimisticRepeatableRead() throws Exception {
checkTransformAll(PESSIMISTIC, REPEATABLE_READ);
}
/**
 * Checks {@code invokeAll()} (set- and map-based overloads) with the given
 * transaction concurrency and isolation, verifying both the per-key
 * processor results and the resulting cache contents.
 *
 * @param concurrency Transaction concurrency.
 * @param isolation Transaction isolation.
 * @throws Exception If failed.
 */
private void checkTransformAll(TransactionConcurrency concurrency, TransactionIsolation isolation)
throws Exception {
final IgniteCache<String, Integer> cache = jcache();
cache.put("key2", 1);
cache.put("key3", 3);
// Batch transform inside an explicit transaction (when applicable).
if (txShouldBeUsed()) {
Map<String, EntryProcessorResult<String>> res;
try (Transaction tx = ignite(0).transactions().txStart(concurrency, isolation)) {
res = cache.invokeAll(F.asSet("key1", "key2", "key3"), INCR_PROCESSOR);
tx.commit();
}
assertEquals((Integer)1, cache.get("key1"));
assertEquals((Integer)2, cache.get("key2"));
assertEquals((Integer)4, cache.get("key3"));
// Each result holds the string form of the previous value.
assertEquals("null", res.get("key1").get());
assertEquals("1", res.get("key2").get());
assertEquals("3", res.get("key3").get());
assertEquals(3, res.size());
// Reset state for the non-transactional checks below.
cache.remove("key1");
cache.put("key2", 1);
cache.put("key3", 3);
}
// Batch remove outside a transaction: all three entries must be gone on every node.
Map<String, EntryProcessorResult<String>> res = cache.invokeAll(F.asSet("key1", "key2", "key3"), RMV_PROCESSOR);
for (int i = 0; i < gridCount(); i++) {
assertNull(jcache(i).localPeek("key1", ONHEAP));
assertNull(jcache(i).localPeek("key2", ONHEAP));
assertNull(jcache(i).localPeek("key3", ONHEAP));
}
assertEquals("null", res.get("key1").get());
assertEquals("1", res.get("key2").get());
assertEquals("3", res.get("key3").get());
assertEquals(3, res.size());
cache.remove("key1");
cache.put("key2", 1);
cache.put("key3", 3);
// Batch increment via the set-based overload.
res = cache.invokeAll(F.asSet("key1", "key2", "key3"), INCR_PROCESSOR);
assertEquals((Integer)1, cache.get("key1"));
assertEquals((Integer)2, cache.get("key2"));
assertEquals((Integer)4, cache.get("key3"));
assertEquals("null", res.get("key1").get());
assertEquals("1", res.get("key2").get());
assertEquals("3", res.get("key3").get());
assertEquals(3, res.size());
cache.remove("key1");
cache.put("key2", 1);
cache.put("key3", 3);
// Batch increment via the map-based overload (per-key processor).
res = cache.invokeAll(F.asMap("key1", INCR_PROCESSOR, "key2", INCR_PROCESSOR, "key3", INCR_PROCESSOR));
assertEquals((Integer)1, cache.get("key1"));
assertEquals((Integer)2, cache.get("key2"));
assertEquals((Integer)4, cache.get("key3"));
assertEquals("null", res.get("key1").get());
assertEquals("1", res.get("key2").get());
assertEquals("3", res.get("key3").get());
assertEquals(3, res.size());
}
/**
 * Tests that {@code invokeAll()} rejects null key sets, null processors and
 * null elements inside a key set with {@link NullPointerException}.
 *
 * @throws Exception If failed.
 */
public void testTransformAllWithNulls() throws Exception {
final IgniteCache<String, Integer> cache = jcache();
// Null key set must be rejected.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.invokeAll((Set<String>)null, INCR_PROCESSOR);
return null;
}
}, NullPointerException.class, null);
// Null processor must be rejected.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.invokeAll(F.asSet("key1"), null);
return null;
}
}, NullPointerException.class, null);
{
// A key set containing a null element must be rejected.
final Set<String> keys = new LinkedHashSet<>(2);
keys.add("key1");
keys.add(null);
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.invokeAll(keys, INCR_PROCESSOR);
return null;
}
}, NullPointerException.class, null);
// NOTE(review): this repeats the null-processor check above verbatim;
// possibly intended to exercise a different overload - confirm.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.invokeAll(F.asSet("key1"), null);
return null;
}
}, NullPointerException.class, null);
}
}
/**
 * Tests sequential transforms with OPTIMISTIC concurrency, no initial value.
 *
 * @throws Exception If failed.
 */
public void testTransformSequentialOptimisticNoStart() throws Exception {
checkTransformSequential0(false, OPTIMISTIC);
}
/**
 * Tests sequential transforms with PESSIMISTIC concurrency, no initial value.
 *
 * @throws Exception If failed.
 */
public void testTransformSequentialPessimisticNoStart() throws Exception {
checkTransformSequential0(false, PESSIMISTIC);
}
/**
 * Tests sequential transforms with OPTIMISTIC concurrency and an initial value.
 *
 * @throws Exception If failed.
 */
public void testTransformSequentialOptimisticWithStart() throws Exception {
checkTransformSequential0(true, OPTIMISTIC);
}
/**
 * Tests sequential transforms with PESSIMISTIC concurrency and an initial value.
 *
 * @throws Exception If failed.
 */
public void testTransformSequentialPessimisticWithStart() throws Exception {
checkTransformSequential0(true, PESSIMISTIC);
}
/**
 * Checks that three sequential increment-processor invocations on the same
 * primary key compose correctly, inside and outside a READ_COMMITTED
 * transaction.
 *
 * @param startVal Whether to put value.
 * @param concurrency Concurrency.
 * @throws Exception If failed.
 */
private void checkTransformSequential0(boolean startVal, TransactionConcurrency concurrency)
throws Exception {
IgniteCache<String, Integer> cache = jcache();
// Use a key that is primary on this node to make peeks deterministic.
final String key = primaryKeysForCache(cache, 1).get(0);
Transaction tx = txShouldBeUsed() ? ignite(0).transactions().txStart(concurrency, READ_COMMITTED) : null;
try {
if (startVal)
cache.put(key, 2);
else
assertEquals(null, cache.get(key));
// Each invoke returns the string form of the previous value.
Integer expRes = startVal ? 2 : null;
assertEquals(String.valueOf(expRes), cache.invoke(key, INCR_PROCESSOR));
expRes = startVal ? 3 : 1;
assertEquals(String.valueOf(expRes), cache.invoke(key, INCR_PROCESSOR));
expRes++;
assertEquals(String.valueOf(expRes), cache.invoke(key, INCR_PROCESSOR));
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
// Final value: initial value (or 0) plus three increments.
Integer exp = (startVal ? 2 : 0) + 3;
assertEquals(exp, cache.get(key));
for (int i = 0; i < gridCount(); i++) {
if (ignite(i).affinity(null).isPrimaryOrBackup(grid(i).localNode(), key))
assertEquals(exp, peek(jcache(i), key));
}
}
/**
 * Tests transform-after-remove with OPTIMISTIC concurrency.
 *
 * @throws Exception If failed.
 */
public void testTransformAfterRemoveOptimistic() throws Exception {
checkTransformAfterRemove(OPTIMISTIC);
}
/**
 * Tests transform-after-remove with PESSIMISTIC concurrency.
 *
 * @throws Exception If failed.
 */
public void testTransformAfterRemovePessimistic() throws Exception {
checkTransformAfterRemove(PESSIMISTIC);
}
/**
 * Verifies that entry processors applied after an in-transaction remove see
 * an absent entry rather than the old value: starting from 4, a remove
 * followed by three increments must yield 3.
 *
 * @param concurrency Concurrency.
 * @throws Exception If failed.
 */
private void checkTransformAfterRemove(TransactionConcurrency concurrency) throws Exception {
    IgniteCache<String, Integer> c = jcache();

    c.put("key", 4);

    Transaction transaction = txShouldBeUsed()
        ? ignite(0).transactions().txStart(concurrency, READ_COMMITTED)
        : null;

    try {
        c.remove("key");

        // Three increments on the removed entry: absent -> 1 -> 2 -> 3.
        for (int iter = 0; iter < 3; iter++)
            c.invoke("key", INCR_PROCESSOR);

        if (transaction != null)
            transaction.commit();
    }
    finally {
        if (transaction != null)
            transaction.close();
    }

    assertEquals((Integer)3, c.get("key"));
}
/**
 * Tests transform return value (pre-existing entry) with OPTIMISTIC / READ_COMMITTED.
 *
 * @throws Exception If failed.
 */
public void testTransformReturnValueGetOptimisticReadCommitted() throws Exception {
checkTransformReturnValue(false, OPTIMISTIC, READ_COMMITTED);
}
/**
 * Tests transform return value (pre-existing entry) with OPTIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testTransformReturnValueGetOptimisticRepeatableRead() throws Exception {
checkTransformReturnValue(false, OPTIMISTIC, REPEATABLE_READ);
}
/**
 * Tests transform return value (pre-existing entry) with PESSIMISTIC / READ_COMMITTED.
 *
 * @throws Exception If failed.
 */
public void testTransformReturnValueGetPessimisticReadCommitted() throws Exception {
checkTransformReturnValue(false, PESSIMISTIC, READ_COMMITTED);
}
/**
 * Tests transform return value (pre-existing entry) with PESSIMISTIC / REPEATABLE_READ.
 *
 * @throws Exception If failed.
 */
public void testTransformReturnValueGetPessimisticRepeatableRead() throws Exception {
checkTransformReturnValue(false, PESSIMISTIC, REPEATABLE_READ);
}
/**
 * Tests transform return value when the entry is put inside the transaction.
 *
 * @throws Exception If failed.
 */
public void testTransformReturnValuePutInTx() throws Exception {
checkTransformReturnValue(true, OPTIMISTIC, READ_COMMITTED);
}
/**
 * Checks that a value read after an in-transaction transform reflects
 * exactly one application of the processor (the read is not transformed
 * again).
 *
 * @param put Whether to put value.
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If failed.
 */
private void checkTransformReturnValue(boolean put,
TransactionConcurrency concurrency,
TransactionIsolation isolation)
throws Exception {
IgniteCache<String, Integer> cache = jcache();
// When 'put' is set, the initial value is written inside the tx instead.
if (!put)
cache.put("key", 1);
Transaction tx = txShouldBeUsed() ? ignite(0).transactions().txStart(concurrency, isolation) : null;
try {
if (put)
cache.put("key", 1);
cache.invoke("key", INCR_PROCESSOR);
assertEquals((Integer)2, cache.get("key"));
if (tx != null) {
// Second get inside tx. Make sure read value is not transformed twice.
assertEquals((Integer)2, cache.get("key"));
tx.commit();
}
}
finally {
if (tx != null)
tx.close();
}
}
/**
 * Tests {@code getAndPut()} through the deprecated {@code withAsync()}
 * facade: futures must yield the previous values.
 *
 * @throws Exception In case of error.
 */
public void testGetAndPutAsyncOld() throws Exception {
IgniteCache<String, Integer> cache = jcache();
IgniteCache<String, Integer> cacheAsync = cache.withAsync();
cache.put("key1", 1);
cache.put("key2", 2);
cacheAsync.getAndPut("key1", 10);
IgniteFuture<Integer> fut1 = cacheAsync.future();
cacheAsync.getAndPut("key2", 11);
IgniteFuture<Integer> fut2 = cacheAsync.future();
// Futures return previous values; cache holds the new ones.
assertEquals((Integer)1, fut1.get(5000));
assertEquals((Integer)2, fut2.get(5000));
assertEquals((Integer)10, cache.get("key1"));
assertEquals((Integer)11, cache.get("key2"));
}
/**
 * Tests {@code getAndPutAsync()}: each future must yield the value stored
 * before the async put, while the cache ends up with the new values.
 *
 * @throws Exception In case of error.
 */
public void testGetAndPutAsync() throws Exception {
    IgniteCache<String, Integer> c = jcache();

    c.put("key1", 1);
    c.put("key2", 2);

    IgniteFuture<Integer> oldVal1 = c.getAndPutAsync("key1", 10);
    IgniteFuture<Integer> oldVal2 = c.getAndPutAsync("key2", 11);

    // Futures resolve to the pre-put values.
    assertEquals((Integer)1, oldVal1.get(5000));
    assertEquals((Integer)2, oldVal2.get(5000));

    // Cache now holds the updated values.
    assertEquals((Integer)10, c.get("key1"));
    assertEquals((Integer)11, c.get("key2"));
}
/**
 * Tests old-style async {@code getAndPut()} on absent keys: both futures
 * must resolve to {@code null}.
 *
 * @throws Exception In case of error.
 */
public void testPutAsyncOld0() throws Exception {
IgniteCache<String, Integer> cacheAsync = jcache().withAsync();
cacheAsync.getAndPut("key1", 0);
IgniteFuture<Integer> fut1 = cacheAsync.future();
cacheAsync.getAndPut("key2", 1);
IgniteFuture<Integer> fut2 = cacheAsync.future();
// No previous values exist for either key.
assert fut1.get(5000) == null;
assert fut2.get(5000) == null;
}
/**
 * Tests {@code getAndPutAsync()} on absent keys: both futures must resolve
 * to {@code null}.
 *
 * @throws Exception In case of error.
 */
public void testPutAsync0() throws Exception {
IgniteCache<String, Integer> cache = jcache();
IgniteFuture<Integer> fut1 = cache.getAndPutAsync("key1", 0);
IgniteFuture<Integer> fut2 = cache.getAndPutAsync("key2", 1);
// No previous values exist for either key.
assert fut1.get(5000) == null;
assert fut2.get(5000) == null;
}
/**
 * Tests entry-processor invocation through the deprecated
 * {@code withAsync()} facade.
 *
 * @throws Exception If failed.
 */
public void testInvokeAsyncOld() throws Exception {
IgniteCache<String, Integer> cache = jcache();
cache.put("key2", 1);
cache.put("key3", 3);
IgniteCache<String, Integer> cacheAsync = cache.withAsync();
// In async mode the call itself returns null; results come via future().
assertNull(cacheAsync.invoke("key1", INCR_PROCESSOR));
IgniteFuture<?> fut0 = cacheAsync.future();
assertNull(cacheAsync.invoke("key2", INCR_PROCESSOR));
IgniteFuture<?> fut1 = cacheAsync.future();
assertNull(cacheAsync.invoke("key3", RMV_PROCESSOR));
IgniteFuture<?> fut2 = cacheAsync.future();
// Wait for all invocations to complete.
fut0.get();
fut1.get();
fut2.get();
assertEquals((Integer)1, cache.get("key1"));
assertEquals((Integer)2, cache.get("key2"));
assertNull(cache.get("key3"));
for (int i = 0; i < gridCount(); i++)
assertNull(jcache(i).localPeek("key3", ONHEAP));
}
/**
 * Tests {@code invokeAsync()} with increment and remove entry processors:
 * key1 is created, key2 incremented, key3 removed on every node.
 *
 * @throws Exception If failed.
 */
public void testInvokeAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key2", 1);
    cache.put("key3", 3);

    // Fire all three invocations, then join them in order.
    IgniteFuture<?>[] futs = {
        cache.invokeAsync("key1", INCR_PROCESSOR),
        cache.invokeAsync("key2", INCR_PROCESSOR),
        cache.invokeAsync("key3", RMV_PROCESSOR)
    };

    for (IgniteFuture<?> fut : futs)
        fut.get();

    assertEquals((Integer)1, cache.get("key1"));
    assertEquals((Integer)2, cache.get("key2"));
    assertNull(cache.get("key3"));

    // Removed entry must not linger in any node's on-heap memory.
    for (int i = 0; i < gridCount(); i++)
        assertNull(jcache(i).localPeek("key3", ONHEAP));
}
/**
 * Tests synchronous {@code invoke()}: increments on absent and present
 * entries, removal via a processor, and exception propagation as
 * {@link EntryProcessorException}.
 *
 * @throws Exception If failed.
 */
public void testInvoke() throws Exception {
final IgniteCache<String, Integer> cache = jcache();
// Increment an absent entry: previous value is "null", new value 1.
assertEquals("null", cache.invoke("k0", INCR_PROCESSOR));
assertEquals((Integer)1, cache.get("k0"));
assertEquals("1", cache.invoke("k0", INCR_PROCESSOR));
assertEquals((Integer)2, cache.get("k0"));
cache.put("k1", 1);
assertEquals("1", cache.invoke("k1", INCR_PROCESSOR));
assertEquals((Integer)2, cache.get("k1"));
assertEquals("2", cache.invoke("k1", INCR_PROCESSOR));
assertEquals((Integer)3, cache.get("k1"));
// A processor that removes the entry and returns null.
EntryProcessor<String, Integer, Integer> c = new RemoveAndReturnNullEntryProcessor();
assertNull(cache.invoke("k1", c));
assertNull(cache.get("k1"));
for (int i = 0; i < gridCount(); i++)
assertNull(jcache(i).localPeek("k1", ONHEAP));
// A throwing processor must surface as EntryProcessorException.
final EntryProcessor<String, Integer, Integer> errProcessor = new FailedEntryProcessor();
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.invoke("k1", errProcessor);
return null;
}
}, EntryProcessorException.class, "Test entry processor exception.");
}
/**
 * Tests {@code put()} inside an explicit transaction (when applicable).
 *
 * @throws Exception In case of error.
 */
public void testPutx() throws Exception {
if (txShouldBeUsed())
checkPut(true);
}
/**
 * Tests {@code put()} without an explicit transaction.
 *
 * @throws Exception In case of error.
 */
public void testPutxNoTx() throws Exception {
checkPut(false);
}
/**
 * Puts two entries, optionally inside an explicit transaction, and verifies
 * visibility both inside and after the transaction.
 *
 * @param inTx Whether to start transaction.
 * @throws Exception If failed.
 */
private void checkPut(boolean inTx) throws Exception {
Transaction tx = inTx ? transactions().txStart() : null;
IgniteCache<String, Integer> cache = jcache();
try {
cache.put("key1", 1);
cache.put("key2", 2);
// Check inside transaction.
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
if (tx != null)
tx.commit();
}
finally {
// Rolls back if commit was not reached.
if (tx != null)
tx.close();
}
checkSize(F.asSet("key1", "key2"));
// Check outside transaction.
checkContainsKey(true, "key1");
checkContainsKey(true, "key2");
checkContainsKey(false, "wrong");
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
assert cache.get("wrong") == null;
}
/**
 * Tests old-style async {@code put()} combined with an async transaction
 * commit through {@code withAsync()}.
 *
 * @throws Exception If failed.
 */
public void testPutAsyncOld() throws Exception {
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
IgniteCache<String, Integer> cacheAsync = jcache().withAsync();
try {
jcache().put("key2", 1);
cacheAsync.put("key1", 10);
IgniteFuture<?> fut1 = cacheAsync.future();
cacheAsync.put("key2", 11);
IgniteFuture<?> fut2 = cacheAsync.future();
IgniteFuture<Transaction> f = null;
if (tx != null) {
// Commit asynchronously through the tx async facade.
tx = (Transaction)tx.withAsync();
tx.commit();
f = tx.future();
}
assertNull(fut1.get());
assertNull(fut2.get());
// Commit future (if any) must complete in COMMITTED state.
assert f == null || f.get().state() == COMMITTED;
}
finally {
if (tx != null)
tx.close();
}
checkSize(F.asSet("key1", "key2"));
assert jcache().get("key1") == 10;
assert jcache().get("key2") == 11;
}
/**
 * Tests {@code putAsync()} combined with {@code commitAsync()}: both puts
 * must complete and the commit future must succeed.
 *
 * @throws Exception If failed.
 */
public void testPutAsync() throws Exception {
    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        jcache().put("key2", 1);

        IgniteFuture<?> fut1 = jcache().putAsync("key1", 10);
        IgniteFuture<?> fut2 = jcache().putAsync("key2", 11);

        IgniteFuture<Void> f = null;

        if (tx != null)
            f = tx.commitAsync();

        assertNull(fut1.get());
        assertNull(fut2.get());

        // A commit failure must fail the test. The previous
        // 'catch (Throwable t) { assert false : ...; }' pattern silently
        // swallowed the failure when JVM assertions were disabled.
        if (f != null)
            f.get();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    checkSize(F.asSet("key1", "key2"));

    assert jcache().get("key1") == 10;
    assert jcache().get("key2") == 11;
}
/**
 * Tests {@code putAll()}: initial batch insert followed by a batch
 * overwrite of the same keys.
 *
 * @throws Exception In case of error.
 */
public void testPutAll() throws Exception {
Map<String, Integer> map = F.asMap("key1", 1, "key2", 2);
IgniteCache<String, Integer> cache = jcache();
cache.putAll(map);
checkSize(F.asSet("key1", "key2"));
assert cache.get("key1") == 1;
assert cache.get("key2") == 2;
// Overwrite both entries with a second batch.
map.put("key1", 10);
map.put("key2", 20);
cache.putAll(map);
checkSize(F.asSet("key1", "key2"));
assert cache.get("key1") == 10;
assert cache.get("key2") == 20;
}
/**
 * Tests that null keys in {@code put()}, {@code remove()} and
 * {@code putAll()} abort the enclosing transaction with
 * {@link NullPointerException} and leave previously committed values
 * untouched.
 *
 * @throws Exception In case of error.
 */
public void testNullInTx() throws Exception {
if (!txShouldBeUsed())
return;
final IgniteCache<String, Integer> cache = jcache();
for (int i = 0; i < 100; i++) {
final String key = "key-" + i;
assertNull(cache.get(key));
// put(null, ...) must throw and roll back the whole tx.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
IgniteTransactions txs = transactions();
try (Transaction tx = txs.txStart()) {
cache.put(key, 1);
cache.put(null, 2);
tx.commit();
}
return null;
}
}, NullPointerException.class, null);
assertNull(cache.get(key));
cache.put(key, 1);
assertEquals(1, (int)cache.get(key));
// remove(null) must throw and roll back the whole tx.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
IgniteTransactions txs = transactions();
try (Transaction tx = txs.txStart()) {
cache.put(key, 2);
cache.remove(null);
tx.commit();
}
return null;
}
}, NullPointerException.class, null);
assertEquals(1, (int)cache.get(key));
cache.put(key, 2);
assertEquals(2, (int)cache.get(key));
// putAll() with a null key in the map must throw and roll back.
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
IgniteTransactions txs = transactions();
Map<String, Integer> map = new LinkedHashMap<>();
map.put("k1", 1);
map.put("k2", 2);
map.put(null, 3);
try (Transaction tx = txs.txStart()) {
cache.put(key, 1);
cache.putAll(map);
tx.commit();
}
return null;
}
}, NullPointerException.class, null);
// Nothing from the failed batch may be visible.
assertNull(cache.get("k1"));
assertNull(cache.get("k2"));
assertEquals(2, (int)cache.get(key));
cache.put(key, 3);
assertEquals(3, (int)cache.get(key));
}
}
/**
 * Tests that null keys and values are rejected with
 * {@link NullPointerException} by {@code putAll()}, {@code put()},
 * {@code getAndPut()}, {@code replace()} and {@code getAndReplace()}.
 *
 * @throws Exception In case of error.
 */
public void testPutAllWithNulls() throws Exception {
final IgniteCache<String, Integer> cache = jcache();
{
// putAll() with a null key must fail; valid entries can still be put.
final Map<String, Integer> m = new LinkedHashMap<>(2);
m.put("key1", 1);
m.put(null, 2);
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.putAll(m);
return null;
}
}, NullPointerException.class, null);
cache.put("key1", 1);
assertEquals(1, (int)cache.get("key1"));
}
{
// putAll() with a null value must fail; fixing the value makes it succeed.
final Map<String, Integer> m = new LinkedHashMap<>(2);
m.put("key3", 3);
m.put("key4", null);
GridTestUtils.assertThrows(log, new Callable<Void>() {
@Override public Void call() throws Exception {
cache.putAll(m);
return null;
}
}, NullPointerException.class, null);
m.put("key4", 4);
cache.putAll(m);
assertEquals(3, (int)cache.get("key3"));
assertEquals(4, (int)cache.get("key4"));
}
// Single-entry operations: null keys and null values are all rejected.
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.put("key1", null);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.getAndPut("key1", null);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.put(null, 1);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.replace(null, 1);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.getAndReplace(null, 1);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.replace("key", null);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.getAndReplace("key", null);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
// Three-argument replace(): each argument position is checked for null.
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.replace(null, 1, 2);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.replace("key", null, 2);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
assertThrows(log, new Callable<Object>() {
@Nullable @Override public Object call() throws Exception {
cache.replace("key", 1, null);
return null;
}
}, NullPointerException.class, A.NULL_MSG_PREFIX);
}
/**
 * Tests old-style async {@code putAll()} through {@code withAsync()}: two
 * sequential batch puts, the second overwriting the first.
 *
 * @throws Exception In case of error.
 */
public void testPutAllAsyncOld() throws Exception {
    Map<String, Integer> map = F.asMap("key1", 1, "key2", 2);

    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cacheAsync.putAll(map);

    IgniteFuture<?> f1 = cacheAsync.future();

    // Use a fresh map for the second batch: mutating 'map' here could race
    // with the still-running first putAll, which may read it concurrently.
    cacheAsync.putAll(F.asMap("key1", 10, "key2", 20));

    IgniteFuture<?> f2 = cacheAsync.future();

    assertNull(f2.get());
    assertNull(f1.get());

    checkSize(F.asSet("key1", "key2"));

    assert cache.get("key1") == 10;
    assert cache.get("key2") == 20;
}
/**
 * Tests {@code putAllAsync()}: two sequential async batch puts, the second
 * overwriting the first.
 *
 * @throws Exception In case of error.
 */
public void testPutAllAsync() throws Exception {
    Map<String, Integer> map = F.asMap("key1", 1, "key2", 2);

    IgniteCache<String, Integer> cache = jcache();

    IgniteFuture<?> f1 = cache.putAllAsync(map);

    // Use a fresh map for the second batch: mutating 'map' here could race
    // with the still-running first putAll, which may read it concurrently.
    IgniteFuture<?> f2 = cache.putAllAsync(F.asMap("key1", 10, "key2", 20));

    assertNull(f2.get());
    assertNull(f1.get());

    checkSize(F.asSet("key1", "key2"));

    assert cache.get("key1") == 10;
    assert cache.get("key2") == 20;
}
/**
 * Tests {@code getAndPutIfAbsent()}: absent key stores and returns null,
 * present key returns the existing value; also covers evicted (swap) and
 * store-loaded entries, and the same checks inside a transaction.
 *
 * @throws Exception In case of error.
 */
public void testGetAndPutIfAbsent() throws Exception {
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
IgniteCache<String, Integer> cache = jcache();
try {
// Absent key: null returned, 1 stored.
assert cache.getAndPutIfAbsent("key", 1) == null;
assert cache.get("key") != null;
assert cache.get("key") == 1;
// Present key: existing value returned, value unchanged.
assert cache.getAndPutIfAbsent("key", 2) != null;
assert cache.getAndPutIfAbsent("key", 2) == 1;
assert cache.get("key") != null;
assert cache.get("key") == 1;
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
assert cache.getAndPutIfAbsent("key", 2) != null;
for (int i = 0; i < gridCount(); i++) {
info("Peek on node [i=" + i + ", id=" + grid(i).localNode().id() + ", val=" +
grid(i).cache(null).localPeek("key", ONHEAP) + ']');
}
assertEquals((Integer)1, cache.getAndPutIfAbsent("key", 2));
assert cache.get("key") != null;
assert cache.get("key") == 1;
// Check swap.
cache.put("key2", 1);
cache.localEvict(Collections.singleton("key2"));
assertEquals((Integer)1, cache.getAndPutIfAbsent("key2", 3));
// Check db.
if (!isMultiJvm()) {
storeStgy.putToStore("key3", 3);
assertEquals((Integer)3, cache.getAndPutIfAbsent("key3", 4));
assertEquals((Integer)3, cache.get("key3"));
}
assertEquals((Integer)1, cache.get("key2"));
cache.localEvict(Collections.singleton("key2"));
// Same checks inside tx.
tx = txShouldBeUsed() ? transactions().txStart() : null;
try {
assertEquals((Integer)1, cache.getAndPutIfAbsent("key2", 3));
if (tx != null)
tx.commit();
assertEquals((Integer)1, cache.get("key2"));
}
finally {
if (tx != null)
tx.close();
}
}
/**
 * Tests {@code getAndPutIfAbsent()} through the deprecated
 * {@code withAsync()} facade, including entries evicted to swap, entries
 * loaded from the store, and the same checks inside a transaction.
 *
 * @throws Exception If failed.
 */
public void testGetAndPutIfAbsentAsyncOld() throws Exception {
    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    try {
        cacheAsync.getAndPutIfAbsent("key", 1);

        IgniteFuture<Integer> fut1 = cacheAsync.future();

        // Absent key: null returned, 1 stored.
        assertNull(fut1.get());
        assertEquals((Integer)1, cache.get("key"));

        cacheAsync.getAndPutIfAbsent("key", 2);

        IgniteFuture<Integer> fut2 = cacheAsync.future();

        // Present key: existing value returned, value unchanged.
        assertEquals((Integer)1, fut2.get());
        assertEquals((Integer)1, cache.get("key"));

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    // Check swap.
    cache.put("key2", 1);

    cache.localEvict(Collections.singleton("key2"));

    cacheAsync.getAndPutIfAbsent("key2", 3);

    assertEquals((Integer)1, cacheAsync.<Integer>future().get());

    // Check db.
    if (!isMultiJvm()) {
        storeStgy.putToStore("key3", 3);

        cacheAsync.getAndPutIfAbsent("key3", 4);

        assertEquals((Integer)3, cacheAsync.<Integer>future().get());
    }

    cache.localEvict(Collections.singleton("key2"));

    // Same checks inside tx.
    tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        cacheAsync.getAndPutIfAbsent("key2", 3);

        // Typed future so expected and actual compare as Integers,
        // consistent with every other assertion in this test (the raw
        // 'assertEquals(1, cacheAsync.future().get())' compared an Object).
        assertEquals((Integer)1, cacheAsync.<Integer>future().get());

        if (tx != null)
            tx.commit();

        assertEquals((Integer)1, cache.get("key2"));
    }
    finally {
        if (tx != null)
            tx.close();
    }
}
/**
 * Tests {@code getAndPutIfAbsentAsync()}, including entries evicted to
 * swap, entries loaded from the store, and the same checks inside a
 * transaction.
 *
 * @throws Exception If failed.
 */
public void testGetAndPutIfAbsentAsync() throws Exception {
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
IgniteCache<String, Integer> cache = jcache();
try {
// Absent key: null returned, 1 stored.
IgniteFuture<Integer> fut1 = cache.getAndPutIfAbsentAsync("key", 1);
assertNull(fut1.get());
assertEquals((Integer)1, cache.get("key"));
// Present key: existing value returned, value unchanged.
IgniteFuture<Integer> fut2 = cache.getAndPutIfAbsentAsync("key", 2);
assertEquals((Integer)1, fut2.get());
assertEquals((Integer)1, cache.get("key"));
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
// Check swap.
cache.put("key2", 1);
cache.localEvict(Collections.singleton("key2"));
assertEquals((Integer)1, cache.getAndPutIfAbsentAsync("key2", 3).get());
// Check db.
if (!isMultiJvm()) {
storeStgy.putToStore("key3", 3);
assertEquals((Integer)3, cache.getAndPutIfAbsentAsync("key3", 4).get());
}
cache.localEvict(Collections.singleton("key2"));
// Same checks inside tx.
tx = txShouldBeUsed() ? transactions().txStart() : null;
try {
assertEquals(1, (int)cache.getAndPutIfAbsentAsync("key2", 3).get());
if (tx != null)
tx.commit();
assertEquals((Integer)1, cache.get("key2"));
}
finally {
if (tx != null)
tx.close();
}
}
/**
 * Tests {@code putIfAbsent()}: true only for an absent key, false for
 * present, evicted (swap) and store-loaded entries, including inside a
 * transaction.
 *
 * @throws Exception If failed.
 */
public void testPutIfAbsent() throws Exception {
IgniteCache<String, Integer> cache = jcache();
assertNull(cache.get("key"));
// Absent key: stored, returns true.
assert cache.putIfAbsent("key", 1);
assert cache.get("key") != null && cache.get("key") == 1;
// Present key: not stored, returns false.
assert !cache.putIfAbsent("key", 2);
assert cache.get("key") != null && cache.get("key") == 1;
// Check swap.
cache.put("key2", 1);
cache.localEvict(Collections.singleton("key2"));
assertFalse(cache.putIfAbsent("key2", 3));
// Check db.
if (!isMultiJvm()) {
storeStgy.putToStore("key3", 3);
assertFalse(cache.putIfAbsent("key3", 4));
}
cache.localEvict(Collections.singleton("key2"));
// Same checks inside tx.
Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;
try {
assertFalse(cache.putIfAbsent("key2", 3));
if (tx != null)
tx.commit();
assertEquals((Integer)1, cache.get("key2"));
}
finally {
if (tx != null)
tx.close();
}
}
/**
 * Tests async {@code putIfAbsent()} inside an explicit transaction
 * (when applicable).
 *
 * @throws Exception In case of error.
 */
public void testPutxIfAbsentAsync() throws Exception {
if (txShouldBeUsed())
checkPutxIfAbsentAsync(true);
}
/**
 * Tests async {@code putIfAbsent()} without an explicit transaction.
 *
 * @throws Exception In case of error.
 */
public void testPutxIfAbsentAsyncNoTx() throws Exception {
checkPutxIfAbsentAsync(false);
}
/**
 * Checks old-style async {@code putIfAbsent()} through {@code withAsync()}:
 * true only for an absent key; false for present, evicted (swap) and
 * store-loaded entries, including inside a transaction.
 *
 * @param inTx In tx flag.
 * @throws Exception If failed.
 */
private void checkPutxIfAbsentAsyncOld(boolean inTx) throws Exception {
IgniteCache<String, Integer> cache = jcache();
IgniteCache<String, Integer> cacheAsync = cache.withAsync();
cacheAsync.putIfAbsent("key", 1);
IgniteFuture<Boolean> fut1 = cacheAsync.future();
// Absent key: stored, future yields true.
assert fut1.get();
assert cache.get("key") != null && cache.get("key") == 1;
cacheAsync.putIfAbsent("key", 2);
IgniteFuture<Boolean> fut2 = cacheAsync.future();
// Present key: not stored, future yields false.
assert !fut2.get();
assert cache.get("key") != null && cache.get("key") == 1;
// Check swap.
cache.put("key2", 1);
cache.localEvict(Collections.singleton("key2"));
cacheAsync.putIfAbsent("key2", 3);
assertFalse(cacheAsync.<Boolean>future().get());
// Check db.
if (!isMultiJvm()) {
storeStgy.putToStore("key3", 3);
cacheAsync.putIfAbsent("key3", 4);
assertFalse(cacheAsync.<Boolean>future().get());
}
// NOTE(review): singletonList here vs. singleton elsewhere - both are
// Collections, behavior looks equivalent; confirm the difference is unintentional.
cache.localEvict(Collections.singletonList("key2"));
// Same checks inside tx.
Transaction tx = inTx ? transactions().txStart() : null;
try {
cacheAsync.putIfAbsent("key2", 3);
assertFalse(cacheAsync.<Boolean>future().get());
if (!isMultiJvm()) {
cacheAsync.putIfAbsent("key3", 4);
assertFalse(cacheAsync.<Boolean>future().get());
}
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
assertEquals((Integer)1, cache.get("key2"));
if (!isMultiJvm())
assertEquals((Integer)3, cache.get("key3"));
}
/**
 * Checks {@code putIfAbsentAsync()}: true only for an absent key; false for
 * present, evicted (swap) and store-loaded entries, including inside a
 * transaction.
 *
 * @param inTx In tx flag.
 * @throws Exception If failed.
 */
private void checkPutxIfAbsentAsync(boolean inTx) throws Exception {
IgniteCache<String, Integer> cache = jcache();
// Absent key: stored, future yields true.
IgniteFuture<Boolean> fut1 = cache.putIfAbsentAsync("key", 1);
assert fut1.get();
assert cache.get("key") != null && cache.get("key") == 1;
// Present key: not stored, future yields false.
IgniteFuture<Boolean> fut2 = cache.putIfAbsentAsync("key", 2);
assert !fut2.get();
assert cache.get("key") != null && cache.get("key") == 1;
// Check swap.
cache.put("key2", 1);
cache.localEvict(Collections.singleton("key2"));
assertFalse(cache.putIfAbsentAsync("key2", 3).get());
// Check db.
if (!isMultiJvm()) {
storeStgy.putToStore("key3", 3);
assertFalse(cache.putIfAbsentAsync("key3", 4).get());
}
cache.localEvict(Collections.singletonList("key2"));
// Same checks inside tx.
Transaction tx = inTx ? transactions().txStart() : null;
try {
assertFalse(cache.putIfAbsentAsync("key2", 3).get());
if (!isMultiJvm())
assertFalse(cache.putIfAbsentAsync("key3", 4).get());
if (tx != null)
tx.commit();
}
finally {
if (tx != null)
tx.close();
}
assertEquals((Integer)1, cache.get("key2"));
if (!isMultiJvm())
assertEquals((Integer)3, cache.get("key3"));
}
/**
 * Checks that two back-to-back old-style async {@code putIfAbsent} calls on the
 * same async proxy both succeed for distinct keys.
 *
 * @throws Exception In case of error.
 */
public void testPutIfAbsentAsyncConcurrentOld() throws Exception {
    IgniteCache<String, Integer> cacheAsync = jcache().withAsync();

    cacheAsync.putIfAbsent("key1", 1);

    // future() must be captured before the next async op replaces it on the proxy.
    IgniteFuture<Boolean> fut1 = cacheAsync.future();

    cacheAsync.putIfAbsent("key2", 2);

    IgniteFuture<Boolean> fut2 = cacheAsync.future();

    assert fut1.get();
    assert fut2.get();
}
/**
 * Checks that two concurrent new-style {@code putIfAbsentAsync} calls for
 * distinct keys both succeed.
 *
 * @throws Exception In case of error.
 */
public void testPutIfAbsentAsyncConcurrent() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    // Issue both operations before waiting on either future.
    IgniteFuture<Boolean> fut1 = cache.putIfAbsentAsync("key1", 1);

    IgniteFuture<Boolean> fut2 = cache.putIfAbsentAsync("key2", 2);

    assert fut1.get();
    assert fut2.get();
}
/**
 * Checks synchronous {@code getAndReplace} and 3-arg {@code replace}: replace of
 * an existing value, no-op on a missing key, conditional replace with wrong and
 * right expected values, replace after local eviction, replace of a store-backed
 * entry, and replace inside a transaction.
 *
 * @throws Exception If failed.
 */
public void testGetAndReplace() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    info("key 1 -> 2");

    // getAndReplace returns the previous value.
    assert cache.getAndReplace("key", 2) == 1;

    assert cache.get("key") == 2;

    // getAndReplace on a missing key is a no-op returning null.
    assert cache.getAndReplace("wrong", 0) == null;

    assert cache.get("wrong") == null;

    info("key 0 -> 3");

    // Conditional replace with a wrong expected value must fail.
    assert !cache.replace("key", 0, 3);

    assert cache.get("key") == 2;

    info("key 0 -> 3");

    assert !cache.replace("key", 0, 3);

    assert cache.get("key") == 2;

    info("key 2 -> 3");

    // Conditional replace with the right expected value must succeed.
    assert cache.replace("key", 2, 3);

    assert cache.get("key") == 3;

    info("evict key");

    cache.localEvict(Collections.singleton("key"));

    info("key 3 -> 4");

    // Replace must still work on an evicted entry (reads it back).
    assert cache.replace("key", 3, 4);

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        info("key2 5 -> 6");

        // Replace must also see values loaded from the store.
        assert cache.replace("key2", 5, 6);
    }

    for (int i = 0; i < gridCount(); i++) {
        info("Peek key on grid [i=" + i + ", nodeId=" + grid(i).localNode().id() +
            ", peekVal=" + grid(i).cache(null).localPeek("key", ONHEAP) + ']');

        info("Peek key2 on grid [i=" + i + ", nodeId=" + grid(i).localNode().id() +
            ", peekVal=" + grid(i).cache(null).localPeek("key2", ONHEAP) + ']');
    }

    if (!isMultiJvm())
        assertEquals((Integer)6, cache.get("key2"));

    cache.localEvict(Collections.singleton("key"));

    // Same conditional replace inside an explicit transaction, where supported.
    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        assert cache.replace("key", 4, 5);

        if (tx != null)
            tx.commit();

        assert cache.get("key") == 5;
    }
    finally {
        if (tx != null)
            tx.close();
    }
}
/**
 * Checks synchronous unconditional 2-arg {@code replace}: success on a present
 * key, failure on a missing key, success after local eviction and for a
 * store-backed entry, and success inside a transaction.
 *
 * @throws Exception If failed.
 */
public void testReplace() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    assert cache.replace("key", 2);

    assert cache.get("key") == 2;

    // Replace on a missing key must fail.
    assert !cache.replace("wrong", 2);

    cache.localEvict(Collections.singleton("key"));

    // Replace must still succeed on an evicted entry.
    assert cache.replace("key", 4);

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        // Replace must see values loaded from the store.
        assert cache.replace("key2", 6);

        assertEquals((Integer)6, cache.get("key2"));
    }

    cache.localEvict(Collections.singleton("key"));

    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        assert cache.replace("key", 5);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    assert cache.get("key") == 5;
}
/**
 * Old-style async counterpart of {@code testGetAndReplace}: exercises
 * {@code getAndReplace} and conditional {@code replace} via {@code withAsync()}
 * proxy futures.
 *
 * @throws Exception If failed.
 */
public void testGetAndReplaceAsyncOld() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    // getAndReplace returns the previous value via the proxy future.
    cacheAsync.getAndReplace("key", 2);

    assert cacheAsync.<Integer>future().get() == 1;

    assert cache.get("key") == 2;

    // getAndReplace on a missing key is a no-op returning null.
    cacheAsync.getAndReplace("wrong", 0);

    assert cacheAsync.future().get() == null;

    assert cache.get("wrong") == null;

    // Conditional replace with a wrong expected value must fail.
    cacheAsync.replace("key", 0, 3);

    assert !cacheAsync.<Boolean>future().get();

    assert cache.get("key") == 2;

    cacheAsync.replace("key", 0, 3);

    assert !cacheAsync.<Boolean>future().get();

    assert cache.get("key") == 2;

    // Conditional replace with the right expected value must succeed.
    cacheAsync.replace("key", 2, 3);

    assert cacheAsync.<Boolean>future().get();

    assert cache.get("key") == 3;

    cache.localEvict(Collections.singleton("key"));

    // Replace must still work on an evicted entry.
    cacheAsync.replace("key", 3, 4);

    assert cacheAsync.<Boolean>future().get();

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        cacheAsync.replace("key2", 5, 6);

        assert cacheAsync.<Boolean>future().get();

        assertEquals((Integer)6, cache.get("key2"));
    }

    cache.localEvict(Collections.singleton("key"));

    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        cacheAsync.replace("key", 4, 5);

        assert cacheAsync.<Boolean>future().get();

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    assert cache.get("key") == 5;
}
/**
 * New-style async counterpart of {@code testGetAndReplace}: exercises
 * {@code getAndReplaceAsync} and {@code replaceAsync} futures directly.
 *
 * @throws Exception If failed.
 */
public void testGetAndReplaceAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    // getAndReplace returns the previous value.
    assert cache.getAndReplaceAsync("key", 2).get() == 1;

    assert cache.get("key") == 2;

    // getAndReplace on a missing key is a no-op returning null.
    assert cache.getAndReplaceAsync("wrong", 0).get() == null;

    assert cache.get("wrong") == null;

    // Conditional replace with a wrong expected value must fail.
    assert !cache.replaceAsync("key", 0, 3).get();

    assert cache.get("key") == 2;

    assert !cache.replaceAsync("key", 0, 3).get();

    assert cache.get("key") == 2;

    // Conditional replace with the right expected value must succeed.
    assert cache.replaceAsync("key", 2, 3).get();

    assert cache.get("key") == 3;

    cache.localEvict(Collections.singleton("key"));

    assert cache.replaceAsync("key", 3, 4).get();

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        assert cache.replaceAsync("key2", 5, 6).get();

        assertEquals((Integer)6, cache.get("key2"));
    }

    cache.localEvict(Collections.singleton("key"));

    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        assert cache.replaceAsync("key", 4, 5).get();

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    assert cache.get("key") == 5;
}
/**
 * Old-style async counterpart of {@code testReplace}: exercises unconditional
 * 2-arg {@code replace} via {@code withAsync()} proxy futures.
 *
 * @throws Exception If failed.
 */
public void testReplacexAsyncOld() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    cacheAsync.replace("key", 2);

    assert cacheAsync.<Boolean>future().get();

    info("Finished replace.");

    assertEquals((Integer)2, cache.get("key"));

    // NOTE(review): "wrond" looks like a typo for "wrong", but it is harmless —
    // the key only needs to be absent for the replace to fail.
    cacheAsync.replace("wrond", 2);

    assert !cacheAsync.<Boolean>future().get();

    cache.localEvict(Collections.singleton("key"));

    // Replace must still work on an evicted entry.
    cacheAsync.replace("key", 4);

    assert cacheAsync.<Boolean>future().get();

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        cacheAsync.replace("key2", 6);

        assert cacheAsync.<Boolean>future().get();

        assert cache.get("key2") == 6;
    }

    cache.localEvict(Collections.singleton("key"));

    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        cacheAsync.replace("key", 5);

        assert cacheAsync.<Boolean>future().get();

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    assert cache.get("key") == 5;
}
/**
 * New-style async counterpart of {@code testReplace}: exercises unconditional
 * 2-arg {@code replaceAsync} futures directly.
 *
 * @throws Exception If failed.
 */
public void testReplacexAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key", 1);

    assert cache.get("key") == 1;

    assert cache.replaceAsync("key", 2).get();

    info("Finished replace.");

    assertEquals((Integer)2, cache.get("key"));

    // NOTE(review): "wrond" looks like a typo for "wrong", but it is harmless —
    // the key only needs to be absent for the replace to fail.
    assert !cache.replaceAsync("wrond", 2).get();

    cache.localEvict(Collections.singleton("key"));

    // Replace must still work on an evicted entry.
    assert cache.replaceAsync("key", 4).get();

    assert cache.get("key") == 4;

    if (!isMultiJvm()) {
        storeStgy.putToStore("key2", 5);

        assert cache.replaceAsync("key2", 6).get();

        assert cache.get("key2") == 6;
    }

    cache.localEvict(Collections.singleton("key"));

    Transaction tx = txShouldBeUsed() ? transactions().txStart() : null;

    try {
        assert cache.replaceAsync("key", 5).get();

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    assert cache.get("key") == 5;
}
/**
 * Checks conditional {@code remove(key, oldVal)} and {@code getAndRemove}:
 * conditional remove fails on a value mismatch, succeeds on a match, and
 * getAndRemove returns the removed value (or null if absent).
 *
 * @throws Exception In case of error.
 */
public void testGetAndRemove() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);

    // Wrong expected value — entry must survive.
    assert !cache.remove("key1", 0);

    assert cache.get("key1") != null && cache.get("key1") == 1;

    // Matching expected value — entry must be removed.
    assert cache.remove("key1", 1);

    assert cache.get("key1") == null;

    assert cache.getAndRemove("key2") == 2;

    assert cache.get("key2") == null;

    // Second getAndRemove on the same key returns null.
    assert cache.getAndRemove("key2") == null;
}
/**
 * Same as {@code testGetAndRemove} but with non-primitive {@code TestValue}
 * objects, verifying equality-based (not identity-based) conditional remove.
 *
 * @throws Exception If failed.
 */
public void testGetAndRemoveObject() throws Exception {
    IgniteCache<String, TestValue> cache = ignite(0).cache(null);

    TestValue val1 = new TestValue(1);
    TestValue val2 = new TestValue(2);

    cache.put("key1", val1);
    cache.put("key2", val2);

    // Conditional remove with a non-equal value must fail.
    assert !cache.remove("key1", new TestValue(0));

    TestValue oldVal = cache.get("key1");

    assert oldVal != null && F.eq(val1, oldVal);

    assert cache.remove("key1");

    assert cache.get("key1") == null;

    // getAndRemove returns an object equal to the stored one.
    TestValue oldVal2 = cache.getAndRemove("key2");

    assert F.eq(val2, oldVal2);

    assert cache.get("key2") == null;

    assert cache.getAndRemove("key2") == null;
}
/**
 * Checks {@code getAndPut} with non-primitive {@code TestValue} objects: the
 * previous value is returned and the new value is stored.
 *
 * @throws Exception If failed.
 */
public void testGetAndPutObject() throws Exception {
    IgniteCache<String, TestValue> cache = ignite(0).cache(null);

    TestValue val1 = new TestValue(1);
    TestValue val2 = new TestValue(2);

    cache.put("key1", val1);

    TestValue oldVal = cache.get("key1");

    assertEquals(val1, oldVal);

    // getAndPut returns the previous value while installing the new one.
    oldVal = cache.getAndPut("key1", val2);

    assertEquals(val1, oldVal);

    TestValue updVal = cache.get("key1");

    assertEquals(val2, updVal);
}
/**
 * TODO: GG-11241.
 *
 * Checks that removed entries are properly flagged as deleted on every node
 * (partitioned modes only — LOCAL and REPLICATED are skipped).
 *
 * @throws Exception If failed.
 */
public void testDeletedEntriesFlag() throws Exception {
    if (cacheMode() != LOCAL && cacheMode() != REPLICATED) {
        final int cnt = 3;

        IgniteCache<String, Integer> cache = jcache();

        for (int i = 0; i < cnt; i++)
            cache.put(String.valueOf(i), i);

        for (int i = 0; i < cnt; i++)
            cache.remove(String.valueOf(i));

        // Verify deleted-entry state on each grid, possibly in a remote JVM.
        for (int g = 0; g < gridCount(); g++)
            executeOnLocalOrRemoteJvm(g, new CheckEntriesDeletedTask(cnt));
    }
}
/**
 * Checks that {@code localLoadCache} after {@code removeAll} loads store values
 * only onto primary/backup nodes for each key.
 *
 * @throws Exception If failed.
 */
public void testRemoveLoad() throws Exception {
    int cnt = 10;

    Set<String> keys = new HashSet<>();

    for (int i = 0; i < cnt; i++)
        keys.add(String.valueOf(i));

    jcache().removeAll(keys);

    // Seed the store so localLoadCache has something to load.
    for (String key : keys)
        storeStgy.putToStore(key, Integer.parseInt(key));

    for (int g = 0; g < gridCount(); g++)
        grid(g).cache(null).localLoadCache(null);

    for (int g = 0; g < gridCount(); g++) {
        for (int i = 0; i < cnt; i++) {
            String key = String.valueOf(i);

            // Only primary/backup nodes for the key should hold the loaded value.
            if (grid(0).affinity(null).mapKeyToPrimaryAndBackups(key).contains(grid(g).localNode()))
                assertEquals((Integer)i, peek(jcache(g), key));
            else
                assertNull(peek(jcache(g), key));
        }
    }
}
/**
 * Async counterpart of {@code testRemoveLoad} using {@code removeAllAsync} and
 * {@code localLoadCacheAsync}. Skipped in multi-JVM mode (store strategy is
 * only reachable locally).
 *
 * @throws Exception If failed.
 */
public void testRemoveLoadAsync() throws Exception {
    if (isMultiJvm())
        return;

    int cnt = 10;

    Set<String> keys = new HashSet<>();

    for (int i = 0; i < cnt; i++)
        keys.add(String.valueOf(i));

    jcache().removeAllAsync(keys).get();

    // Seed the store so localLoadCacheAsync has something to load.
    for (String key : keys)
        storeStgy.putToStore(key, Integer.parseInt(key));

    for (int g = 0; g < gridCount(); g++)
        grid(g).cache(null).localLoadCacheAsync(null).get();

    for (int g = 0; g < gridCount(); g++) {
        for (int i = 0; i < cnt; i++) {
            String key = String.valueOf(i);

            // Only primary/backup nodes for the key should hold the loaded value.
            if (grid(0).affinity(null).mapKeyToPrimaryAndBackups(key).contains(grid(g).localNode()))
                assertEquals((Integer)i, peek(jcache(g), key));
            else
                assertNull(peek(jcache(g), key));
        }
    }
}
/**
 * Old-style async checks for conditional remove and getAndRemove via the
 * {@code withAsync()} proxy.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAsyncOld() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cache.put("key1", 1);
    cache.put("key2", 2);

    // Wrong expected value — remove must fail and keep the entry.
    cacheAsync.remove("key1", 0);

    assert !cacheAsync.<Boolean>future().get();

    assert cache.get("key1") != null && cache.get("key1") == 1;

    cacheAsync.remove("key1", 1);

    assert cacheAsync.<Boolean>future().get();

    assert cache.get("key1") == null;

    cacheAsync.getAndRemove("key2");

    assert cacheAsync.<Integer>future().get() == 2;

    assert cache.get("key2") == null;

    // Second getAndRemove on the same key returns null.
    cacheAsync.getAndRemove("key2");

    assert cacheAsync.future().get() == null;
}
/**
 * New-style async checks for conditional {@code removeAsync} and
 * {@code getAndRemoveAsync}.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);

    // Wrong expected value — remove must fail and keep the entry.
    assert !cache.removeAsync("key1", 0).get();

    assert cache.get("key1") != null && cache.get("key1") == 1;

    assert cache.removeAsync("key1", 1).get();

    assert cache.get("key1") == null;

    assert cache.getAndRemoveAsync("key2").get() == 2;

    assert cache.get("key2") == null;

    // Second getAndRemove on the same key returns null.
    assert cache.getAndRemoveAsync("key2").get() == null;
}
/**
 * Checks plain {@code remove(key)}: true when the entry existed, false on a
 * second attempt.
 *
 * @throws Exception In case of error.
 */
public void testRemove() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);

    assert cache.remove("key1");

    assert cache.get("key1") == null;

    // Removing an already-removed key returns false.
    assert !cache.remove("key1");
}
/**
 * Old-style async check for plain {@code remove(key)} via the
 * {@code withAsync()} proxy.
 *
 * @throws Exception In case of error.
 */
public void testRemovexAsyncOld() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cache.put("key1", 1);

    cacheAsync.remove("key1");

    assert cacheAsync.<Boolean>future().get();

    assert cache.get("key1") == null;

    // Removing an already-removed key returns false.
    cacheAsync.remove("key1");

    assert !cacheAsync.<Boolean>future().get();
}
/**
 * New-style async check for plain {@code removeAsync(key)}.
 *
 * @throws Exception In case of error.
 */
public void testRemovexAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);

    assert cache.removeAsync("key1").get();

    assert cache.get("key1") == null;

    // Removing an already-removed key returns false.
    assert !cache.removeAsync("key1").get();
}
/**
 * Runs {@link #globalRemoveAll(boolean)} in synchronous mode.
 *
 * @throws Exception In case of error.
 */
public void testGlobalRemoveAll() throws Exception {
    globalRemoveAll(false);
}
/**
 * Runs {@link #globalRemoveAll(boolean)} in asynchronous mode.
 *
 * @throws Exception In case of error.
 */
public void testGlobalRemoveAllAsync() throws Exception {
    globalRemoveAll(true);
}
/**
 * Old-style-async variant of the global removeAll scenario: keyed removeAll,
 * full removeAll issued from another node, and a bulk removeAll of
 * {@link #hugeRemoveAllEntryCount()} entries.
 *
 * NOTE(review): not called from this chunk — presumably invoked from tests
 * elsewhere in the file; verify before removing.
 *
 * @param async If {@code true} uses asynchronous operation.
 * @throws Exception In case of error.
 */
private void globalRemoveAllOld(boolean async) throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    checkSize(F.asSet("key1", "key2", "key3"));

    IgniteCache<String, Integer> asyncCache = cache.withAsync();

    // Keyed removeAll: only the listed keys disappear.
    if (async) {
        asyncCache.removeAll(F.asSet("key1", "key2"));

        asyncCache.future().get();
    }
    else
        cache.removeAll(F.asSet("key1", "key2"));

    checkSize(F.asSet("key3"));

    checkContainsKey(false, "key1");
    checkContainsKey(false, "key2");
    checkContainsKey(true, "key3");

    // Put values again.
    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    // Full removeAll issued from a different node (if one exists) must clear
    // the cache cluster-wide.
    if (async) {
        IgniteCache<String, Integer> asyncCache0 = jcache(gridCount() > 1 ? 1 : 0).withAsync();

        asyncCache0.removeAll();

        asyncCache0.future().get();
    }
    else
        jcache(gridCount() > 1 ? 1 : 0).removeAll();

    assertEquals(0, cache.localSize());

    // Bulk scenario to exercise batched removeAll.
    long entryCnt = hugeRemoveAllEntryCount();

    for (int i = 0; i < entryCnt; i++)
        cache.put(String.valueOf(i), i);

    for (int i = 0; i < entryCnt; i++)
        assertEquals(Integer.valueOf(i), cache.get(String.valueOf(i)));

    if (async) {
        asyncCache.removeAll();

        asyncCache.future().get();
    }
    else
        cache.removeAll();

    for (int i = 0; i < entryCnt; i++)
        assertNull(cache.get(String.valueOf(i)));
}
/**
 * New-style-async variant of the global removeAll scenario: keyed removeAll,
 * full removeAll issued from another node, and a bulk removeAll of
 * {@link #hugeRemoveAllEntryCount()} entries.
 *
 * @param async If {@code true} uses asynchronous operation.
 * @throws Exception In case of error.
 */
private void globalRemoveAll(boolean async) throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    checkSize(F.asSet("key1", "key2", "key3"));

    // Keyed removeAll: only the listed keys disappear.
    if (async)
        cache.removeAllAsync(F.asSet("key1", "key2")).get();
    else
        cache.removeAll(F.asSet("key1", "key2"));

    checkSize(F.asSet("key3"));

    checkContainsKey(false, "key1");
    checkContainsKey(false, "key2");
    checkContainsKey(true, "key3");

    // Put values again.
    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    // Full removeAll issued from a different node (if one exists) must clear
    // the cache cluster-wide.
    if (async)
        jcache(gridCount() > 1 ? 1 : 0).removeAllAsync().get();
    else
        jcache(gridCount() > 1 ? 1 : 0).removeAll();

    assertEquals(0, cache.localSize());

    // Bulk scenario to exercise batched removeAll.
    long entryCnt = hugeRemoveAllEntryCount();

    for (int i = 0; i < entryCnt; i++)
        cache.put(String.valueOf(i), i);

    for (int i = 0; i < entryCnt; i++)
        assertEquals(Integer.valueOf(i), cache.get(String.valueOf(i)));

    if (async)
        cache.removeAllAsync().get();
    else
        cache.removeAll();

    for (int i = 0; i < entryCnt; i++)
        assertNull(cache.get(String.valueOf(i)));
}
/**
 * Number of entries used by the bulk phase of the removeAll tests.
 * Subclasses may override to scale the scenario up or down.
 *
 * @return Count of entries to be removed in removeAll() test.
 */
protected long hugeRemoveAllEntryCount() {
    return 1000L;
}
/**
 * Checks that remove operations are null-hostile: a null key in the set, a null
 * set, a null key, and a null expected value must all throw
 * {@link NullPointerException} without removing anything.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllWithNulls() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();

    final Set<String> c = new LinkedHashSet<>();

    c.add("key1");
    c.add(null);

    // Set containing a null key.
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.removeAll(c);

            return null;
        }
    }, NullPointerException.class, null);

    // Nothing may have been removed by the failed call.
    assertEquals(0, grid(0).cache(null).localSize());

    // Null set.
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.removeAll(null);

            return null;
        }
    }, NullPointerException.class, null);

    // Null key.
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.remove(null);

            return null;
        }
    }, NullPointerException.class, null);

    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.getAndRemove(null);

            return null;
        }
    }, NullPointerException.class, null);

    // Null expected value in conditional remove.
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            cache.remove("key1", null);

            return null;
        }
    }, NullPointerException.class, null);
}
/**
 * Checks that {@code removeAll} tolerates duplicate keys in the argument set.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllDuplicates() throws Exception {
    jcache().removeAll(ImmutableSet.of("key1", "key1", "key1"));
}
/**
 * Checks that {@code removeAll} with duplicate keys commits cleanly inside an
 * explicit transaction (where transactions are supported).
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllDuplicatesTx() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction tx = transactions().txStart()) {
            jcache().removeAll(ImmutableSet.of("key1", "key1", "key1"));

            tx.commit();
        }
    }
}
/**
 * Checks that a full {@code removeAll()} on an empty cache is a harmless no-op.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllEmpty() throws Exception {
    jcache().removeAll();
}
/**
 * Old-style async keyed {@code removeAll} check via the {@code withAsync()}
 * proxy: listed keys are removed, others survive.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllAsyncOld() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    IgniteCache<String, Integer> cacheAsync = cache.withAsync();

    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    checkSize(F.asSet("key1", "key2", "key3"));

    cacheAsync.removeAll(F.asSet("key1", "key2"));

    // removeAll future completes with null (void operation).
    assertNull(cacheAsync.future().get());

    checkSize(F.asSet("key3"));

    checkContainsKey(false, "key1");
    checkContainsKey(false, "key2");
    checkContainsKey(true, "key3");
}
/**
 * New-style async keyed {@code removeAllAsync} check: listed keys are removed,
 * others survive.
 *
 * @throws Exception In case of error.
 */
public void testRemoveAllAsync() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);
    cache.put("key3", 3);

    checkSize(F.asSet("key1", "key2", "key3"));

    // removeAllAsync future completes with null (void operation).
    assertNull(cache.removeAllAsync(F.asSet("key1", "key2")).get());

    checkSize(F.asSet("key3"));

    checkContainsKey(false, "key1");
    checkContainsKey(false, "key2");
    checkContainsKey(true, "key3");
}
/**
 * Checks that {@code loadAll} repopulates cleared entries from the store with
 * their previously written values.
 *
 * @throws Exception In case of error.
 */
public void testLoadAll() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    // Use primary keys so the values are write-through on this node.
    Set<String> keys = new HashSet<>(primaryKeysForCache(cache, 2));

    for (String key : keys)
        assertNull(cache.localPeek(key, ONHEAP));

    Map<String, Integer> vals = new HashMap<>();

    int i = 0;

    for (String key : keys) {
        cache.put(key, i);

        vals.put(key, i);

        i++;
    }

    for (String key : keys)
        assertEquals(vals.get(key), peek(cache, key));

    // Drop the in-memory copies; the store still holds the values.
    cache.clear();

    for (String key : keys)
        assertNull(peek(cache, key));

    loadAll(cache, keys, true);

    // loadAll must have restored the original values from the store.
    for (String key : keys)
        assertEquals(vals.get(key), peek(cache, key));
}
/**
 * Checks that {@code removeAll} works correctly on entries that were cleared
 * first, running from an affinity node (falls back to grid 1 if grid 0 is not
 * one).
 *
 * @throws Exception If failed.
 */
public void testRemoveAfterClear() throws Exception {
    IgniteEx ignite = grid(0);

    boolean affNode = ignite.context().cache().internalCache(null).context().affinityNode();

    if (!affNode) {
        if (gridCount() < 2)
            return;

        // Grid 0 does not hold data — use grid 1 instead.
        ignite = grid(1);
    }

    IgniteCache<Integer, Integer> cache = ignite.cache(null);

    int key = 0;

    Collection<Integer> keys = new ArrayList<>();

    // Pick two keys for which the chosen node is primary.
    for (int k = 0; k < 2; k++) {
        while (!ignite.affinity(null).isPrimary(ignite.localNode(), key))
            key++;

        keys.add(key);

        key++;
    }

    info("Keys: " + keys);

    for (Integer k : keys)
        cache.put(k, k);

    cache.clear();

    // removeAll after clear must leave every node's local size at zero.
    for (int g = 0; g < gridCount(); g++) {
        Ignite grid0 = grid(g);

        grid0.cache(null).removeAll();

        assertTrue(grid0.cache(null).localSize() == 0);
    }
}
/**
 * Checks {@code clear()} semantics: local clear per grid, clear while a key is
 * locked (the locked entry must survive as an entry object), and clear of
 * entries evicted off-heap.
 *
 * @throws Exception In case of error.
 */
public void testClear() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    Set<String> keys = new HashSet<>(primaryKeysForCache(cache, 3));

    for (String key : keys)
        assertNull(cache.get(key));

    Map<String, Integer> vals = new HashMap<>(keys.size());

    int i = 0;

    for (String key : keys) {
        cache.put(key, i);

        vals.put(key, i);

        i++;
    }

    for (String key : keys)
        assertEquals(vals.get(key), peek(cache, key));

    cache.clear();

    for (String key : keys)
        assertNull(peek(cache, key));

    // Clearing every node individually must leave all local sizes at zero.
    for (i = 0; i < gridCount(); i++)
        jcache(i).clear();

    for (i = 0; i < gridCount(); i++)
        assert jcache(i).localSize() == 0;

    for (Map.Entry<String, Integer> entry : vals.entrySet())
        cache.put(entry.getKey(), entry.getValue());

    for (String key : keys)
        assertEquals(vals.get(key), peek(cache, key));

    String first = F.first(keys);

    if (lockingEnabled()) {
        // While a key is locked, clear() must not destroy its entry object.
        Lock lock = cache.lock(first);

        lock.lock();

        try {
            cache.clear();

            GridCacheContext<String, Integer> cctx = context(0);

            GridCacheEntryEx entry = cctx.isNear() ? cctx.near().dht().peekEx(first) :
                cctx.cache().peekEx(first);

            assertNotNull(entry);
        }
        finally {
            lock.unlock();
        }
    }
    else {
        cache.clear();

        cache.put(first, vals.get(first));
    }

    cache.clear();

    assert cache.localSize() == 0 : "Values after clear.";

    i = 0;

    for (String key : keys) {
        cache.put(key, i);

        vals.put(key, i);

        i++;
    }

    cache.put("key1", 1);
    cache.put("key2", 2);

    // Evict everything off-heap, then verify clear() also removes evicted entries.
    cache.localEvict(Sets.union(ImmutableSet.of("key1", "key2"), keys));

    assert cache.localSize(ONHEAP) == 0;

    cache.clear();

    assert cache.localPeek("key1", ONHEAP) == null;
    assert cache.localPeek("key2", ONHEAP) == null;
}
/**
 * Waits (up to 10 seconds) until none of the given keys is locked on any grid,
 * checking both the regular cache entry and — for near caches — the underlying
 * DHT entry.
 *
 * @param keys0 Keys to check.
 * @throws IgniteCheckedException If failed.
 */
protected void checkUnlocked(final Collection<String> keys0) throws IgniteCheckedException {
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            try {
                for (int i = 0; i < gridCount(); i++) {
                    GridCacheAdapter<Object, Object> cache = ((IgniteKernal)ignite(i)).internalCache();

                    for (String key : keys0) {
                        GridCacheEntryEx entry = cache.peekEx(key);

                        if (entry != null) {
                            if (entry.lockedByAny()) {
                                info("Entry is still locked [i=" + i + ", entry=" + entry + ']');

                                return false;
                            }
                        }

                        // For near caches the lock may be held on the DHT entry.
                        if (cache.isNear()) {
                            entry = cache.context().near().dht().peekEx(key);

                            if (entry != null) {
                                if (entry.lockedByAny()) {
                                    info("Entry is still locked [i=" + i + ", entry=" + entry + ']');

                                    return false;
                                }
                            }
                        }
                    }
                }

                return true;
            }
            catch (GridCacheEntryRemovedException ignore) {
                // Entry disappeared between peek and lock check — retry.
                info("Entry was removed, will retry");

                return false;
            }
        }
    }, 10_000);
}
/**
 * Runs {@link #globalClearAll(boolean, boolean)} synchronously.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearAll() throws Exception {
    globalClearAll(false, false);
}
/**
 * Runs {@link #globalClearAll(boolean, boolean)} with the old
 * {@code withAsync()}-style async API.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearAllAsyncOld() throws Exception {
    globalClearAll(true, true);
}
/**
 * Runs {@link #globalClearAll(boolean, boolean)} with the new
 * {@code clearAsync()}-style async API.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearAllAsync() throws Exception {
    globalClearAll(true, false);
}
/**
 * Populates every grid with its own primary keys, clears the cache from one
 * node (sync, old-async or new-async), and verifies all grids are empty.
 *
 * @param async If {@code true} uses async method.
 * @param oldAsync Use old async API.
 * @throws Exception If failed.
 */
protected void globalClearAll(boolean async, boolean oldAsync) throws Exception {
    // Save entries only on their primary nodes. If we didn't do so, clearLocally() will not remove all entries
    // because some of them were blocked due to having readers.
    for (int i = 0; i < gridCount(); i++) {
        for (String key : primaryKeysForCache(jcache(i), 3, 100_000))
            jcache(i).put(key, 1);
    }

    if (async) {
        if (oldAsync) {
            IgniteCache<String, Integer> asyncCache = jcache().withAsync();

            asyncCache.clear();

            asyncCache.future().get();
        }
        else
            jcache().clearAsync().get();
    }
    else
        jcache().clear();

    // Clear issued on one node must empty every node.
    for (int i = 0; i < gridCount(); i++)
        assert jcache(i).localSize() == 0;
}
/**
 * Checks explicit lock/unlock of a single key, verified both through
 * {@code isLocalLocked} and through LOCKED/UNLOCKED cache events.
 *
 * @throws Exception In case of error.
 */
@SuppressWarnings("BusyWait")
public void testLockUnlock() throws Exception {
    if (lockingEnabled()) {
        final CountDownLatch lockCnt = new CountDownLatch(1);
        final CountDownLatch unlockCnt = new CountDownLatch(1);

        // Latch on the lock/unlock events so we don't race the event delivery.
        grid(0).events().localListen(new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                switch (evt.type()) {
                    case EVT_CACHE_OBJECT_LOCKED:
                        lockCnt.countDown();

                        break;
                    case EVT_CACHE_OBJECT_UNLOCKED:
                        unlockCnt.countDown();

                        break;
                }

                return true;
            }
        }, EVT_CACHE_OBJECT_LOCKED, EVT_CACHE_OBJECT_UNLOCKED);

        IgniteCache<String, Integer> cache = jcache();

        String key = primaryKeysForCache(cache, 1).get(0);

        cache.put(key, 1);

        assert !cache.isLocalLocked(key, false);

        Lock lock = cache.lock(key);

        lock.lock();

        try {
            lockCnt.await();

            assert cache.isLocalLocked(key, false);
        }
        finally {
            lock.unlock();
        }

        unlockCnt.await();

        // Unlock is asynchronous — poll briefly until the lock state clears.
        for (int i = 0; i < 100; i++)
            if (cache.isLocalLocked(key, false))
                Thread.sleep(10);
            else
                break;

        assert !cache.isLocalLocked(key, false);
    }
}
/**
 * Checks {@code lockAll} over two keys: both keys report locked while held,
 * both report unlocked after release, and the same multi-lock can be
 * re-acquired.
 *
 * @throws Exception In case of error.
 */
@SuppressWarnings("BusyWait")
public void testLockUnlockAll() throws Exception {
    if (lockingEnabled()) {
        IgniteCache<String, Integer> cache = jcache();

        cache.put("key1", 1);
        cache.put("key2", 2);

        assert !cache.isLocalLocked("key1", false);
        assert !cache.isLocalLocked("key2", false);

        Lock lock1_2 = cache.lockAll(ImmutableSet.of("key1", "key2"));

        lock1_2.lock();

        try {
            assert cache.isLocalLocked("key1", false);
            assert cache.isLocalLocked("key2", false);
        }
        finally {
            lock1_2.unlock();
        }

        // Unlock is asynchronous — poll briefly until both lock states clear.
        for (int i = 0; i < 100; i++)
            if (cache.isLocalLocked("key1", false) || cache.isLocalLocked("key2", false))
                Thread.sleep(10);
            else
                break;

        assert !cache.isLocalLocked("key1", false);
        assert !cache.isLocalLocked("key2", false);

        // Re-acquiring the same multi-lock must work after full release.
        lock1_2.lock();

        try {
            assert cache.isLocalLocked("key1", false);
            assert cache.isLocalLocked("key2", false);
        }
        finally {
            lock1_2.unlock();
        }

        for (int i = 0; i < 100; i++)
            if (cache.isLocalLocked("key1", false) || cache.isLocalLocked("key2", false))
                Thread.sleep(10);
            else
                break;

        assert !cache.isLocalLocked("key1", false);
        assert !cache.isLocalLocked("key2", false);
    }
}
/**
 * Checks {@code peek} on the key's primary node: null before any write, the
 * latest value after put + replace.
 *
 * @throws Exception In case of error.
 */
public void testPeek() throws Exception {
    // Peek locally on the node that owns the key.
    Ignite ignite = primaryIgnite("key");

    IgniteCache<String, Integer> cache = ignite.cache(null);

    assert peek(cache, "key") == null;

    cache.put("key", 1);

    cache.replace("key", 2);

    assertEquals(2, peek(cache, "key").intValue());
}
/**
 * Runs {@link #checkPeekTxRemove(TransactionConcurrency)} with OPTIMISTIC
 * concurrency.
 *
 * @throws Exception If failed.
 */
public void testPeekTxRemoveOptimistic() throws Exception {
    checkPeekTxRemove(OPTIMISTIC);
}
/**
 * Runs {@link #checkPeekTxRemove(TransactionConcurrency)} with PESSIMISTIC
 * concurrency.
 *
 * @throws Exception If failed.
 */
public void testPeekTxRemovePessimistic() throws Exception {
    checkPeekTxRemove(PESSIMISTIC);
}
/**
 * Verifies that after a remove inside a READ_COMMITTED transaction,
 * {@code get} reflects the uncommitted remove while {@code peek} still sees
 * the committed value (peek ignores the transaction).
 *
 * @param concurrency Concurrency.
 * @throws Exception If failed.
 */
private void checkPeekTxRemove(TransactionConcurrency concurrency) throws Exception {
    if (txShouldBeUsed()) {
        Ignite ignite = primaryIgnite("key");

        IgniteCache<String, Integer> cache = ignite.cache(null);

        cache.put("key", 1);

        try (Transaction tx = ignite.transactions().txStart(concurrency, READ_COMMITTED)) {
            cache.remove("key");

            assertNull(cache.get("key")); // Tx sees its own remove.
            assertNotNull(peek(cache, "key")); // localPeek ignores transactions.

            tx.commit();
        }
    }
}
/**
 * Checks that {@code peek} on the primary node returns null for a removed key.
 *
 * @throws Exception If failed.
 */
public void testPeekRemove() throws Exception {
    IgniteCache<String, Integer> cache = primaryCache("key");

    cache.put("key", 1);

    cache.remove("key");

    assertNull(peek(cache, "key"));
}
/**
 * TODO GG-11133.
 *
 * Checks that an entry written with a short TTL expires on all nodes, that
 * evicting the expired entry does not resurrect it (the expired entry must not
 * be swapped), and that a subsequent store load repopulates primary and backup
 * nodes.
 *
 * @throws Exception In case of error.
 */
public void testEvictExpired() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();

    final String key = primaryKeysForCache(cache, 1).get(0);

    cache.put(key, 1);

    assertEquals((Integer)1, cache.get(key));

    long ttl = 500;

    final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));

    // Overwrite with a short TTL so the entry expires quickly.
    grid(0).cache(null).withExpiryPolicy(expiry).put(key, 1);

    final Affinity<String> aff = ignite(0).affinity(null);

    // Wait until the entry is gone from every node.
    boolean wait = waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            for (int i = 0; i < gridCount(); i++) {
                if (peek(jcache(i), key) != null)
                    return false;
            }

            return true;
        }
    }, ttl + 1000);

    assertTrue("Failed to wait for entry expiration.", wait);

    // Expired entry should not be swapped.
    cache.localEvict(Collections.singleton(key));

    // Fix: check the actual test key, not the "key" literal — the primary key
    // returned by primaryKeysForCache() is not necessarily named "key", so the
    // original assertion trivially passed on a never-written key.
    assertNull(peek(cache, key));

    assertNull(cache.localPeek(key, ONHEAP));

    assertTrue(cache.localSize() == 0);

    // Reload from store and verify the value lands on primary and backup nodes.
    load(cache, key, true);

    for (int i = 0; i < gridCount(); i++) {
        if (aff.isPrimary(grid(i).cluster().localNode(), key))
            assertEquals((Integer)1, peek(jcache(i), key));

        if (aff.isBackup(grid(i).cluster().localNode(), key))
            assertEquals((Integer)1, peek(jcache(i), key));
    }
}
/**
 * TODO GG-11133.
 *
 * Checks that {@code peek} returns null and the local size drops to zero after
 * an entry written with a short TTL expires.
 *
 * @throws Exception If failed.
 */
public void testPeekExpired() throws Exception {
    final IgniteCache<String, Integer> c = jcache();

    final String key = primaryKeysForCache(c, 1).get(0);

    info("Using key: " + key);

    c.put(key, 1);

    assertEquals(Integer.valueOf(1), peek(c, key));

    int ttl = 500;

    final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));

    // Overwrite with a short TTL so the entry expires quickly.
    c.withExpiryPolicy(expiry).put(key, 1);

    Thread.sleep(ttl + 100);

    // Expiration cleanup is asynchronous — wait for peek to go null.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return peek(c, key) == null;
        }
    }, 2000);

    assert peek(c, key) == null;

    assert c.localSize() == 0 : "Cache is not empty.";
}
/**
 * TODO GG-11133.
 *
 * Same as {@code testPeekExpired} but the short-TTL entry is written inside a
 * committed transaction.
 *
 * @throws Exception If failed.
 */
public void testPeekExpiredTx() throws Exception {
    if (txShouldBeUsed()) {
        final IgniteCache<String, Integer> c = jcache();

        final String key = "1";

        int ttl = 500;

        // Write the short-TTL entry inside a transaction and commit it.
        try (Transaction tx = grid(0).transactions().txStart()) {
            final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));

            grid(0).cache(null).withExpiryPolicy(expiry).put(key, 1);

            tx.commit();
        }

        // Expiration cleanup is asynchronous — wait for peek to go null.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                return peek(c, key) == null;
            }
        }, 2000);

        assertNull(peek(c, key));

        assert c.localSize() == 0;
    }
}
/**
 * Runs the TTL check inside a transaction (where transactions are supported).
 *
 * @throws Exception If failed.
 */
public void testTtlTx() throws Exception {
    if (txShouldBeUsed())
        checkTtl(true, false);
}
/**
 * Runs the TTL check without a transaction on a fresh entry.
 *
 * @throws Exception If failed.
 */
public void testTtlNoTx() throws Exception {
    checkTtl(false, false);
}
/**
 * Runs the TTL check without a transaction on a pre-existing entry.
 *
 * @throws Exception If failed.
 */
public void testTtlNoTxOldEntry() throws Exception {
    checkTtl(false, true);
}
/**
 * Verifies TTL and expire-time bookkeeping for {@code withExpiryPolicy} puts, both
 * inside and outside of explicit transactions: expire time shifts forward on each
 * expiry-policy update, stays fixed on a plain put, and is fully reset after the
 * entry expires and is removed.
 *
 * @param inTx In tx flag.
 * @param oldEntry {@code True} to check TTL on old entry, {@code false} on new.
 * @throws Exception If failed.
 */
private void checkTtl(boolean inTx, boolean oldEntry) throws Exception {
    // TODO GG-11133.
    // NOTE(review): test is disabled pending GG-11133; everything below is currently dead code.
    if (true)
        return;

    int ttl = 1000;

    final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));

    final IgniteCache<String, Integer> c = jcache();

    final String key = primaryKeysForCache(jcache(), 1).get(0);

    IgnitePair<Long> entryTtl;

    if (oldEntry) {
        // Entry created without an expiry policy: both ttl and expire time must be zero.
        c.put(key, 1);

        entryTtl = entryTtl(fullCache(), key);
        assertNotNull(entryTtl.get1());
        assertNotNull(entryTtl.get2());
        assertEquals((Long)0L, entryTtl.get1());
        assertEquals((Long)0L, entryTtl.get2());
    }

    long startTime = System.currentTimeMillis();

    if (inTx) {
        // Rollback transaction for the first time.
        Transaction tx = transactions().txStart();

        try {
            jcache().withExpiryPolicy(expiry).put(key, 1);
        }
        finally {
            tx.rollback();
        }

        if (oldEntry) {
            // The rolled-back update must not have touched ttl/expire time.
            entryTtl = entryTtl(fullCache(), key);
            assertEquals((Long)0L, entryTtl.get1());
            assertEquals((Long)0L, entryTtl.get2());
        }
    }

    // Now commit transaction and check that ttl and expire time have been saved.
    Transaction tx = inTx ? transactions().txStart() : null;

    try {
        jcache().withExpiryPolicy(expiry).put(key, 1);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    long[] expireTimes = new long[gridCount()];

    for (int i = 0; i < gridCount(); i++) {
        if (grid(i).affinity(null).isPrimaryOrBackup(grid(i).localNode(), key)) {
            IgnitePair<Long> curEntryTtl = entryTtl(jcache(i), key);

            assertNotNull(curEntryTtl.get1());
            assertNotNull(curEntryTtl.get2());
            assertEquals(ttl, (long)curEntryTtl.get1());
            assertTrue(curEntryTtl.get2() > startTime);

            expireTimes[i] = curEntryTtl.get2();
        }
    }

    // One more update from the same cache entry to ensure that expire time is shifted forward.
    U.sleep(100);

    tx = inTx ? transactions().txStart() : null;

    try {
        jcache().withExpiryPolicy(expiry).put(key, 2);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    for (int i = 0; i < gridCount(); i++) {
        if (grid(i).affinity(null).isPrimaryOrBackup(grid(i).localNode(), key)) {
            IgnitePair<Long> curEntryTtl = entryTtl(jcache(i), key);

            assertNotNull(curEntryTtl.get1());
            assertNotNull(curEntryTtl.get2());
            assertEquals(ttl, (long)curEntryTtl.get1());
            assertTrue(curEntryTtl.get2() > startTime);

            expireTimes[i] = curEntryTtl.get2();
        }
    }

    // And one more direct update to ensure that expire time is shifted forward.
    U.sleep(100);

    tx = inTx ? transactions().txStart() : null;

    try {
        jcache().withExpiryPolicy(expiry).put(key, 3);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    for (int i = 0; i < gridCount(); i++) {
        if (grid(i).affinity(null).isPrimaryOrBackup(grid(i).localNode(), key)) {
            IgnitePair<Long> curEntryTtl = entryTtl(jcache(i), key);

            assertNotNull(curEntryTtl.get1());
            assertNotNull(curEntryTtl.get2());
            assertEquals(ttl, (long)curEntryTtl.get1());
            assertTrue(curEntryTtl.get2() > startTime);

            expireTimes[i] = curEntryTtl.get2();
        }
    }

    // And one more update to ensure that ttl is not changed and expire time is not shifted forward.
    U.sleep(100);

    log.info("Put 4");

    tx = inTx ? transactions().txStart() : null;

    try {
        // Plain put (no expiry policy): expire time must stay where it was.
        jcache().put(key, 4);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    log.info("Put 4 done");

    for (int i = 0; i < gridCount(); i++) {
        if (grid(i).affinity(null).isPrimaryOrBackup(grid(i).localNode(), key)) {
            IgnitePair<Long> curEntryTtl = entryTtl(jcache(i), key);

            assertNotNull(curEntryTtl.get1());
            assertNotNull(curEntryTtl.get2());
            assertEquals(ttl, (long)curEntryTtl.get1());
            assertEquals(expireTimes[i], (long)curEntryTtl.get2());
        }
    }

    // Avoid reloading from store.
    storeStgy.removeFromStore(key);

    // Wait for the entry to expire and be physically removed.
    assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicateX() {
        @SuppressWarnings("unchecked")
        @Override public boolean applyx() {
            try {
                Integer val = c.get(key);

                if (val != null) {
                    info("Value is in cache [key=" + key + ", val=" + val + ']');

                    return false;
                }

                // Get "cache" field from GridCacheProxyImpl.
                GridCacheAdapter c0 = cacheFromCtx(c);

                if (!c0.context().deferredDelete()) {
                    GridCacheEntryEx e0 = c0.peekEx(key);

                    return e0 == null || (e0.rawGet() == null && e0.valueBytes() == null);
                }
                else
                    return true;
            }
            catch (GridCacheEntryRemovedException e) {
                throw new RuntimeException(e);
            }
        }
    }, Math.min(ttl * 10, getTestTimeout())));

    IgniteCache fullCache = fullCache();

    // Local caches keep no per-entry ttl metadata to verify beyond this point.
    if (!isMultiJvmObject(fullCache)) {
        GridCacheAdapter internalCache = internalCache(fullCache);

        if (internalCache.isLocal())
            return;
    }

    assert c.get(key) == null;

    // Ensure that old TTL and expire time are not longer "visible".
    entryTtl = entryTtl(fullCache(), key);

    assertNotNull(entryTtl.get1());
    assertNotNull(entryTtl.get2());
    assertEquals(0, (long)entryTtl.get1());
    assertEquals(0, (long)entryTtl.get2());

    // Ensure that next update will not pick old expire time.
    tx = inTx ? transactions().txStart() : null;

    try {
        jcache().put(key, 10);

        if (tx != null)
            tx.commit();
    }
    finally {
        if (tx != null)
            tx.close();
    }

    U.sleep(2000);

    entryTtl = entryTtl(fullCache(), key);

    assertEquals((Integer)10, c.get(key));
    assertNotNull(entryTtl.get1());
    assertNotNull(entryTtl.get2());
    assertEquals(0, (long)entryTtl.get1());
    assertEquals(0, (long)entryTtl.get2());
}
/**
 * Checks that {@code localEvict()} removes entries from on-heap memory only and
 * that evicted values can be reloaded via {@code loadAll} and become visible on
 * all primary/backup nodes again.
 *
 * @throws Exception In case of error.
 */
public void testLocalEvict() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    List<String> keys = primaryKeysForCache(cache, 3);

    String key1 = keys.get(0);
    String key2 = keys.get(1);
    String key3 = keys.get(2);

    cache.put(key1, 1);
    cache.put(key2, 2);
    cache.put(key3, 3);

    assert peek(cache, key1) == 1;
    assert peek(cache, key2) == 2;
    assert peek(cache, key3) == 3;

    // Evict two of the three keys from on-heap memory.
    cache.localEvict(F.asList(key1, key2));

    assert cache.localPeek(key1, ONHEAP) == null;
    assert cache.localPeek(key2, ONHEAP) == null;
    assert peek(cache, key3) == 3;

    // Reload the evicted entries from the store.
    loadAll(cache, ImmutableSet.of(key1, key2), true);

    Affinity<String> aff = ignite(0).affinity(null);

    // All three values must be visible on every node that owns them.
    for (int i = 0; i < gridCount(); i++) {
        if (aff.isPrimaryOrBackup(grid(i).cluster().localNode(), key1))
            assertEquals((Integer)1, peek(jcache(i), key1));

        if (aff.isPrimaryOrBackup(grid(i).cluster().localNode(), key2))
            assertEquals((Integer)2, peek(jcache(i), key2));

        if (aff.isPrimaryOrBackup(grid(i).cluster().localNode(), key3))
            assertEquals((Integer)3, peek(jcache(i), key3));
    }
}
/**
 * Checks that the public cache handle returned by {@code jcache()} is a proxy
 * implementation rather than the internal cache object.
 */
public void testCacheProxy() {
    IgniteCache<String, Integer> cache = jcache();

    assert cache instanceof IgniteCacheProxy;
}
/**
 * Checks that an expired entry is compacted away: after the TTL elapses the
 * entry is no longer peekable and the local size drops to zero.
 *
 * TODO GG-11133.
 *
 * @throws Exception If failed.
 */
public void testCompactExpired() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();

    final String key = F.first(primaryKeysForCache(cache, 1));

    cache.put(key, 1);

    long ttl = 500;

    final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));

    // Overwrite with a value that expires after 'ttl' ms.
    grid(0).cache(null).withExpiryPolicy(expiry).put(key, 1);

    waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return cache.localPeek(key) == null;
        }
    }, ttl + 1000);

    // Peek will actually remove entry from cache.
    assertNull(cache.localPeek(key));

    assertEquals(0, cache.localSize());

    // Clear readers, if any.
    cache.remove(key);
}
/**
 * Checks that removing a missing key inside an optimistic read-committed
 * transaction returns {@code false} and the transaction commits cleanly.
 *
 * @throws Exception If failed.
 */
public void testOptimisticTxMissingKey() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction tx = transactions().txStart(OPTIMISTIC, READ_COMMITTED)) {
            // Remove missing key.
            assertFalse(jcache().remove(UUID.randomUUID().toString()));

            tx.commit();
        }
    }
}
/**
 * Checks that removing a missing key inside an optimistic read-committed
 * transaction returns {@code false} when the transaction is rolled back
 * instead of committed.
 *
 * @throws Exception If failed.
 */
public void testOptimisticTxMissingKeyNoCommit() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction tx = transactions().txStart(OPTIMISTIC, READ_COMMITTED)) {
            // Remove missing key.
            assertFalse(jcache().remove(UUID.randomUUID().toString()));

            // Mark for rollback; try-with-resources close() performs it.
            tx.setRollbackOnly();
        }
    }
}
/**
 * Checks put/get/remove cycles in optimistic read-committed transactions.
 *
 * @throws Exception If failed.
 */
public void testOptimisticTxReadCommittedInTx() throws Exception {
    checkRemovexInTx(OPTIMISTIC, READ_COMMITTED);
}
/**
 * Checks put/get/remove cycles in optimistic repeatable-read transactions.
 *
 * @throws Exception If failed.
 */
public void testOptimisticTxRepeatableReadInTx() throws Exception {
    checkRemovexInTx(OPTIMISTIC, REPEATABLE_READ);
}
/**
 * Checks put/get/remove cycles in pessimistic read-committed transactions.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxReadCommittedInTx() throws Exception {
    checkRemovexInTx(PESSIMISTIC, READ_COMMITTED);
}
/**
 * Checks put/get/remove cycles in pessimistic repeatable-read transactions.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxRepeatableReadInTx() throws Exception {
    checkRemovexInTx(PESSIMISTIC, REPEATABLE_READ);
}
/**
 * Puts a batch of keys, verifies them, removes them and verifies the removal —
 * each phase running in its own transaction with the given concurrency/isolation.
 *
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If failed.
 */
private void checkRemovexInTx(TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception {
    if (txShouldBeUsed()) {
        final int cnt = 10;

        // Phase 1: populate keys.
        CU.inTx(ignite(0), jcache(), concurrency, isolation, new CIX1<IgniteCache<String, Integer>>() {
            @Override public void applyx(IgniteCache<String, Integer> cache) {
                for (int i = 0; i < cnt; i++)
                    cache.put("key" + i, i);
            }
        });

        // Phase 2: verify all values are visible.
        CU.inTx(ignite(0), jcache(), concurrency, isolation, new CIX1<IgniteCache<String, Integer>>() {
            @Override public void applyx(IgniteCache<String, Integer> cache) {
                for (int i = 0; i < cnt; i++)
                    // Integer.valueOf(..) instead of the deprecated new Integer(..) constructor.
                    assertEquals(Integer.valueOf(i), cache.get("key" + i));
            }
        });

        // Phase 3: remove every key and check removal is reported.
        CU.inTx(ignite(0), jcache(), concurrency, isolation, new CIX1<IgniteCache<String, Integer>>() {
            @Override public void applyx(IgniteCache<String, Integer> cache) {
                for (int i = 0; i < cnt; i++)
                    assertTrue("Failed to remove key: key" + i, cache.remove("key" + i));
            }
        });

        // Phase 4: verify nothing is left.
        CU.inTx(ignite(0), jcache(), concurrency, isolation, new CIX1<IgniteCache<String, Integer>>() {
            @Override public void applyx(IgniteCache<String, Integer> cache) {
                for (int i = 0; i < cnt; i++)
                    assertNull(cache.get("key" + i));
            }
        });
    }
}
/**
 * Checks that removing a missing key inside a pessimistic read-committed
 * transaction returns {@code false} and the transaction commits cleanly.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxMissingKey() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction tx = transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
            // Remove missing key.
            assertFalse(jcache().remove(UUID.randomUUID().toString()));

            tx.commit();
        }
    }
}
/**
 * Checks that removing a missing key inside a pessimistic read-committed
 * transaction returns {@code false} when the transaction is rolled back
 * instead of committed.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxMissingKeyNoCommit() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction tx = transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
            // Remove missing key.
            assertFalse(jcache().remove(UUID.randomUUID().toString()));

            // Mark for rollback; try-with-resources close() performs it.
            tx.setRollbackOnly();
        }
    }
}
/**
 * Checks that a value written in a pessimistic repeatable-read transaction is
 * visible to a subsequent read in the same transaction.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxRepeatableRead() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction ignored = transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            jcache().put("key", 1);

            assert jcache().get("key") == 1;
        }
    }
}
/**
 * Checks that {@code getAndPut()} inside a pessimistic repeatable-read
 * transaction returns the value written earlier in the same transaction.
 *
 * @throws Exception If failed.
 */
public void testPessimisticTxRepeatableReadOnUpdate() throws Exception {
    if (txShouldBeUsed()) {
        try (Transaction ignored = transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            jcache().put("key", 1);

            assert jcache().getAndPut("key", 2) == 1;
        }
    }
}
/**
 * Checks that iterating the caches of all nodes yields exactly the entries that
 * were put, with no duplicates affecting the aggregated map.
 *
 * @throws Exception In case of error.
 */
public void testToMap() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    cache.put("key1", 1);
    cache.put("key2", 2);

    Map<String, Integer> map = new HashMap<>();

    // Collect entries from every node; duplicates collapse in the map.
    for (int i = 0; i < gridCount(); i++) {
        for (Cache.Entry<String, Integer> entry : jcache(i))
            map.put(entry.getKey(), entry.getValue());
    }

    assert map.size() == 2;
    assert map.get("key1") == 1;
    assert map.get("key2") == 2;
}
/**
 * Verifies that cache contents match the expected key set: via local size on a
 * near-enabled cache, otherwise via a per-grid entries check task.
 *
 * @param keys Expected keys.
 * @throws Exception If failed.
 */
protected void checkSize(final Collection<String> keys) throws Exception {
    if (nearEnabled()) {
        assertEquals(keys.size(), jcache().localSize(CachePeekMode.ALL));

        return;
    }

    for (int idx = 0; idx < gridCount(); idx++)
        executeOnLocalOrRemoteJvm(idx, new CheckEntriesTask(keys));
}
/**
 * Verifies that the number of cached keys matches the expected key set: via
 * local size on a near-enabled cache, otherwise via a per-grid key size task.
 *
 * @param keys Expected keys.
 * @throws Exception If failed.
 */
protected void checkKeySize(final Collection<String> keys) throws Exception {
    if (nearEnabled()) {
        assertEquals("Invalid key size: " + jcache().localSize(ALL),
            keys.size(), jcache().localSize(ALL));

        return;
    }

    for (int idx = 0; idx < gridCount(); idx++)
        executeOnLocalOrRemoteJvm(idx, new CheckKeySizeTask(keys));
}
/**
 * Asserts whether the key is (or is not) present in the cache: directly on a
 * near-enabled cache, otherwise by probing every grid until a hit is found.
 *
 * @param exp Expected value.
 * @param key Key.
 * @throws Exception If failed.
 */
private void checkContainsKey(boolean exp, String key) throws Exception {
    if (nearEnabled()) {
        assertEquals(exp, jcache().containsKey(key));

        return;
    }

    boolean found = false;

    // Stop probing as soon as any node reports the key.
    for (int idx = 0; idx < gridCount() && !found; idx++)
        found = containsKey(jcache(idx), key);

    assertEquals("Key: " + key, exp, found);
}
/**
 * Resolves the Ignite instance that is primary for the given key.
 *
 * @param key Key.
 * @return Ignite instance for primary node.
 * @throws IgniteException If no started grid matches the primary node.
 */
protected Ignite primaryIgnite(String key) {
    ClusterNode node = grid(0).affinity(null).mapKeyToNode(key);

    if (node == null)
        throw new IgniteException("Failed to find primary node.");

    UUID primaryId = node.id();

    // Match the affinity-resolved node id against the started grids.
    for (int idx = 0; idx < gridCount(); idx++) {
        if (primaryId.equals(grid(idx).localNode().id()))
            return ignite(idx);
    }

    throw new IgniteException("Failed to find primary node.");
}
/**
 * @param key Key.
 * @return Default cache on the node that is primary for the given key.
 */
protected IgniteCache<String, Integer> primaryCache(String key) {
    return primaryIgnite(key).cache(null);
}
/**
 * @param cache Cache.
 * @param cnt Keys count.
 * @param startFrom Begin value of the key.
 * @return Collection of keys for which given cache is primary.
 */
protected List<String> primaryKeysForCache(IgniteCache<String, Integer> cache, int cnt, int startFrom) {
    return executeOnLocalOrRemoteJvm(cache, new CheckPrimaryKeysTask(startFrom, cnt));
}
/**
 * Convenience overload that generates keys starting from index 1.
 *
 * @param cache Cache.
 * @param cnt Keys count.
 * @return Collection of keys for which given cache is primary.
 * @throws IgniteCheckedException If failed.
 */
protected List<String> primaryKeysForCache(IgniteCache<String, Integer> cache, int cnt)
    throws IgniteCheckedException {
    return primaryKeysForCache(cache, cnt, 1);
}
/**
 * Reads per-entry expiry metadata, executing on the cache's JVM when it lives
 * in another process.
 *
 * @param cache Cache.
 * @param key Entry key.
 * @return Pair [ttl, expireTime]; both values null if entry not found.
 */
protected IgnitePair<Long> entryTtl(IgniteCache cache, String key) {
    return executeOnLocalOrRemoteJvm(cache, new EntryTtlTask(key, true));
}
/**
 * Checks that iterating the cache visits every entry exactly once after the
 * data has been read from every node.
 *
 * @throws Exception If failed.
 */
public void testIterator() throws Exception {
    IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    final int KEYS = 1000;

    for (int i = 0; i < KEYS; i++)
        cache.put(i, i);

    // Try to initialize readers in case when near cache is enabled.
    for (int i = 0; i < gridCount(); i++) {
        cache = grid(i).cache(null);

        for (int k = 0; k < KEYS; k++)
            assertEquals((Object)k, cache.get(k));
    }

    int cnt = 0;

    // Note: iterates over the cache of the last grid visited above.
    for (Cache.Entry e : cache)
        cnt++;

    assertEquals(KEYS, cnt);
}
/**
 * Checks the cache iterator contract on a large data set: hasNext() stability,
 * content correctness, removal via the iterator, and iterator resource cleanup.
 *
 * @throws Exception If failed.
 */
public void testIgniteCacheIterator() throws Exception {
    IgniteCache<String, Integer> cache = jcache(0);

    Iterator<Cache.Entry<String, Integer>> it = cache.iterator();

    boolean hasNext = it.hasNext();

    // Cache must start empty; include the offending entry in the failure message.
    if (hasNext)
        assertFalse("Cache has value: " + it.next(), hasNext);

    final int SIZE = 10_000;

    Map<String, Integer> entries = new HashMap<>();

    Map<String, Integer> putMap = new HashMap<>();

    // Load in batches of 500 to keep individual putAll messages bounded.
    for (int i = 0; i < SIZE; ++i) {
        String key = Integer.toString(i);

        putMap.put(key, i);

        entries.put(key, i);

        if (putMap.size() == 500) {
            cache.putAll(putMap);

            info("Puts finished: " + (i + 1));

            putMap.clear();
        }
    }

    // Flush the remaining partial batch.
    cache.putAll(putMap);

    checkIteratorHasNext();

    checkIteratorCache(entries);

    checkIteratorRemove(cache, entries);

    checkIteratorEmpty(cache);
}
/**
 * Checks that closing a query cursor before exhausting it does not leak
 * server-side iterator resources.
 *
 * @throws Exception If failed.
 */
public void testIteratorLeakOnCancelCursor() throws Exception {
    IgniteCache<String, Integer> cache = jcache(0);

    final int SIZE = 10_000;

    Map<String, Integer> putMap = new HashMap<>();

    // Load in batches of 500 to keep individual putAll messages bounded.
    for (int i = 0; i < SIZE; ++i) {
        String key = Integer.toString(i);

        putMap.put(key, i);

        if (putMap.size() == 500) {
            cache.putAll(putMap);

            info("Puts finished: " + (i + 1));

            putMap.clear();
        }
    }

    cache.putAll(putMap);

    QueryCursor<Cache.Entry<String, Integer>> cur = cache.query(new ScanQuery<String, Integer>());

    // Start iteration, then cancel the cursor without draining it.
    cur.iterator().next();

    cur.close();

    waitForIteratorsCleared(cache, 10);
}
/**
 * If hasNext() is called repeatedly, it should return the same result — both
 * before and after the iterator is fully drained.
 */
private void checkIteratorHasNext() {
    Iterator<Cache.Entry<String, Integer>> it = jcache(0).iterator();

    // Two consecutive hasNext() calls must agree.
    assertEquals(it.hasNext(), it.hasNext());

    // Drain the iterator completely.
    for (; it.hasNext(); )
        it.next();

    // Exhausted iterator stays exhausted.
    assertFalse(it.hasNext());
}
/**
 * Checks entry removal through the cache iterator, and that a second
 * {@code Iterator.remove()} without an intervening {@code next()} throws
 * {@code IllegalStateException}.
 *
 * @param cache Cache.
 * @param entries Expected entries in the cache.
 */
private void checkIteratorRemove(IgniteCache<String, Integer> cache, Map<String, Integer> entries) {
    // Check that we can remove element.
    String rmvKey = Integer.toString(5);

    removeCacheIterator(cache, rmvKey);

    entries.remove(rmvKey);

    assertFalse(cache.containsKey(rmvKey));
    assertNull(cache.get(rmvKey));

    checkIteratorCache(entries);

    // Check that we cannot call Iterator.remove() without next().
    final Iterator<Cache.Entry<String, Integer>> iter = jcache(0).iterator();

    assertTrue(iter.hasNext());

    iter.next();

    // First remove() after next() is legal.
    iter.remove();

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Void call() throws Exception {
            iter.remove();

            return null;
        }
    }, IllegalStateException.class, null);
}
/**
 * Removes the given key through the cache iterator and asserts it was
 * encountered exactly once during the full scan.
 *
 * @param cache Cache.
 * @param key Key to remove.
 */
private void removeCacheIterator(IgniteCache<String, Integer> cache, String key) {
    int removed = 0;

    for (Iterator<Cache.Entry<String, Integer>> it = cache.iterator(); it.hasNext(); ) {
        if (key.equals(it.next().getKey())) {
            it.remove();

            removed++;
        }
    }

    // The key must appear exactly once in the iteration.
    assertEquals(1, removed);
}
/**
 * Verifies iterator contents against the expected entries on every grid.
 *
 * @param entries Expected entries in the cache.
 */
private void checkIteratorCache(Map<String, Integer> entries) {
    for (int idx = 0; idx < gridCount(); idx++)
        checkIteratorCache(jcache(idx), entries);
}
/**
 * Verifies that iterating the given cache yields exactly the expected entries:
 * every visited entry is present in the map and the totals match.
 *
 * @param cache Cache.
 * @param entries Expected entries in the cache.
 */
private void checkIteratorCache(IgniteCache<String, Integer> cache, Map<String, Integer> entries) {
    int seen = 0;

    for (Cache.Entry<String, Integer> e : cache) {
        assertTrue(entries.containsKey(e.getKey()));
        assertEquals(entries.get(e.getKey()), e.getValue());

        seen++;
    }

    // No entries may be missing or duplicated.
    assertEquals(entries.size(), seen);
}
/**
 * Checks iterators are cleared on every grid; the task fails the test on the
 * first grid still holding iterator resources.
 */
private void checkIteratorsCleared() {
    for (int j = 0; j < gridCount(); j++)
        executeOnLocalOrRemoteJvm(j, new CheckIteratorTask());
}
/**
 * Waits up to {@code secs} seconds for all cache iterators on all grids to be
 * cleared, polling once per second. On timeout, dumps iterator state from every
 * grid and rethrows the last assertion error.
 *
 * @param cache Cache whose {@code size()} call triggers the weak-reference queue poll.
 * @param secs Maximum number of seconds to wait.
 * @throws InterruptedException If the waiting thread is interrupted.
 */
private void waitForIteratorsCleared(IgniteCache<String, Integer> cache, int secs) throws InterruptedException {
    for (int i = 0; i < secs; i++) {
        try {
            cache.size(); // Trigger weak queue poll.

            checkIteratorsCleared();

            return; // Iterators are cleared; nothing more to wait for.
        }
        catch (AssertionFailedError e) {
            // Fix: last-attempt check previously used a hard-coded 9, silently
            // swallowing the failure whenever 'secs' was less than 10.
            if (i == secs - 1) {
                for (int j = 0; j < gridCount(); j++)
                    executeOnLocalOrRemoteJvm(j, new PrintIteratorStateTask());

                throw e;
            }

            log.info("Iterators not cleared, will wait");

            Thread.sleep(1000);
        }
    }
}
/**
 * Checks iterators are cleared after using: abandons several partially-consumed
 * iterators, then relies on GC plus the weak-reference queue to reclaim their
 * server-side resources.
 *
 * @param cache Cache.
 * @throws Exception If failed.
 */
private void checkIteratorEmpty(IgniteCache<String, Integer> cache) throws Exception {
    int cnt = 5;

    for (int i = 0; i < cnt; ++i) {
        // Deliberately abandon the iterator after a single next().
        Iterator<Cache.Entry<String, Integer>> iter = cache.iterator();

        iter.next();

        assert iter.hasNext();
    }

    // Make the abandoned iterators collectable so their weak refs get enqueued.
    System.gc();

    waitForIteratorsCleared(cache, 10);
}
/**
 * Checks {@code localClear()} for a present key and for a key that was
 * previously evicted from on-heap memory.
 *
 * @throws Exception If failed.
 */
public void testLocalClearKey() throws Exception {
    addKeys();

    String keyToRmv = "key" + 25;

    Ignite g = primaryIgnite(keyToRmv);

    g.<String, Integer>cache(null).localClear(keyToRmv);

    checkLocalRemovedKey(keyToRmv);

    // Restore the cleared key before the second scenario.
    g.<String, Integer>cache(null).put(keyToRmv, 1);

    String keyToEvict = "key" + 30;

    g = primaryIgnite(keyToEvict);

    // Clearing must also work for an entry no longer held on-heap.
    g.<String, Integer>cache(null).localEvict(Collections.singleton(keyToEvict));

    g.<String, Integer>cache(null).localClear(keyToEvict);

    checkLocalRemovedKey(keyToEvict);
}
/**
 * Verifies that only the given key was cleared from its primary node: backup
 * nodes still hold it, and all other keys remain intact on their primaries.
 *
 * @param keyToRmv Removed key.
 */
protected void checkLocalRemovedKey(String keyToRmv) {
    for (int i = 0; i < 500; ++i) {
        String key = "key" + i;

        boolean found = primaryIgnite(key).cache(null).localPeek(key) != null;

        if (keyToRmv.equals(key)) {
            Collection<ClusterNode> nodes = grid(0).affinity(null).mapKeyToPrimaryAndBackups(key);

            // localClear() affects the primary copy only; backups must keep the entry.
            for (int j = 0; j < gridCount(); ++j) {
                if (nodes.contains(grid(j).localNode()) && grid(j) != primaryIgnite(key))
                    assertTrue("Not found on backup removed key ", grid(j).cache(null).localPeek(key) != null);
            }

            assertFalse("Found removed key " + key, found);
        }
        else
            assertTrue("Not found key " + key, found);
    }
}
/**
 * Checks that {@code localClearAll()} clears exactly the requested keys on a
 * single node and leaves the rest of the data intact.
 *
 * @throws Exception If failed.
 */
public void testLocalClearKeys() throws Exception {
    Map<String, List<String>> keys = addKeys();

    Ignite g = grid(0);

    Set<String> keysToRmv = new HashSet<>();

    // Pick a node owning more than two primary keys and clear two of them.
    for (int i = 0; i < gridCount(); ++i) {
        List<String> gridKeys = keys.get(grid(i).name());

        if (gridKeys.size() > 2) {
            keysToRmv.add(gridKeys.get(0));

            keysToRmv.add(gridKeys.get(1));

            g = grid(i);

            break;
        }
    }

    assert keysToRmv.size() > 1;

    info("Will clear keys on node: " + g.cluster().localNode().id());

    g.<String, Integer>cache(null).localClearAll(keysToRmv);

    // Exactly the cleared keys must be gone from their primaries.
    for (int i = 0; i < 500; ++i) {
        String key = "key" + i;

        Ignite ignite = primaryIgnite(key);

        boolean found = ignite.cache(null).localPeek(key) != null;

        if (keysToRmv.contains(key))
            assertFalse("Found removed key [key=" + key + ", node=" + ignite.cluster().localNode().id() + ']',
                found);
        else
            assertTrue("Not found key " + key, found);
    }
}
/**
 * Add 500 keys to cache only on primaries nodes.
 *
 * Entries are stored only through their primary node: otherwise clearLocally()
 * would skip entries that are blocked by having readers.
 *
 * @return Map grid's name to its primary keys.
 */
protected Map<String, List<String>> addKeys() {
    Map<String, List<String>> keysByGrid = new HashMap<>();

    for (int idx = 0; idx < gridCount(); idx++)
        keysByGrid.put(grid(idx).name(), new ArrayList<String>());

    for (int k = 0; k < 500; k++) {
        String key = "key" + k;

        Ignite primary = primaryIgnite(key);

        primary.cache(null).put(key, "value" + k);

        keysByGrid.get(primary.name()).add(key);
    }

    return keysByGrid;
}
/**
 * Checks synchronous clear of a single key across the whole cluster.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKey() throws Exception {
    testGlobalClearKey(false, Arrays.asList("key25"), false);
}
/**
 * Checks asynchronous clear of a single key via the legacy withAsync() API.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKeyAsyncOld() throws Exception {
    testGlobalClearKey(true, Arrays.asList("key25"), true);
}
/**
 * Checks asynchronous clear of a single key via clearAsync().
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKeyAsync() throws Exception {
    testGlobalClearKey(true, Arrays.asList("key25"), false);
}
/**
 * Checks synchronous clear of several keys across the whole cluster.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKeys() throws Exception {
    testGlobalClearKey(false, Arrays.asList("key25", "key100", "key150"), false);
}
/**
 * Checks asynchronous clear of several keys via the legacy withAsync() API.
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKeysAsyncOld() throws Exception {
    testGlobalClearKey(true, Arrays.asList("key25", "key100", "key150"), true);
}
/**
 * Checks asynchronous clear of several keys via clearAllAsync().
 *
 * @throws Exception If failed.
 */
public void testGlobalClearKeysAsync() throws Exception {
    testGlobalClearKey(true, Arrays.asList("key25", "key100", "key150"), false);
}
/**
 * Clears the given keys through the sync, new-async or legacy-async API and
 * verifies that exactly those keys disappeared from every node.
 *
 * @param async If {@code true} uses async method.
 * @param keysToRmv Keys to remove.
 * @param oldAsync Use old async API.
 * @throws Exception If failed.
 */
protected void testGlobalClearKey(boolean async, Collection<String> keysToRmv, boolean oldAsync) throws Exception {
    // Save entries only on their primary nodes. If we didn't do so, clearLocally() will not remove all entries
    // because some of them were blocked due to having readers.
    for (int i = 0; i < 500; ++i) {
        String key = "key" + i;

        Ignite g = primaryIgnite(key);

        g.cache(null).put(key, "value" + i);
    }

    if (async) {
        if (oldAsync) {
            // Legacy withAsync() + future() style.
            IgniteCache<String, Integer> asyncCache = jcache().withAsync();

            if (keysToRmv.size() == 1)
                asyncCache.clear(F.first(keysToRmv));
            else
                asyncCache.clearAll(new HashSet<>(keysToRmv));

            asyncCache.future().get();
        } else {
            if (keysToRmv.size() == 1)
                jcache().clearAsync(F.first(keysToRmv)).get();
            else
                jcache().clearAllAsync(new HashSet<>(keysToRmv)).get();
        }
    }
    else {
        if (keysToRmv.size() == 1)
            jcache().clear(F.first(keysToRmv));
        else
            jcache().clearAll(new HashSet<>(keysToRmv));
    }

    // A key counts as present if any node still peeks it locally.
    for (int i = 0; i < 500; ++i) {
        String key = "key" + i;

        boolean found = false;

        for (int j = 0; j < gridCount(); j++) {
            if (jcache(j).localPeek(key) != null)
                found = true;
        }

        if (!keysToRmv.contains(key))
            assertTrue("Not found key " + key, found);
        else
            assertFalse("Found removed key " + key, found);
    }
}
/**
 * Checks that a {@code withSkipStore()} projection never reads from or writes to
 * the underlying store — for single operations, invoke, putAll/removeAll and
 * iterators — while the plain cache keeps cache and store in sync.
 *
 * @throws Exception If failed.
 */
public void testWithSkipStore() throws Exception {
    IgniteCache<String, Integer> cache = grid(0).cache(null);

    IgniteCache<String, Integer> cacheSkipStore = cache.withSkipStore();

    List<String> keys = primaryKeysForCache(cache, 10);

    for (int i = 0; i < keys.size(); ++i)
        storeStgy.putToStore(keys.get(i), i);

    // Skip-store projection must not load store-only entries.
    assertFalse(cacheSkipStore.iterator().hasNext());

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));
    }

    for (String key : keys) {
        cacheSkipStore.remove(key);

        assertNotNull(cache.get(key));
    }

    cache.removeAll(new HashSet<>(keys));

    for (String key : keys)
        assertNull(cache.get(key));

    final int KEYS = 250;

    // Put/remove data from multiple nodes.
    keys = new ArrayList<>(KEYS);

    for (int i = 0; i < KEYS; i++)
        keys.add("key_" + i);

    for (int i = 0; i < keys.size(); ++i)
        cache.put(keys.get(i), i);

    for (int i = 0; i < keys.size(); ++i) {
        String key = keys.get(i);

        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertEquals(i, storeStgy.getFromStore(key));
    }

    // Updates through the skip-store projection must leave the store untouched.
    for (int i = 0; i < keys.size(); ++i) {
        String key = keys.get(i);

        Integer val1 = -1;

        cacheSkipStore.put(key, val1);

        assertEquals(i, storeStgy.getFromStore(key));

        assertEquals(val1, cacheSkipStore.get(key));

        Integer val2 = -2;

        assertEquals(val1, cacheSkipStore.invoke(key, new SetValueProcessor(val2)));

        assertEquals(i, storeStgy.getFromStore(key));

        assertEquals(val2, cacheSkipStore.get(key));
    }

    for (String key : keys) {
        cacheSkipStore.remove(key);

        assertNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    for (String key : keys) {
        cache.remove(key);

        assertNull(cacheSkipStore.get(key));

        assertNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));

        storeStgy.putToStore(key, 0);

        Integer val = -1;

        // invoke() through the projection sees no old value and writes nothing to the store.
        assertNull(cacheSkipStore.invoke(key, new SetValueProcessor(val)));

        assertEquals(0, storeStgy.getFromStore(key));

        assertEquals(val, cacheSkipStore.get(key));

        cache.remove(key);

        storeStgy.putToStore(key, 0);

        // putIfAbsent ignores the store-only value: the put succeeds.
        assertTrue(cacheSkipStore.putIfAbsent(key, val));

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(0, storeStgy.getFromStore(key));

        cache.remove(key);

        storeStgy.putToStore(key, 0);

        // getAndPut ignores the store-only value: no previous value is reported.
        assertNull(cacheSkipStore.getAndPut(key, val));

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(0, storeStgy.getFromStore(key));

        cache.remove(key);
    }

    assertFalse(cacheSkipStore.iterator().hasNext());

    assertTrue(storeStgy.getStoreSize() == 0);

    assertTrue(cache.size(ALL) == 0);

    // putAll/removeAll from multiple nodes.
    Map<String, Integer> data = new LinkedHashMap<>();

    for (int i = 0; i < keys.size(); i++)
        data.put(keys.get(i), i);

    cacheSkipStore.putAll(data);

    for (String key : keys) {
        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    cache.putAll(data);

    for (String key : keys) {
        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cacheSkipStore.removeAll(data.keySet());

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        // Plain-cache get reloads the entry from the store.
        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cacheSkipStore.putAll(data);

    for (String key : keys) {
        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cacheSkipStore.removeAll(data.keySet());

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cache.removeAll(data.keySet());

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        assertNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    assertTrue(storeStgy.getStoreSize() == 0);

    // Miscellaneous checks.
    String newKey = "New key";

    assertFalse(storeStgy.isInStore(newKey));

    cacheSkipStore.put(newKey, 1);

    assertFalse(storeStgy.isInStore(newKey));

    cache.put(newKey, 1);

    assertTrue(storeStgy.isInStore(newKey));

    Iterator<Cache.Entry<String, Integer>> it = cacheSkipStore.iterator();

    assertTrue(it.hasNext());

    Cache.Entry<String, Integer> entry = it.next();

    String rmvKey = entry.getKey();

    assertTrue(storeStgy.isInStore(rmvKey));

    // Iterator.remove() on the projection removes from cache but not from store.
    it.remove();

    assertNull(cacheSkipStore.get(rmvKey));

    assertTrue(storeStgy.isInStore(rmvKey));

    assertTrue(cache.size(ALL) == 0);

    assertTrue(cacheSkipStore.size(ALL) == 0);

    cache.remove(rmvKey);

    assertTrue(storeStgy.getStoreSize() == 0);
}
/**
 * Checks no-args {@code removeAll()} semantics on a skip-store projection:
 * entries disappear from the cache but remain in the store, while the plain
 * cache's removeAll() purges both.
 *
 * @throws Exception If failed.
 */
public void testWithSkipStoreRemoveAll() throws Exception {
    if (atomicityMode() == TRANSACTIONAL || (atomicityMode() == ATOMIC && nearEnabled())) // TODO IGNITE-373.
        return;

    IgniteCache<String, Integer> cache = grid(0).cache(null);

    IgniteCache<String, Integer> cacheSkipStore = cache.withSkipStore();

    Map<String, Integer> data = new HashMap<>();

    for (int i = 0; i < 100; i++)
        data.put("key_" + i, i);

    cache.putAll(data);

    for (String key : data.keySet()) {
        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cacheSkipStore.removeAll();

    for (String key : data.keySet()) {
        assertNull(cacheSkipStore.get(key));

        // Plain-cache get reloads the entry from the store.
        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cache.removeAll();

    for (String key : data.keySet()) {
        assertNull(cacheSkipStore.get(key));

        assertNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }
}
/**
 * Checks skip-store semantics inside explicit transactions for every
 * concurrency/isolation combination.
 *
 * @throws Exception If failed.
 */
public void testWithSkipStoreTx() throws Exception {
    if (txShouldBeUsed()) {
        IgniteCache<String, Integer> cache = grid(0).cache(null);

        IgniteCache<String, Integer> cacheSkipStore = cache.withSkipStore();

        final int KEYS = 250;

        // Put/remove data from multiple nodes.
        List<String> keys = new ArrayList<>(KEYS);

        for (int i = 0; i < KEYS; i++)
            keys.add("key_" + i);

        Map<String, Integer> data = new LinkedHashMap<>();

        for (int i = 0; i < keys.size(); i++)
            data.put(keys.get(i), i);

        // Exercise all 6 concurrency/isolation combinations.
        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, OPTIMISTIC, READ_COMMITTED);

        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, OPTIMISTIC, REPEATABLE_READ);

        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, OPTIMISTIC, SERIALIZABLE);

        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, PESSIMISTIC, READ_COMMITTED);

        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, PESSIMISTIC, REPEATABLE_READ);

        checkSkipStoreWithTransaction(cache, cacheSkipStore, data, keys, PESSIMISTIC, SERIALIZABLE);
    }
}
/**
 * Verifies skip-store semantics inside explicit transactions with the given
 * concurrency/isolation: puts, putAll/removeAll, mixed projection usage,
 * disabled read-through, invoke, putIfAbsent and getAndPut.
 *
 * @param cache Cache instance.
 * @param cacheSkipStore Cache skip store projection.
 * @param data Data set.
 * @param keys Keys list.
 * @param txConcurrency Concurrency mode.
 * @param txIsolation Isolation mode.
 * @throws Exception If failed.
 */
private void checkSkipStoreWithTransaction(IgniteCache<String, Integer> cache,
    IgniteCache<String, Integer> cacheSkipStore,
    Map<String, Integer> data,
    List<String> keys,
    TransactionConcurrency txConcurrency,
    TransactionIsolation txIsolation)
    throws Exception {
    info("Test tx skip store [concurrency=" + txConcurrency + ", isolation=" + txIsolation + ']');

    cache.removeAll(data.keySet());

    checkEmpty(cache, cacheSkipStore);

    IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();

    Integer val = -1;

    // Several put check.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : keys)
            cacheSkipStore.put(key, val);

        for (String key : keys) {
            assertEquals(val, cacheSkipStore.get(key));

            assertEquals(val, cache.get(key));

            assertFalse(storeStgy.isInStore(key));
        }

        tx.commit();
    }

    // After commit, values are in cache but still absent from the store.
    for (String key : keys) {
        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(val, cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    assertEquals(0, storeStgy.getStoreSize());

    // cacheSkipStore putAll(..)/removeAll(..) check.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cacheSkipStore.putAll(data);

        tx.commit();
    }

    for (String key : keys) {
        val = data.get(key);

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(val, cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    storeStgy.putAllToStore(data);

    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cacheSkipStore.removeAll(data.keySet());

        tx.commit();
    }

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        // Plain-cache get reloads from the store the projection left untouched.
        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));

        cache.remove(key);
    }

    assertTrue(storeStgy.getStoreSize() == 0);

    // cache putAll(..)/removeAll(..) check.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cache.putAll(data);

        for (String key : keys) {
            assertNotNull(cacheSkipStore.get(key));

            assertNotNull(cache.get(key));

            // The store is written at commit, not during the transaction.
            assertFalse(storeStgy.isInStore(key));
        }

        cache.removeAll(data.keySet());

        for (String key : keys) {
            assertNull(cacheSkipStore.get(key));

            assertNull(cache.get(key));

            assertFalse(storeStgy.isInStore(key));
        }

        tx.commit();
    }

    assertTrue(storeStgy.getStoreSize() == 0);

    // putAll(..) from both cacheSkipStore and cache.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        Map<String, Integer> subMap = new HashMap<>();

        for (int i = 0; i < keys.size() / 2; i++)
            subMap.put(keys.get(i), i);

        cacheSkipStore.putAll(subMap);

        subMap.clear();

        for (int i = keys.size() / 2; i < keys.size(); i++)
            subMap.put(keys.get(i), i);

        cache.putAll(subMap);

        for (String key : keys) {
            assertNotNull(cacheSkipStore.get(key));

            assertNotNull(cache.get(key));

            assertFalse(storeStgy.isInStore(key));
        }

        tx.commit();
    }

    // First half went through the skip-store projection: not persisted.
    for (int i = 0; i < keys.size() / 2; i++) {
        String key = keys.get(i);

        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    // Second half went through the plain cache: persisted on commit.
    for (int i = keys.size() / 2; i < keys.size(); i++) {
        String key = keys.get(i);

        assertNotNull(cacheSkipStore.get(key));

        assertNotNull(cache.get(key));

        assertTrue(storeStgy.isInStore(key));
    }

    cache.removeAll(data.keySet());

    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));

        assertNull(cache.get(key));

        assertFalse(storeStgy.isInStore(key));
    }

    // Check that read-through is disabled when cacheSkipStore is used.
    for (int i = 0; i < keys.size(); i++)
        storeStgy.putToStore(keys.get(i), i);

    assertTrue(cacheSkipStore.size(ALL) == 0);

    assertTrue(cache.size(ALL) == 0);

    assertTrue(storeStgy.getStoreSize() != 0);

    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        assertTrue(cacheSkipStore.getAll(data.keySet()).size() == 0);

        for (String key : keys) {
            assertNull(cacheSkipStore.get(key));

            if (txIsolation == READ_COMMITTED) {
                // A plain-cache read loads the entry; once loaded it is also
                // visible through the skip-store projection.
                assertNotNull(cache.get(key));

                assertNotNull(cacheSkipStore.get(key));
            }
        }

        tx.commit();
    }

    cache.removeAll(data.keySet());

    val = -1;

    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);

            // invoke() through the projection sees no old value despite the store entry.
            assertNull(cacheSkipStore.invoke(key, new SetValueProcessor(val)));
        }

        tx.commit();
    }

    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(val, cache.get(key));
    }

    cache.removeAll(data.keySet());

    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);

            // putIfAbsent ignores the store-only value: the put succeeds.
            assertTrue(cacheSkipStore.putIfAbsent(key, val));
        }

        tx.commit();
    }

    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(val, cache.get(key));
    }

    cache.removeAll(data.keySet());

    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);

            // getAndPut ignores the store-only value: no previous value is reported.
            assertNull(cacheSkipStore.getAndPut(key, val));
        }

        tx.commit();
    }

    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));

        assertEquals(val, cacheSkipStore.get(key));

        assertEquals(val, cache.get(key));
    }

    cache.removeAll(data.keySet());

    checkEmpty(cache, cacheSkipStore);
}
/**
 * Asserts that the cache, its skip-store projection and the backing store are all empty.
 *
 * @param cache Cache instance.
 * @param cacheSkipStore Cache skip store projection.
 * @throws Exception If failed.
 */
private void checkEmpty(IgniteCache<String, Integer> cache, IgniteCache<String, Integer> cacheSkipStore)
    throws Exception {
    // assertEquals reports the actual size on failure, unlike assertTrue(size == 0).
    assertEquals(0, cache.size(ALL));
    assertEquals(0, cacheSkipStore.size(ALL));
    assertEquals(0, storeStgy.getStoreSize());
}
/**
 * Resolves the cache start mode from the {@code cache.start.mode} system property.
 *
 * @return Cache start mode ({@link CacheStartMode#STATIC} when the property is unset or unknown).
 */
protected CacheStartMode cacheStartType() {
    String mode = System.getProperty("cache.start.mode");
    // Check the dynamic modes in order; anything else falls back to STATIC.
    for (CacheStartMode candidate : new CacheStartMode[] {CacheStartMode.NODES_THEN_CACHES, CacheStartMode.ONE_BY_ONE}) {
        if (candidate.name().equalsIgnoreCase(mode))
            return candidate;
    }
    return CacheStartMode.STATIC;
}
/**
 * Checks that getAllOutTx() bypasses the transaction using the synchronous API.
 *
 * @throws Exception If failed.
 */
public void testGetOutTx() throws Exception {
    checkGetOutTx(false);
}
/**
 * Checks that getAllOutTxAsync() bypasses the transaction using the async API.
 *
 * @throws Exception If failed.
 */
public void testGetOutTxAsync() throws Exception {
    checkGetOutTx(true);
}
/**
 * Verifies that getAllOutTx() reads an entry outside the current pessimistic
 * transaction and therefore produces the expected number of lock/unlock events.
 *
 * @param async If {@code true}, uses the async cache API.
 * @throws Exception If failed.
 */
private void checkGetOutTx(boolean async) throws Exception {
    final AtomicInteger lockEvtCnt = new AtomicInteger();
    // Counts every cache lock/unlock event fired on the local node.
    IgnitePredicate<Event> lsnr = new IgnitePredicate<Event>() {
        @Override public boolean apply(Event evt) {
            lockEvtCnt.incrementAndGet();
            return true;
        }
    };
    try {
        IgniteCache<String, Integer> cache = grid(0).cache(null);
        List<String> keys = primaryKeysForCache(cache, 2);
        assertEquals(2, keys.size());
        cache.put(keys.get(0), 0);
        cache.put(keys.get(1), 1);
        grid(0).events().localListen(lsnr, EVT_CACHE_OBJECT_LOCKED, EVT_CACHE_OBJECT_UNLOCKED);
        try (Transaction tx = transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            Integer val0;
            // In-tx read of the first key: acquires a lock under PESSIMISTIC mode.
            if (async)
                val0 = cache.getAsync(keys.get(0)).get();
            else
                val0 = cache.get(keys.get(0));
            assertEquals(0, val0.intValue());
            Map<String, Integer> allOutTx;
            // Out-of-tx read of the second key: must not enlist it in the transaction.
            if (async)
                allOutTx = cache.getAllOutTxAsync(F.asSet(keys.get(1))).get();
            else
                allOutTx = cache.getAllOutTx(F.asSet(keys.get(1)));
            assertEquals(1, allOutTx.size());
            assertTrue(allOutTx.containsKey(keys.get(1)));
            assertEquals(1, allOutTx.get(keys.get(1)).intValue());
        }
        // Expected counts: 0 for ATOMIC caches (no locking); 4 for partitioned
        // near-enabled server nodes (presumably lock+unlock pairs on both near
        // and DHT layers - TODO confirm); 2 (lock+unlock) otherwise.
        assertTrue(GridTestUtils.waitForCondition(new PA() {
            @Override public boolean apply() {
                info("Lock event count: " + lockEvtCnt.get());
                if (atomicityMode() == ATOMIC)
                    return lockEvtCnt.get() == 0;
                if (cacheMode() == PARTITIONED && nearEnabled()) {
                    if (!grid(0).configuration().isClientMode())
                        return lockEvtCnt.get() == 4;
                }
                return lockEvtCnt.get() == 2;
            }
        }, 15000));
    }
    finally {
        // Always deregister the listener so other tests see a clean event system.
        grid(0).events().stopLocalListen(lsnr, EVT_CACHE_OBJECT_LOCKED, EVT_CACHE_OBJECT_UNLOCKED);
    }
}
/**
 * Checks that an exception thrown from an entry processor surfaces to the caller
 * as {@link EntryProcessorException} when the processor is invoked asynchronously.
 *
 * @throws Exception If failed.
 */
public void testTransformException() throws Exception {
    final IgniteCache<String, Integer> cache = jcache();
    assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            // ERR_PROCESSOR always throws; the chained closure just unwraps the future.
            IgniteFuture fut = cache.invokeAsync("key2", ERR_PROCESSOR).chain(new IgniteClosure<IgniteFuture, Object>() {
                @Override public Object apply(IgniteFuture o) {
                    return o.get();
                }
            });
            fut.get();
            return null;
        }
    }, EntryProcessorException.class, null);
}
/**
 * Checks that acquiring an explicit cache lock (single-key or multi-key)
 * inside a transaction is rejected with {@link CacheException}.
 *
 * @throws Exception If failed.
 */
public void testLockInsideTransaction() throws Exception {
    if (txEnabled()) {
        // Single-key explicit lock inside a transaction must fail.
        assertExplicitLockFailsInTx(new Callable<Object>() {
            @Override public Object call() throws Exception {
                try (Transaction tx = ignite(0).transactions().txStart()) {
                    jcache(0).lock("key").lock();
                }
                return null;
            }
        });
        // Multi-key explicit lock inside a transaction must fail as well.
        assertExplicitLockFailsInTx(new Callable<Object>() {
            @Override public Object call() throws Exception {
                try (Transaction tx = ignite(0).transactions().txStart()) {
                    jcache(0).lockAll(Arrays.asList("key1", "key2")).lock();
                }
                return null;
            }
        });
    }
}

/**
 * Asserts that the given operation fails with a {@link CacheException}
 * complaining about explicit locking within a transaction.
 *
 * @param c Operation that acquires an explicit lock inside a transaction.
 */
private void assertExplicitLockFailsInTx(Callable<Object> c) {
    GridTestUtils.assertThrows(
        log,
        c,
        CacheException.class,
        "Explicit lock can't be acquired within a transaction."
    );
}
/**
 * Checks resource injection into entry processors across sync, new-async and
 * old-async invocation modes, both outside and inside transactions.
 *
 * @throws Exception If failed.
 */
public void testTransformResourceInjection() throws Exception {
    ClusterGroup servers = grid(0).cluster().forServers();
    // No server nodes to deploy the service on - nothing to test.
    if (F.isEmpty(servers.nodes()))
        return;
    grid(0).services(grid(0).cluster()).deployNodeSingleton(SERVICE_NAME1, new DummyServiceImpl());
    IgniteCache<String, Integer> cache = jcache();
    Ignite node = ignite(0);
    // {async, oldAsync} combinations to exercise, in the same order as before.
    boolean[][] modes = {{false, false}, {true, false}, {true, true}};
    for (boolean[] m : modes)
        doTransformResourceInjection(node, cache, m[0], m[1]);
    if (txEnabled()) {
        for (boolean[] m : modes)
            doTransformResourceInjectionInTx(node, cache, m[0], m[1]);
    }
}
/**
 * Runs the resource-injection checks inside a transaction for every
 * concurrency/isolation combination.
 *
 * @param ignite Node.
 * @param cache Cache.
 * @param async Use async API.
 * @param oldAsync Use old async API.
 * @throws Exception If failed.
 */
private void doTransformResourceInjectionInTx(Ignite ignite, IgniteCache<String, Integer> cache, boolean async,
    boolean oldAsync) throws Exception {
    IgniteTransactions txs = ignite.transactions();
    for (TransactionConcurrency concurrency : TransactionConcurrency.values()) {
        for (TransactionIsolation isolation : TransactionIsolation.values()) {
            try (Transaction tx = txs.txStart(concurrency, isolation)) {
                doTransformResourceInjection(ignite, cache, async, oldAsync);
                tx.commit();
            }
        }
    }
}
/**
 * Runs all resource-injection checks (invoke, invokeAll by keys, invokeAll by map)
 * while a remote cache-read event listener is registered.
 *
 * @param ignite Node.
 * @param cache Cache.
 * @param async Use async API.
 * @param oldAsync Use old async API.
 * @throws Exception If failed.
 */
private void doTransformResourceInjection(Ignite ignite, IgniteCache<String, Integer> cache, boolean async,
    boolean oldAsync) throws Exception {
    // Resources every entry-processor invocation is expected to have injected.
    final Collection<ResourceType> required = Arrays.asList(ResourceType.IGNITE_INSTANCE,
        ResourceType.CACHE_NAME,
        ResourceType.LOGGER);
    final CacheEventListener lsnr = new CacheEventListener();
    IgniteEvents evts = ignite.events(ignite.cluster());
    UUID opId = evts.remoteListen(lsnr, null, EventType.EVT_CACHE_OBJECT_READ);
    try {
        checkResourceInjectionOnInvoke(cache, required, async, oldAsync);
        checkResourceInjectionOnInvokeAll(cache, required, async, oldAsync);
        checkResourceInjectionOnInvokeAllMap(cache, required, async, oldAsync);
    }
    finally {
        // Always deregister the remote listener to avoid leaking it between runs.
        evts.stopRemoteListen(opId);
    }
}
/**
 * Tests invokeAll method for map of pairs (key, entryProcessor).
 *
 * @param cache Cache.
 * @param required Expected injected resources.
 * @param async Use async API.
 * @param oldAsync Use old async API.
 */
private void checkResourceInjectionOnInvokeAllMap(IgniteCache<String, Integer> cache,
    Collection<ResourceType> required, boolean async, boolean oldAsync) {
    Map<String, EntryProcessorResult<Integer>> results;
    // Build the (random key -> processor) pairs in a loop instead of four copy-pasted puts.
    Map<String, EntryProcessor<String, Integer, Integer>> map = new HashMap<>();
    for (int i = 0; i < 4; i++)
        map.put(UUID.randomUUID().toString(), new ResourceInjectionEntryProcessor());
    if (async) {
        if (oldAsync) {
            // Legacy async API: switch the cache to async mode and read the cache future.
            IgniteCache<String, Integer> acache = cache.withAsync();
            acache.invokeAll(map);
            results = acache.<Map<String, EntryProcessorResult<Integer>>>future().get();
        }
        else
            results = cache.invokeAllAsync(map).get();
    }
    else
        results = cache.invokeAll(map);
    assertEquals(map.size(), results.size());
    // Every invocation must report that all required resources were injected.
    for (EntryProcessorResult<Integer> res : results.values()) {
        Collection<ResourceType> notInjected = ResourceInfoSet.valueOf(res.get()).notInjected(required);
        if (!notInjected.isEmpty())
            fail("Can't inject resource(s): " + Arrays.toString(notInjected.toArray()));
    }
}
/**
 * Tests invokeAll method for set of keys.
 *
 * @param cache Cache.
 * @param required Expected injected resources.
 * @param async Use async API.
 * @param oldAsync Use old async API.
 */
private void checkResourceInjectionOnInvokeAll(IgniteCache<String, Integer> cache,
    Collection<ResourceType> required, boolean async, boolean oldAsync) {
    // Generate the random keys in a loop instead of four copy-pasted calls.
    Set<String> keys = new HashSet<>();
    for (int i = 0; i < 4; i++)
        keys.add(UUID.randomUUID().toString());
    Map<String, EntryProcessorResult<Integer>> results;
    if (async) {
        if (oldAsync) {
            // Legacy async API: switch the cache to async mode and read the cache future.
            IgniteCache<String, Integer> acache = cache.withAsync();
            acache.invokeAll(keys, new ResourceInjectionEntryProcessor());
            results = acache.<Map<String, EntryProcessorResult<Integer>>>future().get();
        }
        else
            results = cache.invokeAllAsync(keys, new ResourceInjectionEntryProcessor()).get();
    }
    else
        results = cache.invokeAll(keys, new ResourceInjectionEntryProcessor());
    assertEquals(keys.size(), results.size());
    // Every invocation must report that all required resources were injected.
    for (EntryProcessorResult<Integer> res : results.values()) {
        Collection<ResourceType> notInjected = ResourceInfoSet.valueOf(res.get()).notInjected(required);
        if (!notInjected.isEmpty())
            fail("Can't inject resource(s): " + Arrays.toString(notInjected.toArray()));
    }
}
/**
 * Tests invoke for single key.
 *
 * @param cache Cache.
 * @param required Expected injected resources.
 * @param async Use async API.
 * @param oldAsync Use old async API.
 */
private void checkResourceInjectionOnInvoke(IgniteCache<String, Integer> cache,
    Collection<ResourceType> required, boolean async, boolean oldAsync) {
    String key = UUID.randomUUID().toString();
    // Bit set encoding which resources were injected into the processor.
    Integer flags;
    if (async) {
        if (oldAsync) {
            // Legacy async API: switch the cache to async mode and read the cache future.
            IgniteCache<String, Integer> acache = cache.withAsync();
            acache.invoke(key, new GridCacheAbstractFullApiSelfTest.ResourceInjectionEntryProcessor());
            flags = acache.<Integer>future().get();
        }
        else
            flags = cache.invokeAsync(key,
                new GridCacheAbstractFullApiSelfTest.ResourceInjectionEntryProcessor()).get();
    }
    else
        flags = cache.invoke(key, new GridCacheAbstractFullApiSelfTest.ResourceInjectionEntryProcessor());
    // NOTE(review): `cache` here is the caller's instance, not `acache`; this looks
    // like leftover old-async handling - confirm whether this branch is ever taken.
    if (cache.isAsync())
        flags = cache.<Integer>future().get();
    assertTrue("Processor result is null", flags != null);
    Collection<ResourceType> notInjected = ResourceInfoSet.valueOf(flags).notInjected(required);
    if (!notInjected.isEmpty())
        fail("Can't inject resource(s): " + Arrays.toString(notInjected.toArray()));
}
/**
 * Sets given value, returns old value.
 */
public static final class SetValueProcessor implements EntryProcessor<String, Integer, Integer> {
    /** New value to set; immutable after construction. */
    private final Integer newVal;

    /**
     * @param newVal New value to set.
     */
    SetValueProcessor(Integer newVal) {
        this.newVal = newVal;
    }

    /** {@inheritDoc} */
    @Override public Integer process(MutableEntry<String, Integer> entry,
        Object... arguments) throws EntryProcessorException {
        Integer val = entry.getValue();
        entry.setValue(newVal);
        return val;
    }
}
/**
 * Order in which test nodes and their caches are started.
 */
public enum CacheStartMode {
    /** Start caches together with nodes (not dynamically). */
    STATIC,
    /** Start all nodes first, then the caches. */
    NODES_THEN_CACHES,
    /** Start nodes and caches one by one. */
    ONE_BY_ONE
}
/**
 * Entry processor that removes the entry and returns the string form of the old value.
 */
private static class RemoveEntryProcessor implements EntryProcessor<String, Integer, String>, Serializable {
    /** {@inheritDoc} */
    @Override public String process(MutableEntry<String, Integer> e, Object... args) {
        assertNotNull(e.getKey());
        Integer old = e.getValue();
        e.remove();
        // String.valueOf(null) yields the string "null" for absent entries.
        return String.valueOf(old);
    }
}
/**
 * Entry processor that increments the entry value (treating an absent value
 * as zero) and returns the string form of the previous value.
 */
private static class IncrementEntryProcessor implements EntryProcessor<String, Integer, String>, Serializable {
    /** {@inheritDoc} */
    @Override public String process(MutableEntry<String, Integer> e, Object... args) {
        assertNotNull(e.getKey());
        Integer prev = e.getValue();
        // Absent value counts as zero, so the first increment stores 1.
        if (prev == null)
            e.setValue(1);
        else
            e.setValue(prev + 1);
        return String.valueOf(prev);
    }
}
/**
 * Entry processor that records which resources were injected into it (via the
 * inherited {@code infoSet}) and randomizes the entry value before delegating
 * to the base implementation.
 */
public static class ResourceInjectionEntryProcessor extends ResourceInjectionEntryProcessorBase<String, Integer> {
    /** Injected Ignite instance (transient: not carried across serialization). */
    protected transient Ignite ignite;
    /** Injected cache name. */
    protected transient String cacheName;
    /** Injected logger. */
    protected transient IgniteLogger log;
    /** Injected service instance. */
    protected transient DummyService svc;
    /**
     * @param ignite Ignite.
     */
    @IgniteInstanceResource
    public void setIgnite(Ignite ignite) {
        assert ignite != null;
        checkSet();
        // Record successful injection so the test can verify it later.
        infoSet.set(ResourceType.IGNITE_INSTANCE, true);
        this.ignite = ignite;
    }
    /**
     * @param cacheName Cache name.
     */
    @CacheNameResource
    public void setCacheName(String cacheName) {
        checkSet();
        infoSet.set(ResourceType.CACHE_NAME, true);
        this.cacheName = cacheName;
    }
    /**
     * @param log Logger.
     */
    @LoggerResource
    public void setLoggerResource(IgniteLogger log) {
        assert log != null;
        checkSet();
        infoSet.set(ResourceType.LOGGER, true);
        this.log = log;
    }
    /**
     * @param svc Service.
     */
    @ServiceResource(serviceName = SERVICE_NAME1)
    public void setDummyService(DummyService svc) {
        assert svc != null;
        checkSet();
        infoSet.set(ResourceType.SERVICE, true);
        this.svc = svc;
    }
    /** {@inheritDoc} */
    @Override public Integer process(MutableEntry<String, Integer> e, Object... args) {
        Integer oldVal = e.getValue();
        // Mutate the entry to a random value; the returned flags come from the base class.
        e.setValue(ThreadLocalRandom.current().nextInt() + (oldVal == null ? 0 : oldVal));
        return super.process(e, args);
    }
}
/**
 * Per-node task verifying that every locally-owned key has a live (non-deleted)
 * cache entry and that the local cache size matches the number of owned keys.
 */
private static class CheckEntriesTask extends TestIgniteIdxRunnable {
    /** Keys. */
    private final Collection<String> keys;
    /**
     * @param keys Keys.
     */
    public CheckEntriesTask(Collection<String> keys) {
        this.keys = keys;
    }
    /** {@inheritDoc} */
    @Override public void run(int idx) throws Exception {
        GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
        int size = 0;
        // For near caches inspect the underlying DHT cache context instead.
        if (ctx.isNear())
            ctx = ctx.near().dht().context();
        for (String key : keys) {
            // Only check keys this node is responsible for at the current topology.
            if (ctx.affinity().keyLocalNode(key, ctx.discovery().topologyVersionEx())) {
                GridCacheEntryEx e = ctx.cache().entryEx(key);
                assert e != null : "Entry is null [idx=" + idx + ", key=" + key + ", ctx=" + ctx + ']';
                assert !e.deleted() : "Entry is deleted: " + e;
                size++;
                // Return the entry to the eviction policy after inspecting it.
                ctx.evicts().touch(e, null);
            }
        }
        assertEquals("Incorrect size on cache #" + idx, size, ignite.cache(ctx.name()).localSize(ALL));
    }
}
/**
 * Per-node task verifying that the local cache size equals the number of keys
 * from the given map that the node owns at the current topology.
 */
private static class CheckCacheSizeTask extends TestIgniteIdxRunnable {
    /** Expected cache content (only the key set is used). */
    private final Map<String, Integer> map;
    /**
     * @param map Map.
     */
    CheckCacheSizeTask(Map<String, Integer> map) {
        this.map = map;
    }
    /** {@inheritDoc} */
    @Override public void run(int idx) throws Exception {
        GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
        int size = 0;
        // Count the keys this node is responsible for.
        for (String key : map.keySet())
            if (ctx.affinity().keyLocalNode(key, ctx.discovery().topologyVersionEx()))
                size++;
        assertEquals("Incorrect key size on cache #" + idx, size, ignite.cache(ctx.name()).localSize(ALL));
    }
}
/**
 * Task that collects a fixed number of keys for which the local node is primary.
 */
private static class CheckPrimaryKeysTask implements TestCacheCallable<String, Integer, List<String>> {
    /** First key index to probe. */
    private final int startFrom;
    /** Number of primary keys to collect. */
    private final int cnt;

    /**
     * @param startFrom Start from.
     * @param cnt Count.
     */
    public CheckPrimaryKeysTask(int startFrom, int cnt) {
        this.startFrom = startFrom;
        this.cnt = cnt;
    }

    /** {@inheritDoc} */
    @Override public List<String> call(Ignite ignite, IgniteCache<String, Integer> cache) throws Exception {
        Affinity<Object> aff = ignite.affinity(cache.getName());
        List<String> primaryKeys = new ArrayList<>();
        // Probe up to 100k candidate keys looking for ones mapped to this node.
        for (int i = startFrom; i < startFrom + 100_000; i++) {
            String candidate = "key" + i;
            if (aff.isPrimary(ignite.cluster().localNode(), candidate)) {
                primaryKeys.add(candidate);
                if (primaryKeys.size() == cnt)
                    return primaryKeys;
            }
        }
        throw new IgniteException("Unable to find " + cnt + " keys as primary for cache.");
    }
}
/**
 * Task returning the (ttl, expireTime) pair of a cache entry, or a pair of
 * nulls when the entry is absent.
 */
public static class EntryTtlTask implements TestCacheCallable<String, Integer, IgnitePair<Long>> {
    /** Entry key. */
    private final String key;
    /** Check cache for nearness, use DHT cache if it is near. */
    private final boolean useDhtForNearCache;
    /**
     * @param key Entry key.
     * @param useDhtForNearCache Check cache for nearness, use DHT cache if it is near.
     */
    public EntryTtlTask(String key, boolean useDhtForNearCache) {
        this.key = key;
        this.useDhtForNearCache = useDhtForNearCache;
    }
    /** {@inheritDoc} */
    @Override public IgnitePair<Long> call(Ignite ignite, IgniteCache<String, Integer> cache) throws Exception {
        GridCacheAdapter<?, ?> internalCache = internalCache0(cache);
        // Optionally drill down to the DHT cache backing a near cache.
        if (useDhtForNearCache && internalCache.context().isNear())
            internalCache = internalCache.context().near().dht();
        // peekEx() returns null when the entry does not exist locally.
        GridCacheEntryEx entry = internalCache.peekEx(key);
        return entry != null ?
            new IgnitePair<>(entry.ttl(), entry.expireTime()) :
            new IgnitePair<Long>(null, null);
    }
}
/**
 * Per-node task asserting that no query iterators are left registered in the
 * cache query manager (i.e. all iterators were properly closed).
 */
private static class CheckIteratorTask extends TestIgniteIdxCallable<Void> {
    /**
     * @param idx Index.
     */
    @Override public Void call(int idx) throws Exception {
        GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
        GridCacheQueryManager queries = ctx.queries();
        // Reflectively read the manager's private iterator registry.
        ConcurrentMap<UUID, Map<Long, GridFutureAdapter<?>>> map = GridTestUtils.getFieldValue(queries,
            GridCacheQueryManager.class, "qryIters");
        for (Map<Long, GridFutureAdapter<?>> map1 : map.values())
            assertTrue("Iterators not removed for grid " + idx, map1.isEmpty());
        return null;
    }
}
/**
 * Per-node diagnostic task that logs any query iterators still registered in
 * the cache query manager (an iterator leak) without failing the test.
 */
private static class PrintIteratorStateTask extends TestIgniteIdxCallable<Void> {
    /** Injected logger used for the leak report. */
    @LoggerResource
    private IgniteLogger log;
    /**
     * @param idx Index.
     */
    @Override public Void call(int idx) throws Exception {
        GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
        GridCacheQueryManager queries = ctx.queries();
        // Reflectively read the manager's private iterator registry.
        ConcurrentMap<UUID, Map<Long, GridFutureAdapter<?>>> map = GridTestUtils.getFieldValue(queries,
            GridCacheQueryManager.class, "qryIters");
        for (Map<Long, GridFutureAdapter<?>> map1 : map.values()) {
            if (!map1.isEmpty()) {
                log.warning("Iterators leak detected at grid: " + idx);
                for (Map.Entry<Long, GridFutureAdapter<?>> entry : map1.entrySet())
                    log.warning(entry.getKey() + "; " + entry.getValue());
            }
        }
        return null;
    }
}
/**
 * Entry processor that removes the entry and always returns {@code null}.
 */
private static class RemoveAndReturnNullEntryProcessor implements
    EntryProcessor<String, Integer, Integer>, Serializable {
    /** {@inheritDoc} */
    @Override public Integer process(MutableEntry<String, Integer> e, Object... args) {
        e.remove();
        return null;
    }
}
/**
 * Local event listener counting swap and unswap cache events.
 */
private static class SwapEvtsLocalListener implements IgnitePredicate<Event> {
    /** Injected logger. */
    @LoggerResource
    private IgniteLogger log;

    /** Counter for swapped-object events. */
    private final AtomicInteger swapEvts;

    /** Counter for unswapped-object events. */
    private final AtomicInteger unswapEvts;

    /**
     * @param swapEvts Swap events.
     * @param unswapEvts Unswap events.
     */
    public SwapEvtsLocalListener(AtomicInteger swapEvts, AtomicInteger unswapEvts) {
        this.swapEvts = swapEvts;
        this.unswapEvts = unswapEvts;
    }

    /** {@inheritDoc} */
    @Override public boolean apply(Event evt) {
        log.info("Received event: " + evt);
        // Bump the matching counter; other event types are ignored.
        int type = evt.type();
        if (type == EVT_CACHE_OBJECT_SWAPPED)
            swapEvts.incrementAndGet();
        else if (type == EVT_CACHE_OBJECT_UNSWAPPED)
            unswapEvts.incrementAndGet();
        return true;
    }
}
/**
 * Per-node task verifying that removed keys owned by this node still have
 * tombstone ("deleted") entries, while non-owned keys have no entry at all.
 */
private static class CheckEntriesDeletedTask extends TestIgniteIdxRunnable {
    /** Number of keys (0..cnt-1, stringified) to check. */
    private final int cnt;
    /**
     * @param cnt Keys count.
     */
    public CheckEntriesDeletedTask(int cnt) {
        this.cnt = cnt;
    }
    /** {@inheritDoc} */
    @Override public void run(int idx) throws Exception {
        for (int i = 0; i < cnt; i++) {
            String key = String.valueOf(i);
            GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
            // For near caches inspect the DHT layer's entry.
            GridCacheEntryEx entry = ctx.isNear() ? ctx.near().dht().peekEx(key) : ctx.cache().peekEx(key);
            if (ignite.affinity(null).mapKeyToPrimaryAndBackups(key).contains(((IgniteKernal)ignite).localNode())) {
                // Owner nodes keep a deleted-marker entry after removal.
                assertNotNull(entry);
                assertTrue(entry.deleted());
            }
            else
                assertNull(entry);
        }
    }
}
/**
 * Per-node task verifying that the local cache size equals the number of
 * given keys that the node owns at the current topology.
 */
private static class CheckKeySizeTask extends TestIgniteIdxRunnable {
    /** Keys. */
    private final Collection<String> keys;
    /**
     * @param keys Keys.
     */
    public CheckKeySizeTask(Collection<String> keys) {
        this.keys = keys;
    }
    /** {@inheritDoc} */
    @Override public void run(int idx) throws Exception {
        GridCacheContext<String, Integer> ctx = ((IgniteKernal)ignite).<String, Integer>internalCache().context();
        int size = 0;
        // Count the keys this node is responsible for.
        for (String key : keys)
            if (ctx.affinity().keyLocalNode(key, ctx.discovery().topologyVersionEx()))
                size++;
        assertEquals("Incorrect key size on cache #" + idx, size, ignite.cache(null).localSize(ALL));
    }
}
/**
 * Entry processor that always throws, used to test error propagation.
 */
private static class FailedEntryProcessor implements EntryProcessor<String, Integer, Integer>, Serializable {
    /** {@inheritDoc} */
    @Override public Integer process(MutableEntry<String, Integer> e, Object... args) {
        throw new EntryProcessorException("Test entry processor exception.");
    }
}
/**
 * Simple serializable int wrapper used as a cache value in tests.
 * Immutable; equality and hash code are defined by the wrapped value.
 */
private static class TestValue implements Serializable {
    /** Wrapped value. */
    private final int val;

    /**
     * @param val Value.
     */
    TestValue(int val) {
        this.val = val;
    }

    /**
     * @return Value.
     */
    public int value() {
        return val;
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object o) {
        if (this == o)
            return true;
        // Equality is defined purely by the wrapped int.
        return o instanceof TestValue && val == ((TestValue)o).val;
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        return val;
    }
}
/**
 * Dummy Service used to test service resource injection.
 */
public interface DummyService {
    /**
     * No-op method; exists only so the service has a callable API.
     */
    public void noop();
}
/**
 * No-op test service; lifecycle callbacks only print to stdout.
 */
public static class DummyServiceImpl implements DummyService, Service {
    /** */
    private static final long serialVersionUID = 0L;
    /** {@inheritDoc} */
    @Override public void noop() {
        // No-op.
    }
    /** {@inheritDoc} */
    @Override public void cancel(ServiceContext ctx) {
        System.out.println("Cancelling service: " + ctx.name());
    }
    /** {@inheritDoc} */
    @Override public void init(ServiceContext ctx) throws Exception {
        System.out.println("Initializing service: " + ctx.name());
    }
    /** {@inheritDoc} */
    @Override public void execute(ServiceContext ctx) {
        System.out.println("Executing service: " + ctx.name());
    }
}
/**
 * Event listener that collects received cache events into a queue; usable both
 * as a remote (bi-predicate) and a local (predicate) listener.
 */
public static class CacheEventListener implements IgniteBiPredicate<UUID, CacheEvent>, IgnitePredicate<CacheEvent> {
    /** Queue of received events, in arrival order. */
    public final LinkedBlockingQueue<CacheEvent> evts = new LinkedBlockingQueue<>();
    /** {@inheritDoc} */
    @Override public boolean apply(UUID uuid, CacheEvent evt) {
        evts.add(evt);
        return true;
    }
    /** {@inheritDoc} */
    @Override public boolean apply(CacheEvent evt) {
        evts.add(evt);
        return true;
    }
}
}
|
package com.nonstoppvp.core.utils;
import com.mongodb.BasicDBObject;
import com.nonstoppvp.core.NSPCore;
import com.nonstoppvp.core.profiles.NSPPlayer;
import org.bson.Document;
import org.bukkit.Bukkit;
import java.util.Map;
public class PlayerUtils
{
    /**
     * Handles a player join: creates a fresh Mongo document with default
     * settings and social-media entries for first-time players, or loads the
     * existing profile. All Mongo I/O runs off the main server thread.
     *
     * @param player joining player's profile wrapper.
     */
    public static void handleJoin(final NSPPlayer player)
    {
        player.setLoaded(false);
        Bukkit.getScheduler().runTaskAsynchronously(NSPCore.getInstance(), () ->
        {
            if (!NSPCore.getInstance().getMongoManager().doesPlayerExist(player))
            {
                // First join: seed default settings and social-media handles.
                Map<String, Boolean> settings = player.getSettings();
                Map<String, String> socialMedia = player.getSocialMedia();
                Document document = new Document();
                settings.put("playerHide", false);
                settings.put("censorship", false);
                settings.put("allowFriendRequests", true);
                socialMedia.put("twitter", "None");
                socialMedia.put("youtube", "None");
                socialMedia.put("instagram", "None");
                socialMedia.put("twitch", "None");
                socialMedia.put("discord", "None");
                player.setLevel(1);
                player.setExp(0);
                player.setOrbs(0);
                player.setRank("");
                document.append("uuid", player.getUuid().toString());
                document.append("level", player.getLevel());
                document.append("exp", player.getExp());
                document.append("orbs", player.getOrbs());
                document.append("friends", player.getFriends());
                document.append("socialmedia", new BasicDBObject(socialMedia));
                document.append("settings", new BasicDBObject(settings));
                NSPCore.getInstance().getMongoManager().createPlayerDocument(player, document);
                ItemUtils.joinInventory(Bukkit.getPlayer(player.getUuid()));
                player.loadGUIs();
                player.setLoaded(true);
            }
            else
            {
                // Existing profile: load it from Mongo.
                // NOTE(review): setLoaded(true) is not called on this path -
                // presumably loadPlayer() flips the flag itself; confirm.
                player.loadPlayer();
                ItemUtils.joinInventory(Bukkit.getPlayer(player.getUuid()));
            }
        });
    }
}
|
// ============================================================================
//
// Copyright (C) 2006-2021 Talend Inc. - www.talend.com
//
// This source code is available under agreement available at
// %InstallDIR%\features\org.talend.rcp.branding.%PRODUCTNAME%\%PRODUCTNAME%license.txt
//
// You should have received a copy of the agreement
// along with this program; if not, write to Talend SA
// 9 rue Pages 92150 Suresnes, France
//
// ============================================================================
package org.talend.dataprofiler.core.ui.wizard.analysis.table;
import org.eclipse.jface.viewers.CheckboxTreeViewer;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.FillLayout;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.ui.dialogs.ContainerCheckedTreeViewer;
import org.talend.core.model.general.Project;
import org.talend.core.model.repository.ERepositoryObjectType;
import org.talend.dataprofiler.core.i18n.internal.DefaultMessagesImpl;
import org.talend.dataprofiler.core.ui.utils.AnalysisUtils;
import org.talend.dataprofiler.core.ui.views.provider.ResourceViewContentProvider;
import org.talend.dataprofiler.core.ui.wizard.analysis.AbstractAnalysisWizardPage;
import org.talend.dq.helper.ProxyRepositoryManager;
import org.talend.dq.helper.RepositoryNodeHelper;
import org.talend.dq.nodes.DQRepositoryNode;
import org.talend.repository.ProjectManager;
import org.talend.repository.model.IRepositoryNode;
import org.talend.repository.model.IRepositoryNode.ENodeType;
import org.talend.repository.model.RepositoryNode;
import org.talend.resource.EResourceConstant;
/**
* DOC xqliu class global comment. Detailled comment
*/
public class DQRuleSelectPage extends AbstractAnalysisWizardPage {
private CheckboxTreeViewer cViewer;
public CheckboxTreeViewer getCViewer() {
return cViewer;
}
public void setCViewer(CheckboxTreeViewer viewer) {
cViewer = viewer;
}
private final String pageTitle = DefaultMessagesImpl.getString("DQRuleSelectPage.newAnalysis"); //$NON-NLS-1$
private final String pageMessage = DefaultMessagesImpl.getString("DQRuleSelectPage.selectDQRule"); //$NON-NLS-1$
public DQRuleSelectPage() {
setTitle(pageTitle);
setDescription(pageMessage);
}
public void createControl(Composite parent) {
Composite container = new Composite(parent, SWT.NONE);
FillLayout layout = new FillLayout();
container.setLayout(layout);
cViewer = new ContainerCheckedTreeViewer(container, SWT.NONE);
cViewer.setLabelProvider(new DQRuleLabelProvider());
ResourceViewContentProvider cContentProvider = new ResourceViewContentProvider();
cViewer.setContentProvider(cContentProvider);
cViewer.setInput(getNodeListWithReferenceProject(ERepositoryObjectType.TDQ_RULES_SQL));
// MOD gdbu 2011-7-25 bug : 23220
((ResourceViewContentProvider) cContentProvider).setTreeViewer(cViewer);
cViewer.addFilter(AnalysisUtils.createRuleFilter());
setControl(container);
}
protected RepositoryNode getNodeListWithReferenceProject(ERepositoryObjectType type) {
if (!ProxyRepositoryManager.getInstance().isLocalProject() && !ProxyRepositoryManager.getInstance().isMergeRefProject()) {
DQRepositoryNode node = new DQRepositoryNode(null, null, ENodeType.SYSTEM_FOLDER, ProjectManager.getInstance()
.getCurrentProject());
node.getChildren().clear();
java.util.Set<Project> allProjects = ProxyRepositoryManager.getInstance().getAllProjects();
for (Project project : allProjects) {
IRepositoryNode metaRootNode = RepositoryNodeHelper.getLibrariesFolderNode(EResourceConstant.RULES_SQL, project);
if (metaRootNode != null) {
node.getChildren().add(metaRootNode);
}
}
return node;
} else {
return (RepositoryNode) RepositoryNodeHelper.getLibrariesFolderNode(EResourceConstant.RULES);
}
}
@Override
public boolean canFlipToNextPage() {
return false;
}
@Override
public boolean isCanFinishEarly() {
return true;
}
@Override
public boolean isHasPages() {
return false;
}
}
|
package org.coody.framework.serializer;
import org.coody.framework.container.ThreadSetContainer;
import org.coody.framework.serializer.iface.AbstractSerializer;
/**
 * Serializes an object array to a JSON array string.
 */
public class ArraySerializer extends AbstractSerializer<Object[]> {

    /**
     * Converts the given array to its JSON representation.
     *
     * @param target array to serialize; may be {@code null}.
     * @return {@code null} for a null array; {@code "[]"} for an empty array or
     *         when the array is already being serialized on this thread (cycle
     *         guard); otherwise a JSON array string.
     */
    @Override
    public String adapt(Object[] target) {
        if (target == null) {
            return null;
        }
        if (target.length == 0) {
            return "[]";
        }
        // Cycle guard: bail out if this array is already on the current
        // thread's serialization stack.
        // NOTE(review): the array is never removed from ThreadSetContainer
        // here - presumably cleared elsewhere; confirm.
        if (!ThreadSetContainer.add(target)) {
            return "[]";
        }
        // Join elements with commas directly instead of the original
        // prepend-comma-then-strip approach (which rescanned the string).
        StringBuilder jsonBuilder = new StringBuilder("[");
        for (int i = 0; i < target.length; i++) {
            if (i > 0) {
                jsonBuilder.append(",");
            }
            Object line = target[i];
            jsonBuilder.append(line == null ? "null" : AbstractSerializer.serialize(line));
        }
        return jsonBuilder.append("]").toString();
    }
}
|
/*
* Copyright 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.glowroot.agent.plugin.httpclient;
import java.io.IOException;
import com.squareup.okhttp.Callback;
import com.squareup.okhttp.Request;
import com.squareup.okhttp.Response;
import org.glowroot.agent.plugin.api.AsyncTraceEntry;
import org.glowroot.agent.plugin.api.AuxThreadContext;
import org.glowroot.agent.plugin.api.TraceEntry;
/**
 * Wraps a user OkHttp 2.x {@link Callback}: completes the async trace entry for
 * the HTTP call, then runs the delegate inside an auxiliary thread context so
 * its work is attributed to the originating trace.
 */
public class OkHttp2xCallbackWrapper implements Callback {
    /** User callback to delegate to. */
    private final Callback delegate;
    /** Trace entry representing the outbound HTTP request. */
    private final AsyncTraceEntry asyncTraceEntry;
    /** Captured context of the thread that issued the request. */
    private final AuxThreadContext auxContext;
    public OkHttp2xCallbackWrapper(Callback delegate, AsyncTraceEntry asyncTraceEntry,
            AuxThreadContext auxContext) {
        this.delegate = delegate;
        this.asyncTraceEntry = asyncTraceEntry;
        this.auxContext = auxContext;
    }
    @Override
    public void onFailure(Request request, IOException exception) {
        // The HTTP call failed; record the error on its trace entry first.
        asyncTraceEntry.endWithError(exception);
        // Run the user callback attributed to the originating trace.
        TraceEntry traceEntry = auxContext.start();
        try {
            delegate.onFailure(request, exception);
        } catch (Throwable t) {
            traceEntry.endWithError(t);
            throw rethrow(t);
        }
        traceEntry.end();
    }
    @Override
    public void onResponse(Response response) throws IOException {
        // The HTTP call succeeded; close its trace entry before delegating.
        asyncTraceEntry.end();
        TraceEntry traceEntry = auxContext.start();
        try {
            delegate.onResponse(response);
        } catch (Throwable t) {
            traceEntry.endWithError(t);
            throw rethrow(t);
        }
        traceEntry.end();
    }
    /** Rethrows any {@link Throwable} unchecked via the generic-erasure trick below. */
    private static RuntimeException rethrow(Throwable t) {
        OkHttp2xCallbackWrapper.<RuntimeException>throwsUnchecked(t);
        throw new AssertionError();
    }
    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void throwsUnchecked(Throwable t) throws T {
        throw (T) t;
    }
}
|
package org.jembi.bsis.model.reporting;
import java.util.Objects;
/**
 * A reporting cohort: a category, the selected option within it, and the
 * comparator (project-declared type) describing how the option is matched.
 */
public class Cohort {

    /** Category this cohort groups by. */
    private String category;
    /** Selected option value within the category. */
    private Object option;
    /** Comparator describing how the option is evaluated. */
    private Comparator comparator;

    public String getCategory() {
        return category;
    }

    public void setCategory(String category) {
        this.category = category;
    }

    public Object getOption() {
        return option;
    }

    public void setOption(Object option) {
        this.option = option;
    }

    public Comparator getComparator() {
        return comparator;
    }

    public void setComparator(Comparator comparator) {
        this.comparator = comparator;
    }

    /** Two cohorts are equal when all three components are equal. */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof Cohort)) {
            return false;
        }
        Cohort that = (Cohort) obj;
        return Objects.equals(category, that.category)
                && Objects.equals(option, that.option)
                && Objects.equals(comparator, that.comparator);
    }

    @Override
    public int hashCode() {
        return Objects.hash(category, option, comparator);
    }
}
|
package com.potato.boot.config.rocketmq;
import org.apache.commons.lang3.StringUtils;
import org.apache.rocketmq.client.exception.MQBrokerException;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.DefaultMQProducer;
import org.apache.rocketmq.client.producer.LocalTransactionExecuter;
import org.apache.rocketmq.client.producer.MessageQueueSelector;
import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.client.producer.TransactionSendResult;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.common.message.MessageQueue;
import org.apache.rocketmq.remoting.exception.RemotingException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
/**
 * Thin proxy around a RocketMQ {@link DefaultMQProducer} that applies an
 * environment-specific topic prefix to every outgoing message before delegating
 * to the matching producer {@code send} overload.
 */
public class ProduceProxy {
    private static final Logger log = LoggerFactory.getLogger(ProduceProxy.class);
    private final String nameSrv;
    private final String groupName;
    /** Environment prefix prepended to every topic; empty means no prefix. */
    private final String env = "";
    private DefaultMQProducer producer;

    /**
     * Creates and starts the underlying producer.
     * NOTE(review): a startup failure is only logged, leaving {@code producer}
     * null so later sends will NPE — confirm whether fail-fast is preferred.
     *
     * @param nameSrv   name-server address list
     * @param groupName producer group name
     */
    public ProduceProxy(String nameSrv, String groupName) {
        this.nameSrv = nameSrv;
        this.groupName = groupName;
        try {
            this.producer = new DefaultMQProducer(groupName);
            this.producer.setNamesrvAddr(this.nameSrv);
            this.producer.start();
        } catch (Exception e) {
            log.error("init rocket produce error", e);
        }
    }

    /** Applies the environment topic prefix to a single message (no-op when env is empty). */
    private void preCondition(Message msg) {
        if (StringUtils.isNotEmpty(this.env)) {
            msg.setTopic(this.env + msg.getTopic());
        }
    }

    /** Applies the environment topic prefix to every message in a batch. */
    private void preCondition(Collection<Message> msgs) {
        for (Message msg : msgs) {
            preCondition(msg);
        }
    }

    public SendResult send(Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg);
    }

    public SendResult send(Message msg, long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg, timeout);
    }

    public void send(Message msg, SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, sendCallback);
    }

    public void send(Message msg, SendCallback sendCallback, long timeout) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, sendCallback, timeout);
    }

    public SendResult send(Message msg, MessageQueue mq) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg, mq);
    }

    public SendResult send(Message msg, MessageQueue mq, long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg, mq, timeout);
    }

    public void send(Message msg, MessageQueue mq, SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, mq, sendCallback);
    }

    public void send(Message msg, MessageQueue mq, SendCallback sendCallback, long timeout) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, mq, sendCallback, timeout);
    }

    public SendResult send(Message msg, MessageQueueSelector selector, Object arg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg, selector, arg);
    }

    public SendResult send(Message msg, MessageQueueSelector selector, Object arg, long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msg);
        return this.producer.send(msg, selector, arg, timeout);
    }

    public void send(Message msg, MessageQueueSelector selector, Object arg, SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, selector, arg, sendCallback);
    }

    public void send(Message msg, MessageQueueSelector selector, Object arg, SendCallback sendCallback, long timeout) throws MQClientException, RemotingException, InterruptedException {
        this.preCondition(msg);
        this.producer.send(msg, selector, arg, sendCallback, timeout);
    }

    public TransactionSendResult sendMessageInTransaction(Message msg, LocalTransactionExecuter tranExecuter, Object arg) throws MQClientException {
        this.preCondition(msg);
        return this.producer.sendMessageInTransaction(msg, tranExecuter, arg);
    }

    public SendResult send(Collection<Message> msgs) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msgs);
        return this.producer.send(msgs);
    }

    public SendResult send(Collection<Message> msgs, long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msgs);
        return this.producer.send(msgs, timeout);
    }

    public SendResult send(Collection<Message> msgs, MessageQueue mq) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msgs);
        return this.producer.send(msgs, mq);
    }

    public SendResult send(Collection<Message> msgs, MessageQueue mq, long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.preCondition(msgs);
        return this.producer.send(msgs, mq, timeout);
    }

    /** Fetches the publish queues for a topic, after applying the environment prefix. */
    public List<MessageQueue> fetchPublishMessageQueues(String topic) throws MQClientException {
        topic = this.env + topic;
        return this.producer.fetchPublishMessageQueues(topic);
    }

    public DefaultMQProducer getProducer() {
        return this.producer;
    }
}
|
package transgenic.lauterbrunnen.lateral.example.simplehazelcast.libdomain;
/**
 * Value holder for a subject's scheduled time slot on a given day.
 *
 * <p>NOTE(review): no accessors or constructors are defined here — presumably the
 * fields are populated reflectively by the surrounding framework; confirm.
 *
 * Created by stumeikle on 28/05/16.
 */
public class SubjectDayTime {
    // Name of the subject this slot belongs to.
    private String subject;
    // Day of the slot; Day is a project-declared type — TODO confirm its semantics.
    private Day day;
    // Start hour of the slot — assumed hour-of-day; not validated here.
    private int startHour;
    // End hour of the slot — assumed hour-of-day; not validated here.
    private int endHour;
}
|
package org.cthul.strings.plural;
import java.util.*;
import java.util.ResourceBundle.Control;
import org.cthul.strings.Pluralizer;
/**
* Manages {@linkplain Pluralizer pluralizers} for different
* {@linkplain Locale locales}.
*
* @author Arian Treffer
*/
public class PluralizerRegistry {

    /** Globally shared registry, pre-populated with English pluralizers. */
    public static PluralizerRegistry INSTANCE;

    public static PluralizerRegistry instance() {
        return INSTANCE;
    }

    // Default Control used purely for its locale candidate/fallback computation.
    private static final ResourceBundle.Control DEFAULT_CONTROL = new ResourceBundle.Control() { };

    // Base name handed to the Control when computing candidate/fallback locales.
    private static final String PLURALIZER_CLASS = Pluralizer.class.getName();

    static {
        PluralizerRegistry defaultRegistry = new PluralizerRegistry();
        Pluralizer english = new DefaultEnglishPluralizer();
        defaultRegistry.register(Locale.ROOT, english);
        defaultRegistry.register(Locale.ENGLISH, english);
        INSTANCE = defaultRegistry;
    }

    // Locale -> pluralizer mapping; guarded by synchronization on this registry.
    private final Map<Locale, Pluralizer> pluralizers = new HashMap<>();
    private final ResourceBundle.Control control;

    public PluralizerRegistry() {
        this(DEFAULT_CONTROL);
    }

    public PluralizerRegistry(Control c) {
        this.control = c;
    }

    /**
     * Registers a pluralizer for a locale.
     * @param l locale to register under
     * @param p the pluralizer
     * @return the pluralizer previously registered for that locale, or {@code null}
     */
    public synchronized Pluralizer register(Locale l, Pluralizer p) {
        return pluralizers.put(l, p);
    }

    /**
     * Finds the best pluralizer for a locale, walking the candidate locales of
     * each fallback locale in turn until a registered entry is found.
     * @param l the locale to search for
     * @return a pluralizer, or {@code null} when nothing matches
     * @see #getRegistered(java.util.Locale)
     */
    public synchronized Pluralizer find(Locale l) {
        for (Locale current = l; current != null;
                current = control.getFallbackLocale(PLURALIZER_CLASS, current)) {
            for (Locale candidate : control.getCandidateLocales(PLURALIZER_CLASS, current)) {
                Pluralizer match = pluralizers.get(candidate);
                if (match != null) {
                    return match;
                }
            }
        }
        return null;
    }

    /**
     * Returns the pluralizer registered for exactly this locale, or {@code null}.
     * @param l the locale
     * @return a pluralizer, or {@code null}
     * @see #find(java.util.Locale)
     */
    public synchronized Pluralizer getRegistered(Locale l) {
        return pluralizers.get(l);
    }

    /**
     * Creates a shallow copy of this registry (same control, copied mappings).
     * @return a new registry
     */
    public synchronized PluralizerRegistry copy() {
        PluralizerRegistry clone = new PluralizerRegistry(control);
        clone.pluralizers.putAll(pluralizers);
        return clone;
    }
}
|
package org.qiunet.utils.scanner;
import com.google.common.collect.Lists;
import org.qiunet.utils.args.ArgsContainer;
import org.qiunet.utils.async.LazyLoader;
import org.qiunet.utils.exceptions.CustomException;
import org.qiunet.utils.reflect.ReflectUtil;
import org.qiunet.utils.scanner.anno.AutoWired;
import org.qiunet.utils.scanner.anno.IgnoreEmptyWired;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Set;
/***
 * Automatic instance injection: wires every field annotated with
 * {@link AutoWired} with the singleton instance of its (possibly inferred) type.
 *
 * @author qiunet
 * 2020-12-28 12:45
 */
enum AutoWiredHandler implements IApplicationContextAware {
	instance;
	// Application context captured in setApplicationContext; used for lookups later.
	private IApplicationContext context;
	@Override
	public void setApplicationContext(IApplicationContext context, ArgsContainer argsContainer) throws Exception {
		this.context = context;
		Set<Field> fields = context.getFieldsAnnotatedWith(AutoWired.class);
		for (Field field : fields) {
			// Config fields are handled by the optional CfgScannerManager, if present.
			if (this.handlerCfgAutoWire(field)) {
				continue;
			}
			Class<?> fieldType = field.getType();
			Class<?> type = fieldType;
			// Abstract/interface field types cannot be instantiated directly;
			// resolve them to their single concrete subtype.
			if (Modifier.isAbstract(fieldType.getModifiers())
				|| Modifier.isInterface(fieldType.getModifiers())) {
				Set<Class<?>> classes = context.getSubTypesOf((Class<Object>) fieldType);
				if (classes.isEmpty()) {
					// No subtype found: an error, unless explicitly tolerated.
					if (! fieldType.isAnnotationPresent(IgnoreEmptyWired.class)) {
						throw new CustomException("Field type {} have none subType class, Do not know how to wired", fieldType.getName());
					}
					continue;
				}
				// More than one subtype is ambiguous - refuse to guess.
				if (classes.size() > 1) {
					throw new CustomException("Field type {} have much subType class, Do not know how to wired", fieldType.getName());
				}
				type = Lists.newArrayList(classes).get(0);
			}
			// The instance to inject.
			Object autoWiredObj = context.getInstanceOfClass(type);
			Object declaringObj = null;
			// Static fields need no declaring instance; instance fields are set on
			// the singleton of the declaring class.
			if (! Modifier.isStatic(field.getModifiers())) {
				declaringObj = context.getInstanceOfClass(field.getDeclaringClass());
			}
			ReflectUtil.setField(declaringObj, field, autoWiredObj);
		}
	}
	// Lazily resolved optional dependency; null when the cfg module is not on the classpath.
	private static final LazyLoader<Class<?>> cfgScannerManagerClass = new LazyLoader<>(() -> {
		String cfgAutoWireClass = "org.qiunet.cfg.annotation.support.CfgScannerManager";
		try {
			return Class.forName(cfgAutoWireClass);
		}catch (Exception e) {
			return null;
		}
	});
	/**
	 * Delegates config-field wiring to CfgScannerManager#cfgAutoWired via reflection.
	 * @return true when the field was handled by the cfg module (so normal wiring is skipped)
	 */
	private boolean handlerCfgAutoWire(Field field) throws Exception {
		if (cfgScannerManagerClass.get() == null) {
			return false;
		}
		Object cfgScannerManager = this.context.getInstanceOfClass(cfgScannerManagerClass.get());
		Method method = cfgScannerManagerClass.get().getDeclaredMethod("cfgAutoWired", Field.class);
		method.setAccessible(true);
		Boolean result = (Boolean) method.invoke(cfgScannerManager, field);
		return result;
	}
}
|
package com.threathunter.mock.simulator;
import com.threathunter.model.Event;
/**
*
*/
/**
 * Simple holder pairing a parent {@link Event} with a child {@link Event},
 * used by the simulator to generate related event pairs.
 */
public class CompositeEvent {

    /** The parent event of the pair. */
    private Event parent;
    /** The child event of the pair. */
    private Event child;

    public CompositeEvent() {
    }

    public Event getParent() {
        return parent;
    }

    public void setParent(Event parent) {
        this.parent = parent;
    }

    public Event getChild() {
        return child;
    }

    public void setChild(Event child) {
        this.child = child;
    }
}
|
/**Copyright (c) 2017, AT&T Intellectual Property. All other rights reserved.**/
package com.att.jsoneditor;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.Map.Entry;

import javax.swing.tree.DefaultMutableTreeNode;

import org.apache.commons.lang3.StringUtils;

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSyntaxException;
/**
 * A mutable tree node that mirrors a Gson {@link JsonElement}: arrays and
 * objects become parent nodes whose children are their elements/members, and
 * primitives/nulls become leaf nodes. The tree can be serialized back to JSON
 * via {@link #asJsonElement()}.
 */
public class JSONJTreeNode extends DefaultMutableTreeNode {
    private static final long serialVersionUID = 1L;

    /** The JSON structure kind this node represents. */
    public enum DataType {ARRAY, OBJECT, VALUE};

    final DataType dataType;
    /** Index within the parent array, or -1 when this node is not an array element. */
    final int index;
    /** Member name within the parent object, or null when not an object member. */
    String fieldName;
    /** Raw JSON text of this element (as produced by {@code JsonElement.toString()}). */
    final String value;

    /**
     * Builds a tree node — and, recursively, its children — from a Gson element.
     *
     * @param fieldName   member name in the parent object, or null
     * @param index       position in the parent array, or -1
     * @param jsonElement the element to mirror
     * @throws IllegalArgumentException if the element is of an unknown kind
     */
    public JSONJTreeNode(String fieldName, int index, JsonElement jsonElement) {
        this.index = index;
        this.fieldName = fieldName;
        if (jsonElement.isJsonArray()) {
            this.dataType = DataType.ARRAY;
            this.value = jsonElement.toString();
            populateChildren(jsonElement);
        } else if (jsonElement.isJsonObject()) {
            this.dataType = DataType.OBJECT;
            this.value = jsonElement.toString();
            populateChildren(jsonElement);
        } else if (jsonElement.isJsonPrimitive() || jsonElement.isJsonNull()) {
            // Primitives and JSON null are both leaf values.
            this.dataType = DataType.VALUE;
            this.value = jsonElement.toString();
        } else {
            throw new IllegalArgumentException("jsonElement is an unknown element type.");
        }
    }

    /** Adds one child node per array element or object member. */
    private void populateChildren(JsonElement myJsonElement) {
        switch(dataType) {
        case ARRAY:
            int childIndex = 0;
            for (JsonElement element : myJsonElement.getAsJsonArray()) {
                this.add(new JSONJTreeNode(null, childIndex, element));
                childIndex++;
            }
            break;
        case OBJECT:
            for (Entry<String,JsonElement> entry : myJsonElement.getAsJsonObject().entrySet()) {
                this.add(new JSONJTreeNode(entry.getKey(), -1, entry.getValue()));
            }
            break;
        default:
            throw new IllegalStateException("Internal coding error this should never happen.");
        }
    }

    /**
     * Serializes the subtree rooted at this node back into a Gson element.
     *
     * <p>A lone {@code "name":value} pair is not valid JSON on its own, so it is
     * wrapped in an object for parsing and the single member is then unwrapped.
     *
     * @return the reconstructed element
     */
    public JsonElement asJsonElement() {
        StringBuilder sb = new StringBuilder();
        buildJsonString(sb);
        String json = sb.toString().trim();
        if(json.startsWith("{") || json.startsWith("["))
            return new JsonParser().parse(json);
        else {
            String testValue = "{" + json + "}";
            try {
                JsonElement wrapperElt = new JsonParser().parse(testValue);
                JsonObject obj = (JsonObject) wrapperElt;
                Iterator<Entry<String,JsonElement>> it = obj.entrySet().iterator();
                Entry<String,JsonElement> entry = it.next();
                return entry.getValue();
            } catch(JsonSyntaxException jse) {
                // Bare value with no field name — parse it directly.
                return new JsonParser().parse(json);
            }
        }
    }

    /** Appends this subtree's JSON representation to {@code sb}. */
    @SuppressWarnings("unchecked")
    private void buildJsonString(StringBuilder sb) {
        if(!StringUtils.isEmpty(this.fieldName)) {
            // Fix: emit the member name via JsonPrimitive so quotes/backslashes in
            // the name are escaped; plain concatenation produced invalid JSON.
            sb.append(new JsonPrimitive(this.fieldName).toString()).append(":");
        }
        Enumeration children;
        switch(dataType) {
        case ARRAY:
            sb.append("[");
            children = this.children();
            while(children.hasMoreElements()) {
                JSONJTreeNode child = (JSONJTreeNode) children.nextElement();
                child.buildJsonString(sb);
                if(children.hasMoreElements())
                    sb.append(",");
            }
            sb.append("]");
            break;
        case OBJECT:
            sb.append("{");
            children = this.children();
            while(children.hasMoreElements()) {
                JSONJTreeNode child = (JSONJTreeNode) children.nextElement();
                child.buildJsonString(sb);
                if(children.hasMoreElements())
                    sb.append(",");
            }
            sb.append("}");
            break;
        default: {
            // Re-parse the stored raw text so the value is re-serialized canonically.
            JsonElement elt = new JsonParser().parse(this.value);
            sb.append(elt.toString());
        }
        }
    }

    /** Display label used by the JTree: index, field name and/or value. */
    @Override
    public String toString() {
        switch(dataType) {
        case ARRAY:
        case OBJECT:
            if(index >= 0) {
                return String.format("[%d] (%s)", index, dataType.name());
            } else if(fieldName != null) {
                return String.format("%s (%s)", fieldName, dataType.name());
            } else {
                return String.format("(%s)", dataType.name());
            }
        default:
            if(index >= 0) {
                return String.format("[%d] %s", index, value);
            } else if(fieldName != null) {
                return String.format("%s: %s", fieldName, value);
            } else {
                return String.format("%s", value);
            }
        }
    }
}
|
package com.jacckx.gulimall.coupon.entity;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import java.math.BigDecimal;
import java.io.Serializable;
import java.util.Date;
import lombok.Data;
/**
 * Product tiered ("ladder") pricing entity, mapped to table {@code sms_sku_ladder}.
 *
 * @author Jack
 * @email jk.chen1104@gmail.com
 * @date 2022-02-05 22:52:22
 */
@Data
@TableName("sms_sku_ladder")
public class SkuLadderEntity implements Serializable {
	private static final long serialVersionUID = 1L;
	/**
	 * Primary key id
	 */
	@TableId
	private Long id;
	/**
	 * sku id — NOTE(review): the original comment said "spu_id" while the field
	 * is named skuId; confirm which identifier this column actually stores.
	 */
	private Long skuId;
	/**
	 * Quantity threshold: buy at least this many items to trigger the tier price
	 */
	private Integer fullCount;
	/**
	 * Discount rate applied at this tier
	 */
	private BigDecimal discount;
	/**
	 * Price after discount
	 */
	private BigDecimal price;
	/**
	 * Whether this can stack with other promotions [0 - not stackable, 1 - stackable]
	 */
	private Integer addOther;
}
|
package net.starlark.java.eval;
import java.util.Iterator;
/**
* A StarlarkIterator value is StarlarkIterable and may be iterated by Starlark language constructs
* such as {@code for} loops, list and dict comprehensions, and {@code f(*args)}.
*
* <p>Functionally this interface is equivalent to {@code java.lang.Iterable}, but it additionally
* affirms that the iterability of a Java class should be exposed to Starlark programs.
*/
public interface StarlarkIterator<T> extends StarlarkIterable<T>, Iterator<T> {
  /** Returns this iterator itself; a StarlarkIterator acts as its own (single-pass) iterable. */
  @Override
  default Iterator<T> iterator() {
    return this;
  }

  /** Returns true if the iteration has more elements. */
  @Override
  boolean hasNext();

  /** Returns the next element in the iteration. */
  @Override
  T next();
}
|
package com.mysiteforme.admin.service.impl;
import com.baomidou.mybatisplus.mapper.Condition;
import com.baomidou.mybatisplus.mapper.EntityWrapper;
import com.mysiteforme.admin.entity.Dict;
import com.mysiteforme.admin.dao.DictDao;
import com.mysiteforme.admin.service.DictService;
import com.baomidou.mybatisplus.service.impl.ServiceImpl;
import org.apache.commons.lang3.StringUtils;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.List;
/**
* <p>
* 字典表 服务实现类
* </p>
*
* @author wangl
* @since 2017-11-26
*/
@Service
@Transactional(readOnly = true, rollbackFor = Exception.class)
public class DictServiceImpl extends ServiceImpl<DictDao, Dict> implements DictService {
@Cacheable(value = "dictCache",key = "#type",unless = "#result == null or #result.size() == 0")
@Override
public List<Dict> getDictByType(String type) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.eq("type",type);
wrapper.eq("del_flag",false);
wrapper.orderBy("sort");
return selectList(wrapper);
}
@Override
public Integer getCountByType(String type) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.eq("type",type);
wrapper.eq("del_flag",false);
return selectCount(wrapper);
}
@Override
public Integer getMaxSortByType(String type) {
Object o = selectObj(Condition.create().setSqlSelect("max(sort)").eq("type",type));
int sort = 0;
if(o != null){
sort = (Integer)o + 1;
}
return sort;
}
@Override
public Integer getCountByAll(String type, String label, String value) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.eq("type",type);
if(StringUtils.isNotBlank(label)){
wrapper.eq("label",label);
}
if(StringUtils.isNotBlank(value)){
wrapper.eq("value",value);
}
wrapper.eq("del_flag",false);
return selectCount(wrapper);
}
@CacheEvict(value = "dictCache",key = "#dict.type",condition = "#dict.type ne null ")
@Transactional(readOnly = false, rollbackFor = Exception.class)
@Override
public void saveOrUpdateDict(Dict dict) {
insertOrUpdate(dict);
}
@CacheEvict(value = "dictCache",key = "#result",beforeInvocation = false,condition = "#result ne null ")
@Transactional(readOnly = false, rollbackFor = Exception.class)
@Override
public String deleteDict(Long id) {
Dict dict = baseMapper.selectById(id);
baseMapper.deleteById(id);
return dict.getType();
}
@CacheEvict(value = "dictCache",key = "#type",beforeInvocation = false)
@Override
public List<Dict> saveDictList(String type, List<Dict> list) {
insertBatch(list);
return list;
}
@CacheEvict(value = "dictCache",key = "#type",beforeInvocation = false)
@Transactional(readOnly = false, rollbackFor = Exception.class)
@Override
public void deleteByType(String type) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.eq("type",type);
delete(wrapper);
}
@CacheEvict(value = "dictCache",allEntries=true)
@Transactional(readOnly = false, rollbackFor = Exception.class)
@Override
public void deleteByTableName(String tableName) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.like("description","数据表【"+tableName+"】");
delete(wrapper);
}
@CacheEvict(value = "dictCache",allEntries=true)
@Transactional(readOnly = false, rollbackFor = Exception.class)
@Override
public void updateByType(String oldType,String newType) {
EntityWrapper<Dict> wrapper = new EntityWrapper<>();
wrapper.eq("type",oldType);
List<Dict> dicts = baseMapper.selectList(wrapper);
for (Dict dict : dicts){
dict.setType(newType);
}
updateBatchById(dicts);
}
}
|
package grammar;
import java.util.Queue;
import Tokenizing.Lexeme;
/** Grammar production for the {@code new int [expr]} form. */
public class New_dash1 implements New_dash {

    /** Size expression that appears inside the brackets. */
    Expression expr;

    public New_dash1(Expression expr) {
        this.expr = expr;
    }

    /** Renders this production as {@code "int [<expr>] "}. */
    @Override
    public String getValue() {
        StringBuilder out = new StringBuilder();
        out.append("int")
                .append(" [")
                .append(expr.getValue())
                .append("] ");
        return out.toString();
    }
}
|
/*
* Copyright 2014 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.apiman.gateway.test;
import io.apiman.gateway.test.junit.GatewayRestTestPlan;
import io.apiman.gateway.test.junit.GatewayRestTester;
import org.junit.runner.RunWith;
/**
* Make sure the blacklist policy works.
*
* @author eric.wittmann@redhat.com
*/
@RunWith(GatewayRestTester.class)
@GatewayRestTestPlan("test-plans/policies/ip-blacklist-testPlan.xml")
public class Policy_IPBlacklistTest {
    // Intentionally empty: all test steps are driven by the test-plan XML,
    // which the GatewayRestTester runner loads and executes.
}
|
/**
* Copyright (C) 2019-2021 Philip Helger
* http://www.helger.com
* philip[at]helger[dot]com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.helger.registry434.app;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import com.helger.commons.annotation.UsedViaReflection;
import com.helger.commons.debug.GlobalDebug;
import com.helger.commons.exception.InitializationException;
import com.helger.scope.singleton.AbstractGlobalSingleton;
import com.helger.settings.ISettings;
import com.helger.settings.exchange.configfile.ConfigFile;
import com.helger.settings.exchange.configfile.ConfigFileBuilder;
/**
* This class provides access to the settings as contained in the
* <code>webapp.properties</code> file.
*
* @author Philip Helger
*/
public class AppSettings extends AbstractGlobalSingleton
{
  /** The name of the file containing the settings */
  public static final String FILENAME = "webapp.properties";

  // Loaded once at class initialization; private overrides win over the default file.
  private static final ConfigFile s_aCF;

  static
  {
    s_aCF = new ConfigFileBuilder ().addPath ("private-webapp.properties").addPath (FILENAME).build ();
    if (!s_aCF.isRead ())
      throw new InitializationException ("Failed to read config file");
  }

  @Deprecated
  @UsedViaReflection
  private AppSettings ()
  {}

  /** @return the settings backing object read from the configuration file. */
  @Nonnull
  public static ISettings getSettingsObject ()
  {
    return s_aCF.getSettings ();
  }

  /** Shared lookup for plain string settings. May return <code>null</code>. */
  @Nullable
  private static String _getAsString (@Nonnull final String sKey)
  {
    return getSettingsObject ().getAsString (sKey);
  }

  /** @return the raw "global.debug" setting, or <code>null</code> if absent. */
  @Nullable
  public static String getGlobalDebug ()
  {
    return _getAsString ("global.debug");
  }

  /** @return the raw "global.production" setting, or <code>null</code> if absent. */
  @Nullable
  public static String getGlobalProduction ()
  {
    return _getAsString ("global.production");
  }

  /** @return the configured data path, or <code>null</code> if absent. */
  @Nullable
  public static String getDataPath ()
  {
    return _getAsString ("webapp.datapath");
  }

  /** @return whether file access checking is enabled; defaults to <code>true</code>. */
  public static boolean isCheckFileAccess ()
  {
    return getSettingsObject ().getAsBoolean ("webapp.checkfileaccess", true);
  }

  /** @return whether this is a test version; defaults to the global debug mode. */
  public static boolean isTestVersion ()
  {
    return getSettingsObject ().getAsBoolean ("webapp.testversion", GlobalDebug.isDebugMode ());
  }
}
|
package com.gj.app.entity;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
public class SysLoginLogExample {
protected String orderByClause;
protected boolean distinct;
protected List<Criteria> oredCriteria;
public SysLoginLogExample() {
oredCriteria = new ArrayList<Criteria>();
}
public void setOrderByClause(String orderByClause) {
this.orderByClause = orderByClause;
}
public String getOrderByClause() {
return orderByClause;
}
public void setDistinct(boolean distinct) {
this.distinct = distinct;
}
public boolean isDistinct() {
return distinct;
}
public List<Criteria> getOredCriteria() {
return oredCriteria;
}
public void or(Criteria criteria) {
oredCriteria.add(criteria);
}
public Criteria or() {
Criteria criteria = createCriteriaInternal();
oredCriteria.add(criteria);
return criteria;
}
public Criteria createCriteria() {
Criteria criteria = createCriteriaInternal();
if (oredCriteria.size() == 0) {
oredCriteria.add(criteria);
}
return criteria;
}
protected Criteria createCriteriaInternal() {
Criteria criteria = new Criteria();
return criteria;
}
public void clear() {
oredCriteria.clear();
orderByClause = null;
distinct = false;
}
protected abstract static class GeneratedCriteria {
protected List<Criterion> criteria;
protected GeneratedCriteria() {
super();
criteria = new ArrayList<Criterion>();
}
public boolean isValid() {
return criteria.size() > 0;
}
public List<Criterion> getAllCriteria() {
return criteria;
}
public List<Criterion> getCriteria() {
return criteria;
}
protected void addCriterion(String condition) {
if (condition == null) {
throw new RuntimeException("Value for condition cannot be null");
}
criteria.add(new Criterion(condition));
}
protected void addCriterion(String condition, Object value, String property) {
if (value == null) {
throw new RuntimeException("Value for " + property + " cannot be null");
}
criteria.add(new Criterion(condition, value));
}
protected void addCriterion(String condition, Object value1, Object value2, String property) {
if (value1 == null || value2 == null) {
throw new RuntimeException("Between values for " + property + " cannot be null");
}
criteria.add(new Criterion(condition, value1, value2));
}
public Criteria andIdIsNull() {
addCriterion("id is null");
return (Criteria) this;
}
public Criteria andIdIsNotNull() {
addCriterion("id is not null");
return (Criteria) this;
}
public Criteria andIdEqualTo(Integer value) {
addCriterion("id =", value, "id");
return (Criteria) this;
}
public Criteria andIdNotEqualTo(Integer value) {
addCriterion("id <>", value, "id");
return (Criteria) this;
}
public Criteria andIdGreaterThan(Integer value) {
addCriterion("id >", value, "id");
return (Criteria) this;
}
public Criteria andIdGreaterThanOrEqualTo(Integer value) {
addCriterion("id >=", value, "id");
return (Criteria) this;
}
public Criteria andIdLessThan(Integer value) {
addCriterion("id <", value, "id");
return (Criteria) this;
}
public Criteria andIdLessThanOrEqualTo(Integer value) {
addCriterion("id <=", value, "id");
return (Criteria) this;
}
public Criteria andIdIn(List<Integer> values) {
addCriterion("id in", values, "id");
return (Criteria) this;
}
public Criteria andIdNotIn(List<Integer> values) {
addCriterion("id not in", values, "id");
return (Criteria) this;
}
public Criteria andIdBetween(Integer value1, Integer value2) {
addCriterion("id between", value1, value2, "id");
return (Criteria) this;
}
public Criteria andIdNotBetween(Integer value1, Integer value2) {
addCriterion("id not between", value1, value2, "id");
return (Criteria) this;
}
public Criteria andLognameIsNull() {
addCriterion("logname is null");
return (Criteria) this;
}
public Criteria andLognameIsNotNull() {
addCriterion("logname is not null");
return (Criteria) this;
}
public Criteria andLognameEqualTo(String value) {
addCriterion("logname =", value, "logname");
return (Criteria) this;
}
public Criteria andLognameNotEqualTo(String value) {
addCriterion("logname <>", value, "logname");
return (Criteria) this;
}
public Criteria andLognameGreaterThan(String value) {
addCriterion("logname >", value, "logname");
return (Criteria) this;
}
public Criteria andLognameGreaterThanOrEqualTo(String value) {
addCriterion("logname >=", value, "logname");
return (Criteria) this;
}
public Criteria andLognameLessThan(String value) {
addCriterion("logname <", value, "logname");
return (Criteria) this;
}
public Criteria andLognameLessThanOrEqualTo(String value) {
addCriterion("logname <=", value, "logname");
return (Criteria) this;
}
public Criteria andLognameLike(String value) {
addCriterion("logname like", value, "logname");
return (Criteria) this;
}
public Criteria andLognameNotLike(String value) {
addCriterion("logname not like", value, "logname");
return (Criteria) this;
}
public Criteria andLognameIn(List<String> values) {
addCriterion("logname in", values, "logname");
return (Criteria) this;
}
public Criteria andLognameNotIn(List<String> values) {
addCriterion("logname not in", values, "logname");
return (Criteria) this;
}
public Criteria andLognameBetween(String value1, String value2) {
addCriterion("logname between", value1, value2, "logname");
return (Criteria) this;
}
public Criteria andLognameNotBetween(String value1, String value2) {
addCriterion("logname not between", value1, value2, "logname");
return (Criteria) this;
}
public Criteria andUseridIsNull() {
addCriterion("userid is null");
return (Criteria) this;
}
public Criteria andUseridIsNotNull() {
addCriterion("userid is not null");
return (Criteria) this;
}
public Criteria andUseridEqualTo(Integer value) {
addCriterion("userid =", value, "userid");
return (Criteria) this;
}
public Criteria andUseridNotEqualTo(Integer value) {
addCriterion("userid <>", value, "userid");
return (Criteria) this;
}
public Criteria andUseridGreaterThan(Integer value) {
addCriterion("userid >", value, "userid");
return (Criteria) this;
}
public Criteria andUseridGreaterThanOrEqualTo(Integer value) {
addCriterion("userid >=", value, "userid");
return (Criteria) this;
}
public Criteria andUseridLessThan(Integer value) {
addCriterion("userid <", value, "userid");
return (Criteria) this;
}
public Criteria andUseridLessThanOrEqualTo(Integer value) {
addCriterion("userid <=", value, "userid");
return (Criteria) this;
}
public Criteria andUseridIn(List<Integer> values) {
addCriterion("userid in", values, "userid");
return (Criteria) this;
}
public Criteria andUseridNotIn(List<Integer> values) {
addCriterion("userid not in", values, "userid");
return (Criteria) this;
}
public Criteria andUseridBetween(Integer value1, Integer value2) {
addCriterion("userid between", value1, value2, "userid");
return (Criteria) this;
}
public Criteria andUseridNotBetween(Integer value1, Integer value2) {
addCriterion("userid not between", value1, value2, "userid");
return (Criteria) this;
}
// --- Criteria builders for the `createtime` column (MyBatis-generator output; regenerate rather than hand-edit) ---
public Criteria andCreatetimeIsNull() {
addCriterion("createtime is null");
return (Criteria) this;
}
public Criteria andCreatetimeIsNotNull() {
addCriterion("createtime is not null");
return (Criteria) this;
}
public Criteria andCreatetimeEqualTo(Date value) {
addCriterion("createtime =", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeNotEqualTo(Date value) {
addCriterion("createtime <>", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeGreaterThan(Date value) {
addCriterion("createtime >", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeGreaterThanOrEqualTo(Date value) {
addCriterion("createtime >=", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeLessThan(Date value) {
addCriterion("createtime <", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeLessThanOrEqualTo(Date value) {
addCriterion("createtime <=", value, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeIn(List<Date> values) {
addCriterion("createtime in", values, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeNotIn(List<Date> values) {
addCriterion("createtime not in", values, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeBetween(Date value1, Date value2) {
addCriterion("createtime between", value1, value2, "createtime");
return (Criteria) this;
}
public Criteria andCreatetimeNotBetween(Date value1, Date value2) {
addCriterion("createtime not between", value1, value2, "createtime");
return (Criteria) this;
}
// --- Criteria builders for the `succeed` column (MyBatis-generator output; regenerate rather than hand-edit) ---
public Criteria andSucceedIsNull() {
addCriterion("succeed is null");
return (Criteria) this;
}
public Criteria andSucceedIsNotNull() {
addCriterion("succeed is not null");
return (Criteria) this;
}
public Criteria andSucceedEqualTo(String value) {
addCriterion("succeed =", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedNotEqualTo(String value) {
addCriterion("succeed <>", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedGreaterThan(String value) {
addCriterion("succeed >", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedGreaterThanOrEqualTo(String value) {
addCriterion("succeed >=", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedLessThan(String value) {
addCriterion("succeed <", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedLessThanOrEqualTo(String value) {
addCriterion("succeed <=", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedLike(String value) {
addCriterion("succeed like", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedNotLike(String value) {
addCriterion("succeed not like", value, "succeed");
return (Criteria) this;
}
public Criteria andSucceedIn(List<String> values) {
addCriterion("succeed in", values, "succeed");
return (Criteria) this;
}
public Criteria andSucceedNotIn(List<String> values) {
addCriterion("succeed not in", values, "succeed");
return (Criteria) this;
}
public Criteria andSucceedBetween(String value1, String value2) {
addCriterion("succeed between", value1, value2, "succeed");
return (Criteria) this;
}
public Criteria andSucceedNotBetween(String value1, String value2) {
addCriterion("succeed not between", value1, value2, "succeed");
return (Criteria) this;
}
// --- Criteria builders for the `ip` column (MyBatis-generator output; regenerate rather than hand-edit) ---
public Criteria andIpIsNull() {
addCriterion("ip is null");
return (Criteria) this;
}
public Criteria andIpIsNotNull() {
addCriterion("ip is not null");
return (Criteria) this;
}
public Criteria andIpEqualTo(String value) {
addCriterion("ip =", value, "ip");
return (Criteria) this;
}
public Criteria andIpNotEqualTo(String value) {
addCriterion("ip <>", value, "ip");
return (Criteria) this;
}
public Criteria andIpGreaterThan(String value) {
addCriterion("ip >", value, "ip");
return (Criteria) this;
}
public Criteria andIpGreaterThanOrEqualTo(String value) {
addCriterion("ip >=", value, "ip");
return (Criteria) this;
}
public Criteria andIpLessThan(String value) {
addCriterion("ip <", value, "ip");
return (Criteria) this;
}
public Criteria andIpLessThanOrEqualTo(String value) {
addCriterion("ip <=", value, "ip");
return (Criteria) this;
}
public Criteria andIpLike(String value) {
addCriterion("ip like", value, "ip");
return (Criteria) this;
}
public Criteria andIpNotLike(String value) {
addCriterion("ip not like", value, "ip");
return (Criteria) this;
}
public Criteria andIpIn(List<String> values) {
addCriterion("ip in", values, "ip");
return (Criteria) this;
}
public Criteria andIpNotIn(List<String> values) {
addCriterion("ip not in", values, "ip");
return (Criteria) this;
}
public Criteria andIpBetween(String value1, String value2) {
addCriterion("ip between", value1, value2, "ip");
return (Criteria) this;
}
public Criteria andIpNotBetween(String value1, String value2) {
addCriterion("ip not between", value1, value2, "ip");
return (Criteria) this;
}
}
/**
 * Concrete, user-facing criteria type returned by every builder method in
 * {@code GeneratedCriteria}. Generated by MyBatis Generator; do not edit.
 */
public static class Criteria extends GeneratedCriteria {
protected Criteria() {
super();
}
}
/**
 * One SQL condition produced by {@code addCriterion}. Exactly one of the
 * {@code noValue}/{@code singleValue}/{@code betweenValue}/{@code listValue}
 * flags is set, telling the mapper XML how to render the condition.
 * Generated by MyBatis Generator; do not edit.
 */
public static class Criterion {
// SQL fragment, e.g. "userid =" or "createtime between".
private String condition;
// First (or only) bound value; a List when listValue is true.
private Object value;
// Upper bound for BETWEEN conditions.
private Object secondValue;
// True for conditions with no bind values ("is null" / "is not null").
private boolean noValue;
// True when exactly one value is bound.
private boolean singleValue;
// True for BETWEEN conditions (two bound values).
private boolean betweenValue;
// True when value is a List (IN / NOT IN conditions).
private boolean listValue;
// Optional MyBatis type handler name; null when unset.
private String typeHandler;
public String getCondition() {
return condition;
}
public Object getValue() {
return value;
}
public Object getSecondValue() {
return secondValue;
}
public boolean isNoValue() {
return noValue;
}
public boolean isSingleValue() {
return singleValue;
}
public boolean isBetweenValue() {
return betweenValue;
}
public boolean isListValue() {
return listValue;
}
public String getTypeHandler() {
return typeHandler;
}
// No-value condition, e.g. "userid is null".
protected Criterion(String condition) {
super();
this.condition = condition;
this.typeHandler = null;
this.noValue = true;
}
// Single-value or list condition; the flag is chosen from the runtime type of value.
protected Criterion(String condition, Object value, String typeHandler) {
super();
this.condition = condition;
this.value = value;
this.typeHandler = typeHandler;
if (value instanceof List<?>) {
this.listValue = true;
} else {
this.singleValue = true;
}
}
protected Criterion(String condition, Object value) {
this(condition, value, null);
}
// BETWEEN condition with two bound values.
protected Criterion(String condition, Object value, Object secondValue, String typeHandler) {
super();
this.condition = condition;
this.value = value;
this.secondValue = secondValue;
this.typeHandler = typeHandler;
this.betweenValue = true;
}
protected Criterion(String condition, Object value, Object secondValue) {
this(condition, value, secondValue, null);
}
}
}
|
package br.com.zupacademy.transacoes.kafka;
import br.com.zupacademy.transacoes.transacoes.DTO.TransacaoListened;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka consumer wiring for {@link TransacaoListened} messages.
 *
 * <p>Builds the consumer properties from Spring Boot's {@link KafkaProperties},
 * a {@link ConsumerFactory} that deserializes keys as String and values as
 * JSON, and the listener container factory used by {@code @KafkaListener}s.
 */
@Configuration
public class KafkaConfigurations {
    // Injected, never reassigned — declared final so the compiler enforces it.
    private final KafkaProperties kafkaProperties;

    public KafkaConfigurations(KafkaProperties kafkaProperties) {
        this.kafkaProperties = kafkaProperties;
    }

    /**
     * Base consumer properties copied from application configuration
     * (bootstrap servers, key/value deserializers, group id, offset reset).
     */
    public Map<String, Object> consumerConfigurations() {
        Map<String, Object> properties = new HashMap<>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaProperties.getConsumer().getKeyDeserializer());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaProperties.getConsumer().getValueDeserializer());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaProperties.getConsumer().getGroupId());
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaProperties.getConsumer().getAutoOffsetReset());
        return properties;
    }

    /**
     * Factory producing consumers whose values are deserialized from JSON into
     * {@link TransacaoListened}; type headers are ignored ({@code false} flag).
     */
    @Bean
    public ConsumerFactory<String, TransacaoListened> transactionConsumerFactory() {
        StringDeserializer stringDeserializer = new StringDeserializer();
        JsonDeserializer<TransacaoListened> jsonDeserializer = new JsonDeserializer<>(TransacaoListened.class, false);
        return new DefaultKafkaConsumerFactory<>(consumerConfigurations(), stringDeserializer, jsonDeserializer);
    }

    /** Container factory picked up by {@code @KafkaListener} methods. */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, TransacaoListened> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, TransacaoListened> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(transactionConsumerFactory());
        return factory;
    }
}
|
package org.drools.testcoverage.kieci.withoutdomain;
import org.drools.testcoverage.kieci.withoutdomain.util.KJarLoadUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.kie.api.KieServices;
import org.kie.api.builder.ReleaseId;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests loading a KJAR with non-trivial pom.xml (dependencies, parent pom, ...).
*
* Tests must NOT have access to domain classes in test-domain module (BZ 1305798).
*/
public class KJarLoadingTest {
// Shared KieServices entry point (thread-safe singleton factory).
private static final KieServices KS = KieServices.Factory.get();
// GAV of the KJAR under test, read from a properties file on the classpath.
private static final ReleaseId KJAR_RELEASE_ID = KJarLoadUtils.loadKJarGAV("testKJarGAV.properties", KJarLoadingTest.class);
private KieSession kieSession;
// Resolves the KJAR (and its pom dependencies) and opens a fresh session per test.
@Before
public void init() {
final KieContainer container = KS.newKieContainer(KJAR_RELEASE_ID);
this.kieSession = container.newKieSession();
}
// Always dispose the session to release rule-engine resources.
@After
public void dispose() {
if (this.kieSession != null) {
this.kieSession.dispose();
}
}
@Test
public void testLoadingKJarWithDeps() {
// BZ 1305798
assertThat(this.kieSession).as("Failed to create KieSession.").isNotNull();
assertThat(this.kieSession.getKieBase().getKiePackages()).as("No rules compiled.").isNotEmpty();
}
}
|
package com.bumptech.glide.load.engine.b;
import android.content.Context;
// compiled from: InternalCacheDiskCacheFactory.java
// Decompiled Glide class (InternalCacheDiskCacheFactory): disk-cache factory that
// stores images under the app-internal directory "image_manager_disk_cache".
// NOTE(review): obfuscated output — left byte-identical on purpose.
public final class f extends d {
public f(Context context) {
this(context, "image_manager_disk_cache");
}
private f(Context context, String str) {
super(new g(context, str));
}
}
|
/**
*
*/
package com.sliit.af.util;
/**
* @author vimukthi_r
*
*/
/** Security-related constants shared across the application (JWT signing, roles). */
public class Param {
// Utility holder: prevent instantiation.
private Param() {
}
// this secret key is used for json web token signing
// NOTE(review): hard-coded secret — should be externalized to configuration.
public static final String SECRET = "af-final-2019";
// Prefix of the Authorization header value ("Bearer <token>").
public static final String TOKEN_PREFIX = "Bearer ";
// HTTP header carrying the JWT.
public static final String HEADER_STRING = "Authorization";
// Token lifetime in milliseconds.
public static final long EXPIRATION_TIME = 2_400_000; // 40 minutes
// Role names used for authorization checks.
public static final String ADMIN = "ADMIN";
public static final String INSTRUCTOR = "INSTRUCTOR";
public static final String STUDENT = "STUDENT";
}
|
package com.example.androidaptdecoder;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import android.Manifest;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Build.VERSION;
import android.os.Build.VERSION_CODES;
import android.os.Bundle;
import android.os.Environment;
import android.os.FileObserver;
import android.provider.DocumentsContract;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.chaquo.python.Python;
import com.chaquo.python.android.AndroidPlatform;
import com.codekidlabs.storagechooser.StorageChooser;
import com.github.chrisbanes.photoview.PhotoView;
import com.nbsp.materialfilepicker.MaterialFilePicker;
import com.nbsp.materialfilepicker.ui.FilePickerActivity;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.regex.Pattern;
import lib.folderpicker.FolderPicker;
import static java.lang.Thread.sleep;
/**
 * Main screen of the APT decoder app.
 *
 * <p>Flow: the user picks an input WAV file and an output folder, then
 * "Decode" copies the WAV into Chaquopy's working directory, runs the Python
 * {@code app.main(fileName, outfile)} decoder on a background task, polls a
 * progress text file every 500 ms to update the status label, and finally
 * displays the decoded "_original.png" image.
 */
public class MainActivity extends AppCompatActivity {
    Button infileButton;
    Button outfileButton;
    Button decodeButton;
    TextView infileTextView;
    TextView outfileTextView;
    TextView decodeTextView;
    ProgressBar progressBar;
    PhotoView photoView;
    String infile = "";
    String outfile = "";
    Float sharpen = 0f;
    Boolean combine = false;
    Float contrast = 0f;
    Boolean filter = false;
    Boolean a = false;
    Boolean b = false;
    // Decoder state flag polled by the progress thread: "IDLE" or "DECODING".
    String status = "IDLE";
    private static final int INFILE_REQUEST_CODE = 1000;
    private static final int OUTFILE_REQUEST_CODE = 1001;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        // Runtime storage permissions are required on API level > M.
        if (VERSION.SDK_INT > VERSION_CODES.M && checkSelfPermission(Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
            requestPermissions(new String[] {Manifest.permission.WRITE_EXTERNAL_STORAGE}, 1001);
        }
        if (VERSION.SDK_INT > VERSION_CODES.M && checkSelfPermission(Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
            requestPermissions(new String[] {Manifest.permission.READ_EXTERNAL_STORAGE}, 1002);
        }
        // Start the embedded Python runtime once per process.
        if (!Python.isStarted()) {
            Python.start(new AndroidPlatform(this));
        }
        infileButton = (Button) findViewById(R.id.infileButton);
        outfileButton = (Button) findViewById(R.id.outfileButton);
        decodeButton = (Button) findViewById(R.id.decodeButton);
        infileTextView = (TextView) findViewById(R.id.infileTextView);
        outfileTextView = (TextView) findViewById(R.id.outfileTextView);
        decodeTextView = (TextView) findViewById(R.id.decodeTextView);
        progressBar = (ProgressBar) findViewById(R.id.progressBar);
        photoView = (PhotoView) findViewById(R.id.photoView);
        // Input picker: restricted to *.wav files.
        infileButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                try {
                    new MaterialFilePicker()
                            .withActivity(MainActivity.this)
                            // Only offer WAV audio files.
                            .withFilter(Pattern.compile(".*\\.wav$"))
                            .withTitle("Choose Audio File")
                            .withRequestCode(INFILE_REQUEST_CODE)
                            .start();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
        // Output picker: choose the destination folder for the decoded image.
        outfileButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent intent = new Intent(MainActivity.this, FolderPicker.class);
                startActivityForResult(intent, OUTFILE_REQUEST_CODE);
            }
        });
        decodeButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // BUG FIX: String contents must be compared with isEmpty()/equals(),
                // not reference != "" (only worked by accident via interning).
                if (!infile.isEmpty()) {
                    if (!outfile.isEmpty()) {
                        decodeTextView.setText("Decoding...");
                        progressBar.setVisibility(View.VISIBLE);
                        String fileName = new File(infile).getName();
                        try {
                            // Stage the WAV where the Python decoder expects it.
                            copy(new File(infile), new File(getFilesDir().toString() + "/chaquopy/AssetFinder/app/" + fileName));
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        AsyncTask.execute(new Runnable() {
                            @Override
                            public void run() {
                                Python py = Python.getInstance();
                                try {
                                    // Progress poller: while decoding, push the contents of the
                                    // Python-side progress file into the status label twice a second.
                                    (new Thread(new Runnable() {
                                        @Override
                                        public void run() {
                                            // BUG FIX: was `status == "DECODING"` (reference compare).
                                            while (!Thread.interrupted() && "DECODING".equals(status)) {
                                                try {
                                                    Thread.sleep(500);
                                                    runOnUiThread(new Runnable() { // UI updates must run on the main thread
                                                        @Override
                                                        public void run() {
                                                            try {
                                                                String fileContent = getFileContent();
                                                                if (fileContent.length() > 0) {
                                                                    decodeTextView.setText(fileContent);
                                                                }
                                                            } catch (Exception e) {
                                                                e.printStackTrace();
                                                            }
                                                        }
                                                    });
                                                } catch (InterruptedException e) {
                                                    // Restore the interrupt flag so the loop terminates.
                                                    Thread.currentThread().interrupt();
                                                }
                                            }
                                        }
                                    })).start(); // poller runs on its own background thread
                                    status = "DECODING";
                                    System.out.println(infile);
                                    // Blocking call into the Python decoder.
                                    py.getModule("app").callAttr("main", fileName, outfile);
                                    PostDecode(outfile);
                                } catch (Exception e) {
                                    e.printStackTrace();
                                }
                            }
                        });
                    } else {
                        decodeTextView.setText("Please select an output location");
                    }
                } else {
                    decodeTextView.setText("Please select an input file");
                }
            }
        });
    }

    /**
     * Reads the first line of the decoder's progress file, or a placeholder
     * message when the file does not exist yet.
     */
    private String getFileContent() {
        File file = new File(getFilesDir().toString() + "/chaquopy/AssetFinder/app/" + "test.txt");
        if (!file.exists()) {
            return "Need to add smth";
        }
        String line = null;
        // BUG FIX: try-with-resources closes the reader (it was leaked before).
        try (BufferedReader br = new BufferedReader(new FileReader(file))) {
            line = br.readLine();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return line;
    }

    /** Receives the chosen input file path or output folder path from the pickers. */
    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        if (requestCode == INFILE_REQUEST_CODE && resultCode == RESULT_OK) {
            String filePath = data.getStringExtra(FilePickerActivity.RESULT_FILE_PATH);
            infile = filePath;
            infileTextView.setText(infile);
        }
        if (requestCode == OUTFILE_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
            outfile = data.getExtras().getString("data");
            outfileTextView.setText(outfile);
        }
    }

    /**
     * Called after decoding finishes: resets state, hides the progress bar, and
     * shows the "<input>_original.png" image from the output folder if present.
     *
     * @param src output directory chosen by the user
     */
    public void PostDecode(String src) {
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                status = "IDLE";
                decodeTextView.setText("DONE");
                progressBar.setVisibility(View.INVISIBLE);
                // Show the original (unprocessed) decoded image.
                File file = new File(infile);
                File imgFile = new File(src + "/" + file.getName().substring(0, file.getName().length() - 4) + "_original.png");
                if (imgFile.exists()) {
                    Bitmap myBitmap = BitmapFactory.decodeFile(imgFile.getAbsolutePath());
                    photoView.setImageBitmap(myBitmap);
                }
            }
        });
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
        switch (requestCode) {
            case 1001: {
                // Robustness: grantResults can be empty when the dialog is cancelled.
                if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                    Toast.makeText(this, "Permission Granted!", Toast.LENGTH_SHORT).show();
                } else {
                    Toast.makeText(this, "Permission Denied.", Toast.LENGTH_SHORT).show();
                    finish();
                }
                break; // BUG FIX: missing break caused fall-through into case 1002
            }
            case 1002: {
                if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                    Toast.makeText(this, "Permission Granted!", Toast.LENGTH_SHORT).show();
                } else {
                    Toast.makeText(this, "Permission Denied.", Toast.LENGTH_SHORT).show();
                    finish();
                }
                break;
            }
        }
    }

    /**
     * Copies {@code src} to {@code dst}, overwriting any existing file.
     *
     * @throws IOException if either stream fails
     */
    public static void copy(File src, File dst) throws IOException {
        try (InputStream in = new FileInputStream(src)) {
            try (OutputStream out = new FileOutputStream(dst)) {
                // Transfer bytes from in to out
                byte[] buf = new byte[1024];
                int len;
                while ((len = in.read(buf)) > 0) {
                    out.write(buf, 0, len);
                }
            }
        }
    }
}
|
package cc.mrbird.job.domain;
import java.util.Date;
/**
 * Plain data holder for a successfully processed mobile-number record.
 * String setters normalize their input by trimming surrounding whitespace
 * (null stays null); all other setters store the value unchanged.
 */
public class PMobileSuccess {
    private Long id;
    private String mobile;
    private Date createDate;
    private Integer status;
    private String createBy;
    private String extend1;
    private String extend2;
    private Integer userId;

    /** @return the primary key, or null if not yet assigned */
    public Long getId() {
        return this.id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    /** @return the trimmed mobile number, or null */
    public String getMobile() {
        return this.mobile;
    }

    /** Stores the mobile number, trimmed; null is preserved. */
    public void setMobile(String mobile) {
        if (mobile == null) {
            this.mobile = null;
        } else {
            this.mobile = mobile.trim();
        }
    }

    public Date getCreateDate() {
        return this.createDate;
    }

    public void setCreateDate(Date createDate) {
        this.createDate = createDate;
    }

    public Integer getStatus() {
        return this.status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    public String getCreateBy() {
        return this.createBy;
    }

    /** Stores the creator identifier, trimmed; null is preserved. */
    public void setCreateBy(String createBy) {
        if (createBy == null) {
            this.createBy = null;
        } else {
            this.createBy = createBy.trim();
        }
    }

    public String getExtend1() {
        return this.extend1;
    }

    /** Stores the first extension field, trimmed; null is preserved. */
    public void setExtend1(String extend1) {
        if (extend1 == null) {
            this.extend1 = null;
        } else {
            this.extend1 = extend1.trim();
        }
    }

    public String getExtend2() {
        return this.extend2;
    }

    /** Stores the second extension field, trimmed; null is preserved. */
    public void setExtend2(String extend2) {
        if (extend2 == null) {
            this.extend2 = null;
        } else {
            this.extend2 = extend2.trim();
        }
    }

    public Integer getUserId() {
        return this.userId;
    }

    public void setUserId(Integer userId) {
        this.userId = userId;
    }
}
|
//,temp,TestKeyValueTextInputFormat.java,51,131,temp,TestMRKeyValueTextInputFormat.java,70,156
//,3
/**
 * Round-trip test for KeyValueTextInputFormat: writes tab-separated
 * "2i\ti" pairs of varying file lengths, splits the file several ways,
 * and verifies every key/value pair is read exactly once across all splits.
 * (Merged clone of TestKeyValueTextInputFormat / TestMRKeyValueTextInputFormat.)
 */
public class xxx {
@Test
public void testFormat() throws Exception {
Job job = Job.getInstance(new Configuration(defaultConf));
Path file = new Path(workDir, "test.txt");
// Random seed is logged so failures are reproducible.
int seed = new Random().nextInt();
LOG.info("seed = " + seed);
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int MAX_LENGTH = 10000;
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i * 2));
writer.write("\t");
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
List<InputSplit> splits = format.getSplits(job);
LOG.debug("splitting: got = " + splits.size());
// check each split
// BitSet tracks which values have been seen, to detect duplicates/gaps.
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.size(); j++) {
LOG.debug("split["+j+"]= " + splits.get(j));
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<Text, Text> reader = format.createRecordReader(
splits.get(j), context);
Class<?> clazz = reader.getClass();
assertEquals("reader class is KeyValueLineRecordReader.",
KeyValueLineRecordReader.class, clazz);
MapContext<Text, Text, Text, Text> mcontext =
new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), splits.get(j));
reader.initialize(splits.get(j), mcontext);
Text key = null;
Text value = null;
try {
int count = 0;
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
clazz = key.getClass();
assertEquals("Key class is Text.", Text.class, clazz);
value = reader.getCurrentValue();
clazz = value.getClass();
assertEquals("Value class is Text.", Text.class, clazz);
// Each line was written as "2i\ti": key must be even, value = key/2.
final int k = Integer.parseInt(key.toString());
final int v = Integer.parseInt(value.toString());
assertEquals("Bad key", 0, k % 2);
assertEquals("Mismatched key/value", k / 2, v);
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]=" + splits.get(j) +" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
};
|
package com.bt.pi.core.continuation;
public interface UpdateResolver<T> {
/**
 * The update method is called as a result of the DHT read to retrieve the existing entity. Because a DHT write may
 * fail due to version conflict this update method should be written to deal with multiple calls.
 *
 * @param existingEntity the entity currently stored in the DHT (may differ between retries)
 * @param requestedEntity the entity the caller intends to write
 * @return the resolved entity to write back to the DHT
 */
T update(T existingEntity, T requestedEntity);
}
|
/**
* Copyright (C) 2007-2017 Tatsuo Satoh <multisqllib@gmail.com>
*
* This file is part of sqlapp-core-derby.
*
* sqlapp-core-derby is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* sqlapp-core-derby is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with sqlapp-core-derby. If not, see <http://www.gnu.org/licenses/>.
*/
package com.sqlapp.data.db.dialect.derby.metadata;
import static com.sqlapp.util.CommonUtils.list;
import static com.sqlapp.util.CommonUtils.tripleKeyMap;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import com.sqlapp.data.db.dialect.Dialect;
import com.sqlapp.data.db.metadata.CheckConstraintReader;
import com.sqlapp.data.parameter.ParametersContext;
import com.sqlapp.data.schemas.CheckConstraint;
import com.sqlapp.data.schemas.Column;
import com.sqlapp.data.schemas.ProductVersionInfo;
import com.sqlapp.jdbc.ExResultSet;
import com.sqlapp.jdbc.sql.ResultSetNextHandler;
import com.sqlapp.jdbc.sql.node.SqlNode;
import com.sqlapp.util.TripleKeyMap;
/**
 * Check-constraint metadata reader for Derby.
 * (Original Javadoc was Japanese: "Derby check constraint reading class".)
 *
 * @author satoh
 *
 */
public class DerbyCheckConstraintReader extends CheckConstraintReader {
public DerbyCheckConstraintReader(Dialect dialect) {
super(dialect);
}
/**
 * Runs checkConstraints.sql and assembles one CheckConstraint per
 * (catalog, schema, constraint-name), collecting the referenced columns
 * from the per-column result rows.
 */
@Override
protected List<CheckConstraint> doGetAll(Connection connection,
ParametersContext context,
final ProductVersionInfo productVersionInfo) {
SqlNode node = getSqlSqlNode(productVersionInfo);
// colMap: (catalog, schema, constraint) -> referenced columns.
final TripleKeyMap<String, String, String, List<Column>> colMap = tripleKeyMap();
// tMap: (catalog, schema, constraint) -> the constraint object itself.
final TripleKeyMap<String, String, String, CheckConstraint> tMap = tripleKeyMap();
execute(connection, node, context, new ResultSetNextHandler() {
@Override
public void handleResultSetNext(ExResultSet rs) throws SQLException {
// Derby has no catalogs; key on null catalog throughout.
String catalogName = null;
String schemaName = getString(rs, "SCHEMANAME");
String tableName = getString(rs, "TABLENAME");
String name = getString(rs, "CONSTRAINTNAME");
String source = getString(rs, "CHECKDEFINITION");
String column_name = getString(rs, "COLUMNNAME");
List<Column> cols = colMap.get(catalogName, schemaName, name);
CheckConstraint c = tMap.get(catalogName, schemaName, name);
if (c == null) {
// First row for this constraint: create it and its column list.
c = new CheckConstraint(name, source);
c.setSchemaName(schemaName);
c.setTableName(tableName);
cols = list();
tMap.put(catalogName, schemaName, name, c);
colMap.put(catalogName, schemaName, name, cols);
}
Column column = new Column(column_name);
column.setSchemaName(schemaName);
column.setTableName(tableName);
cols.add(column);
}
});
// Attach the column only for single-column constraints; multi-column
// check constraints are left without explicit column attribution.
for (CheckConstraint c : tMap.toList()) {
List<Column> cols = colMap.get(c.getCatalogName(),
c.getSchemaName(), c.getName());
if (cols.size() == 1) {
c.addColumns(cols);
}
}
return tMap.toList();
}
// Loads the dialect's SQL statement used to query the system catalogs.
protected SqlNode getSqlSqlNode(ProductVersionInfo productVersionInfo) {
return getSqlNodeCache().getString("checkConstraints.sql");
}
}
|
package ru.resql.orm.converters.instances;
import ru.resql.SqlException;
import ru.resql.orm.converters.*;
/**
 * Converts an integral column value into an enum constant by treating the
 * value as the enum's ordinal. Out-of-range ordinals raise SqlException.
 */
public class IntegralTypeToOrdinalEnumConvertor implements Converter<Enum<?>> {
// Cached constants of the target enum, indexed by ordinal.
private final Enum<?>[] enumValues;
public IntegralTypeToOrdinalEnumConvertor(Class<Enum<?>> enumType) {
enumValues = enumType.getEnumConstants();
}
/**
 * @param columnValue a Number whose intValue() is used as the ordinal
 * @return the enum constant at that ordinal
 * @throws SqlException if the ordinal is negative or beyond the last constant
 */
@Override
public Enum<?> convert(Object columnValue) throws ConverterException {
int ordinal = ((Number)columnValue).intValue();
if (ordinal < 0) {
throw new SqlException(
"Value " + ordinal + " cannot be converted to enum constant");
}
if (ordinal >= enumValues.length) {
throw new SqlException(
"Value " + ordinal + " cannot be converted to enum constant. Only "
+ enumValues.length + " constants available"
);
}
return enumValues[ordinal];
}
}
|
package net.iretailer.model;
// Generated 2016-2-1 23:19:37 by Hibernate Tools 4.3.1.Final
/**
* TrStoreBusinessTime generated by hbm2java
*/
/**
 * Association entity linking a store to a business-time slot.
 * Simple mutable JavaBean (originally produced by hbm2java).
 */
public class TrStoreBusinessTime implements java.io.Serializable {

    private String id;
    private String storeId;
    private String timeId;

    /** No-argument constructor required by the persistence layer. */
    public TrStoreBusinessTime() {
    }

    /** Fully-initializing constructor. */
    public TrStoreBusinessTime(String id, String storeId, String timeId) {
        this.id = id;
        this.storeId = storeId;
        this.timeId = timeId;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getStoreId() {
        return storeId;
    }

    public void setStoreId(String storeId) {
        this.storeId = storeId;
    }

    public String getTimeId() {
        return timeId;
    }

    public void setTimeId(String timeId) {
        this.timeId = timeId;
    }
}
|
package ch.so.agi.suchedreinull;
import java.util.List;
import org.springframework.data.solr.repository.SolrCrudRepository;
// Earlier derived-query variant, kept for reference; superseded by the
// custom-repository composition below:
//public interface ParcelRepository extends SolrCrudRepository<Parcel, String> {
// public Parcel findByEgrid(String egrid);
//
// public List<Parcel> findByEgridContaining(String egrid);
//}
/**
 * Solr repository for {@code Parcel} documents: combines standard CRUD
 * operations with the custom search methods from CustomParcelRepository.
 */
public interface ParcelRepository extends CustomParcelRepository, SolrCrudRepository<Parcel, String> {
}
|
package com.anysoftkeyboard.ui;
import static androidx.test.core.app.ApplicationProvider.getApplicationContext;
import android.view.MotionEvent;
import android.view.View;
import com.anysoftkeyboard.AnySoftKeyboardRobolectricTestRunner;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@RunWith(AnySoftKeyboardRobolectricTestRunner.class)
public class ViewPagerWithDisableTest {
private ViewPagerWithDisable mUnderTest;
// Fresh pager with one child page before each test.
@Before
public void setup() {
mUnderTest = new ViewPagerWithDisable(getApplicationContext());
mUnderTest.addView(new View(getApplicationContext()));
}
// A disabled pager must report the touch event as unhandled.
@Test
public void testOnTouchEventDisabled() throws Exception {
mUnderTest.setEnabled(false);
Assert.assertFalse(
mUnderTest.onTouchEvent(
MotionEvent.obtain(10, 10, MotionEvent.ACTION_DOWN, 1f, 1f, 0)));
}
// A disabled pager must also decline to intercept child touch events.
@Test
public void onInterceptTouchEventDisabled() throws Exception {
mUnderTest.setEnabled(false);
Assert.assertFalse(
mUnderTest.onInterceptTouchEvent(
MotionEvent.obtain(10, 10, MotionEvent.ACTION_DOWN, 1f, 1f, 0)));
}
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package modelo;
/**
*
* @author flynn
*/
import javax.swing.JOptionPane;
/** Small helpers around {@link JOptionPane} for showing modal message dialogs. */
public class PopUp
{
    // Utility class with only static members: prevent instantiation.
    private PopUp() {
    }

    /** Shows a modal information dialog with the given message and title. */
    public static void InfoBox(String infoMessage, String titleBar){
        JOptionPane.showMessageDialog(null, infoMessage,titleBar, JOptionPane.INFORMATION_MESSAGE);
    }

    /** Shows a modal error dialog with the given message and title. */
    public static void warningBox(String infoMessage, String titleBar)
    {
        JOptionPane.showMessageDialog(null, infoMessage,titleBar, JOptionPane.ERROR_MESSAGE);
    }
}
|
package grondag.mcmd.node;
/**
 * Markdown heading node; {@code level} is the heading depth (e.g. 1 for
 * {@code #}, 2 for {@code ##}).
 */
public class Heading extends Block {
private int level;
// Double-dispatch hook for the visitor pattern.
@Override
public void accept(Visitor visitor) {
visitor.visit(this);
}
public int getLevel() {
return level;
}
public void setLevel(int level) {
this.level = level;
}
}
|
/***
Copyright (c) 2008-2012 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
Covered in detail in the book _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.android.list;
import android.app.ListActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.widget.TextView;
/**
 * Minimal ListActivity demo: shows a fixed list of words and echoes the
 * clicked item into a TextView.
 */
public class ListViewDemo extends ListActivity {
// Label that mirrors the most recently clicked list item.
private TextView selection;
// Static demo data backing the list adapter.
private static final String[] items={"lorem", "ipsum", "dolor",
"sit", "amet",
"consectetuer", "adipiscing", "elit", "morbi", "vel",
"ligula", "vitae", "arcu", "aliquet", "mollis",
"etiam", "vel", "erat", "placerat", "ante",
"porttitor", "sodales", "pellentesque", "augue", "purus"};
@Override
public void onCreate(Bundle icicle) {
super.onCreate(icicle);
setContentView(R.layout.main);
setListAdapter(new ArrayAdapter<String>(this,
android.R.layout.simple_list_item_1,
items));
selection=(TextView)findViewById(R.id.selection);
}
// Displays the tapped item's text in the selection label.
@Override
public void onListItemClick(ListView parent, View v, int position,
long id) {
selection.setText(items[position]);
}
}
|
package com.sx.sxblog.controller;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sx.sxblog.common.SendMailUtil;
import com.sx.sxblog.common.UUIDUtil;
import com.sx.sxblog.entity.User;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.http.MediaType;
import org.springframework.mock.web.MockHttpSession;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.request.MockMvcRequestBuilders;
import org.springframework.test.web.servlet.result.MockMvcResultHandlers;
import org.springframework.test.web.servlet.result.MockMvcResultMatchers;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.web.context.WebApplicationContext;
import java.util.HashMap;
import java.util.Map;
import java.util.SimpleTimeZone;
@RunWith(SpringRunner.class)
@SpringBootTest
public class UserControllerTest {
    // All previous MockMvc-based tests (signIn / signUp / updateUserInfo /
    // getUserInfo / mail sending) had been fully commented out; that dead
    // code has been removed rather than kept in the tree. Recover the old
    // scenarios from version control if they are needed again, and re-enable
    // them as real @Test methods instead of commented-out blocks.
}
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codepipeline.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.codepipeline.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * TooManyTagsException JSON Unmarshaller
 *
 * <p>Generated code: materializes a {@code TooManyTagsException} from a JSON
 * error response. The exception has no modeled members, so the token loop
 * only skims past the enclosing JSON object.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class TooManyTagsExceptionUnmarshaller extends EnhancedJsonErrorUnmarshaller {

    // Singleton: instances are obtained via getInstance().
    private TooManyTagsExceptionUnmarshaller() {
        super(com.amazonaws.services.codepipeline.model.TooManyTagsException.class, "TooManyTagsException");
    }

    /**
     * Consumes the JSON object for this error from {@code context} and returns
     * the populated exception, or {@code null} when the value is JSON null.
     */
    @Override
    public com.amazonaws.services.codepipeline.model.TooManyTagsException unmarshallFromContext(JsonUnmarshallerContext context) throws Exception {
        com.amazonaws.services.codepipeline.model.TooManyTagsException tooManyTagsException = new com.amazonaws.services.codepipeline.model.TooManyTagsException(
                null);

        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        // NOTE: targetDepth is part of the generated skeleton but unused here
        // because this exception declares no members to parse.
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return null;
        }

        // Skim tokens until we have closed the object that started at originalDepth.
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                // No modeled members to read; generated skeleton is intentionally empty.
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return tooManyTagsException;
    }

    private static TooManyTagsExceptionUnmarshaller instance;

    // Lazily-initialized singleton accessor. Not synchronized, matching the
    // generated AWS SDK pattern; a duplicate instance under a race is harmless.
    public static TooManyTagsExceptionUnmarshaller getInstance() {
        if (instance == null)
            instance = new TooManyTagsExceptionUnmarshaller();
        return instance;
    }
}
|
package com.google.android.exoplayer2.upstream;
/**
 * Source of {@code Allocation}s used to buffer media data.
 *
 * <p>NOTE(review): semantics below are inferred from names only — confirm
 * against the implementing class before relying on them.
 */
public interface Allocator {
    /** Obtains a new or recycled allocation. */
    Allocation allocate();
    /** Returns the size, in bytes, of each individual allocation. */
    int getIndividualAllocationLength();
    /** Returns the total number of bytes currently allocated. */
    int getTotalBytesAllocated();
    /** Returns a single allocation to the allocator. */
    void release(Allocation allocation);
    /** Returns an array of allocations to the allocator. */
    void release(Allocation[] allocationArr);
    /** Hints the allocator that it may discard surplus pooled capacity. */
    void trim();
}
|
class Solution {
public int XXX(int[] nums, int val) {
// 快慢指针
int fastIndex = 0;
int slowIndex;
for (slowIndex = 0; fastIndex < nums.length; fastIndex++) {
if (nums[fastIndex] != val) {
nums[slowIndex] = nums[fastIndex];
slowIndex++;
}
}
return slowIndex;
}
}
|
package mage.cards.b;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.AttacksOrBlocksTriggeredAbility;
import mage.abilities.effects.OneShotEffect;
import mage.abilities.keyword.FlyingAbility;
import mage.abilities.keyword.VigilanceAbility;
import mage.cards.Card;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.*;
import mage.filter.FilterCard;
import mage.filter.FilterPermanent;
import mage.filter.predicate.Predicates;
import mage.filter.predicate.mageobject.CardIdPredicate;
import mage.filter.predicate.card.AuraCardCanAttachToPermanentId;
import mage.filter.predicate.permanent.AuraPermanentCanAttachToPermanentId;
import mage.game.Game;
import mage.game.permanent.Permanent;
import mage.players.Player;
import mage.target.Target;
import mage.target.TargetCard;
import mage.target.TargetPermanent;
/**
 * Bruna, Light of Alabaster — legendary Angel creature, {3}{W}{W}{U}, 5/5,
 * with flying and vigilance. Its attack/block trigger (optional) lets the
 * controller mass-attach Auras; see {@code BrunaLightOfAlabasterEffect}.
 *
 * @author noxx
 */
public final class BrunaLightOfAlabaster extends CardImpl {

    public BrunaLightOfAlabaster(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.CREATURE}, "{3}{W}{W}{U}");
        addSuperType(SuperType.LEGENDARY);
        this.subtype.add(SubType.ANGEL);
        this.power = new MageInt(5);
        this.toughness = new MageInt(5);
        this.addAbility(FlyingAbility.getInstance());
        this.addAbility(VigilanceAbility.getInstance());
        // Whenever Bruna, Light of Alabaster attacks or blocks, you may attach to it any number of Auras on the battlefield and you may put onto the battlefield attached to it any number of Aura cards that could enchant it from your graveyard and/or hand.
        // (second ctor arg 'true' marks the trigger as optional)
        this.addAbility(new AttacksOrBlocksTriggeredAbility(new BrunaLightOfAlabasterEffect(), true));
    }

    // Copy constructor used by copy() for game-state duplication.
    private BrunaLightOfAlabaster(final BrunaLightOfAlabaster card) {
        super(card);
    }

    @Override
    public BrunaLightOfAlabaster copy() {
        return new BrunaLightOfAlabaster(this);
    }
}
/**
 * One-shot effect for Bruna's trigger: interactively gathers any number of
 * Auras from the battlefield, hand, and graveyard that could legally attach
 * to Bruna, then moves/attaches them all. Selection happens first (three
 * choose-loops) and movement second, so later choices are not affected by
 * partially applied moves.
 */
class BrunaLightOfAlabasterEffect extends OneShotEffect {

    public BrunaLightOfAlabasterEffect() {
        super(Outcome.Benefit);
        this.staticText = "attach to it any number of Auras on the battlefield and you may put onto the battlefield attached to it any number of Aura cards that could enchant it from your graveyard and/or hand";
    }

    public BrunaLightOfAlabasterEffect(final BrunaLightOfAlabasterEffect effect) {
        super(effect);
    }

    @Override
    public BrunaLightOfAlabasterEffect copy() {
        return new BrunaLightOfAlabasterEffect(this);
    }

    @Override
    public boolean apply(Game game, Ability source) {
        Player controller = game.getPlayer(source.getControllerId());
        if (controller == null) { return false; }
        UUID bruna = source.getSourceId();
        // Filters restricted to Auras that can legally attach to Bruna.
        FilterPermanent filterAura = new FilterPermanent("Aura");
        FilterCard filterAuraCard = new FilterCard("Aura card");
        filterAura.add(CardType.ENCHANTMENT.getPredicate());
        filterAura.add(SubType.AURA.getPredicate());
        filterAura.add(new AuraPermanentCanAttachToPermanentId(bruna));
        filterAuraCard.add(CardType.ENCHANTMENT.getPredicate());
        filterAuraCard.add(SubType.AURA.getPredicate());
        filterAuraCard.add(new AuraCardCanAttachToPermanentId(bruna));
        Permanent sourcePermanent = game.getPermanent(source.getSourceId());
        if (sourcePermanent == null) { return false; }
        // Selections are accumulated here and applied after all choosing is done.
        List<Permanent> fromBattlefield = new ArrayList<>();
        List<Card> fromHandGraveyard = new ArrayList<>();
        // --- Phase 1a: pick Auras already on the battlefield (excluding ones already on Bruna).
        int countBattlefield = game.getBattlefield().getAllActivePermanents(filterAura, game).size() - sourcePermanent.getAttachments().size();
        while (controller.canRespond()
                && countBattlefield > 0
                && controller.chooseUse(Outcome.Benefit, "Attach an Aura from the battlefield?", source, game)) {
            Target targetAura = new TargetPermanent(filterAura);
            targetAura.setNotTarget(true);
            if (!controller.choose(Outcome.Benefit, targetAura, source.getSourceId(), game)) { continue; }
            Permanent aura = game.getPermanent(targetAura.getFirstTarget());
            if (aura == null) { continue; }
            // NOTE(review): getTargets().get(0) assumes the Aura's spell ability
            // always has at least one target — appears to hold for Auras, but an
            // empty target list would throw here; confirm against the framework.
            Target target = aura.getSpellAbility().getTargets().get(0);
            if (target == null) { continue; }
            fromBattlefield.add(aura);
            // Exclude the chosen Aura so it cannot be picked again next iteration.
            filterAura.add(Predicates.not(new CardIdPredicate(aura.getId())));
            countBattlefield = game.getBattlefield().getAllActivePermanents(filterAura, game).size() - sourcePermanent.getAttachments().size();
        }
        // --- Phase 1b: pick Aura cards from hand.
        int countHand = controller.getHand().count(filterAuraCard, game);
        while (controller.canRespond()
                && countHand > 0
                && controller.chooseUse(Outcome.Benefit, "Attach an Aura from your hand?", source, game)) {
            TargetCard targetAura = new TargetCard(Zone.HAND, filterAuraCard);
            if (!controller.choose(Outcome.Benefit, controller.getHand(), targetAura, game)) { continue; }
            Card aura = game.getCard(targetAura.getFirstTarget());
            if (aura == null) { continue; }
            Target target = aura.getSpellAbility().getTargets().get(0);
            if (target == null) { continue; }
            fromHandGraveyard.add(aura);
            filterAuraCard.add(Predicates.not(new CardIdPredicate(aura.getId())));
            countHand = controller.getHand().count(filterAuraCard, game);
        }
        // --- Phase 1c: pick Aura cards from graveyard (filter exclusions from
        // the hand loop carry over, so a card chosen there cannot be re-chosen).
        int countGraveyard = controller.getGraveyard().count(filterAuraCard, game);
        while (controller.canRespond()
                && countGraveyard > 0
                && controller.chooseUse(Outcome.Benefit, "Attach an Aura from your graveyard?", source, game)) {
            TargetCard targetAura = new TargetCard(Zone.GRAVEYARD, filterAuraCard);
            if (!controller.choose(Outcome.Benefit, controller.getGraveyard(), targetAura, game)) { continue; }
            Card aura = game.getCard(targetAura.getFirstTarget());
            if (aura == null) { continue; }
            Target target = aura.getSpellAbility().getTargets().get(0);
            if (target == null) { continue; }
            fromHandGraveyard.add(aura);
            filterAuraCard.add(Predicates.not(new CardIdPredicate(aura.getId())));
            countGraveyard = controller.getGraveyard().count(filterAuraCard, game);
        }
        // --- Phase 2: re-attach battlefield Auras (detach from current host first).
        // Move permanents
        for (Permanent aura : fromBattlefield) {
            Permanent attachedTo = game.getPermanent(aura.getAttachedTo());
            if (attachedTo != null) {
                attachedTo.removeAttachment(aura.getId(), source, game);
            }
            sourcePermanent.addAttachment(aura.getId(), source, game);
        }
        // --- Phase 3: put hand/graveyard Aura cards onto the battlefield attached to Bruna.
        // Move cards
        for (Card aura : fromHandGraveyard) {
            if (aura == null) { continue; }
            // "attachTo:" state value tells the zone-change machinery where to attach.
            game.getState().setValue("attachTo:" + aura.getId(), sourcePermanent);
            controller.moveCards(aura, Zone.BATTLEFIELD, source, game);
            sourcePermanent.addAttachment(aura.getId(), source, game);
        }
        return true;
    }
}
|
/*
* Copyright 2002-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.socket.sockjs.transport;
import java.io.IOException;
import org.springframework.web.socket.WebSocketHandler;
import org.springframework.web.socket.sockjs.SockJsConfiguration;
import org.springframework.web.socket.sockjs.SockJsFrame;
/**
 * A SockJS session for use with polling HTTP transports.
 *
 * <p>Flushes all cached messages as a single message frame, and completes the
 * current polling request after every frame write.
 *
 * @author Rossen Stoyanchev
 */
public class PollingSockJsSession extends AbstractHttpSockJsSession {

    public PollingSockJsSession(String sessionId, SockJsConfiguration config, WebSocketHandler handler) {
        super(sessionId, config, handler);
    }

    @Override
    protected void flushCache() throws IOException {
        cancelHeartbeat();
        // Snapshot and clear the cache before writing, so the frame carries
        // exactly the messages that were pending at this point.
        String[] pending = getMessageCache().toArray(new String[getMessageCache().size()]);
        getMessageCache().clear();
        writeFrame(SockJsFrame.messageFrame(pending));
    }

    @Override
    protected void writeFrame(SockJsFrame frame) throws IOException {
        super.writeFrame(frame);
        // Polling transport: each frame terminates the current HTTP request.
        resetRequest();
    }
}
|
/*
* author: Keerthana Sadam ; Date: 11/17/2017
* Included test cases which covers all functions
*
*/
package iProlog;
import java.util.Arrays;
/**
 * Runtime representation of an immutable list of goals together with top of
 * heap and trail pointers and the current clause tried out by the head goal,
 * as well as registers associated to it.
 *
 * <p>Note that parts of these immutable lists are shared among alternative
 * branches.
 */
class Spine {

    final int hd;     // head of the clause to which this corresponds
    final int base;   // top of the heap when this was created
    final IntList gs; // goals - with the top one ready to unfold
    final int ttop;   // top of the trail when this was created

    int k;
    int[] xs; // index elements
    int[] cs; // array of clauses known to be unifiable with top goal in gs

    /**
     * Creates a spine - as a snapshot of some runtime elements.
     */
    Spine(final int[] gs0, final int base, final IntList gs, final int ttop, final int k, final int[] cs) {
        this.hd = gs0[0];
        this.base = base;
        // Prepends the goals of the clause with head hd.
        this.gs = IntList.tail(IntList.app(gs0, gs));
        this.ttop = ttop;
        this.k = k;
        this.cs = cs;
    }

    /**
     * Creates a specialized spine returning an answer (with no goals left to solve).
     */
    Spine(final int hd, final int ttop) {
        this.hd = hd;
        this.base = 0;
        this.gs = IntList.empty;
        this.ttop = ttop;
        this.k = -1;
        this.cs = null;
    }

    // Smoke test exercising both constructors and printing field values.
    public static void main(String[] args) {
        IntList il = null;
        Spine answer = new Spine(0, -1);
        Spine snapshot = new Spine(new int[]{11, 22, 33}, -2, il, 5, 10, new int[]{0, 1, 2});
        System.out.println("head: " + answer.hd);
        System.out.println("base: " + answer.base);
        System.out.println("Intlist: " + snapshot.gs);
        System.out.println("ttop: " + snapshot.ttop);
        System.out.println("s2 head : " + snapshot.hd);
        System.out.println("s2 k: " + snapshot.k);
        System.out.println("s2.cs: " + Arrays.toString(snapshot.cs));
    }
}
|
package javalearning;
// Placeholder application class for the javalearning package; no members yet.
public class APP {
}
|
package legoset;
import lombok.Data;
import javax.xml.bind.annotation.*;
/**
 * JAXB-mapped minifig entry: the element's text content is the title and the
 * {@code count} attribute records how many are included.
 * Getters/setters/equals/hashCode/toString are generated by Lombok's
 * {@code @Data}.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@Data
public class Minifig {

    // Element text content (bound via @XmlValue).
    @XmlValue
    private String title;

    // Quantity attribute (bound via @XmlAttribute).
    @XmlAttribute
    private int count;

    /** No-arg constructor required by JAXB. */
    public Minifig() {
    }

    public Minifig(String title, int count) {
        this.count = count;
        this.title = title;
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.waf.model.waf_regional.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.waf.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * UpdateRegexPatternSetResult JSON Unmarshaller
 *
 * <p>Generated code: reads the single modeled member, {@code ChangeToken},
 * from the JSON response and skims everything else until the enclosing object
 * is closed.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class UpdateRegexPatternSetResultJsonUnmarshaller implements Unmarshaller<UpdateRegexPatternSetResult, JsonUnmarshallerContext> {

    /**
     * Consumes the JSON for this result from {@code context}. A JSON null
     * yields an empty (default) result object rather than {@code null}.
     */
    public UpdateRegexPatternSetResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        UpdateRegexPatternSetResult updateRegexPatternSetResult = new UpdateRegexPatternSetResult();

        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return updateRegexPatternSetResult;
        }

        // Walk tokens: capture ChangeToken at the expected depth, break once
        // the object that started at originalDepth has been closed.
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("ChangeToken", targetDepth)) {
                    context.nextToken();
                    updateRegexPatternSetResult.setChangeToken(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return updateRegexPatternSetResult;
    }

    private static UpdateRegexPatternSetResultJsonUnmarshaller instance;

    // Lazily-initialized singleton accessor, matching the generated SDK pattern.
    public static UpdateRegexPatternSetResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new UpdateRegexPatternSetResultJsonUnmarshaller();
        return instance;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.net.Authenticator;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.destination.DestinationDatasetHandlerService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.CaseFormat;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.CommitSequenceStore;
import org.apache.gobblin.commit.DeliverySemantics;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.initializer.ConverterInitializerFactory;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventName;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.MultiEventMetadataGenerator;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.gobblin.runtime.listeners.CloseableJobListener;
import org.apache.gobblin.runtime.listeners.JobExecutionEventSubmitterListener;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.runtime.listeners.JobListeners;
import org.apache.gobblin.runtime.locks.JobLock;
import org.apache.gobblin.runtime.locks.JobLockEventListener;
import org.apache.gobblin.runtime.locks.JobLockException;
import org.apache.gobblin.runtime.locks.LegacyJobLockFactoryManager;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.WorkUnitStreamSource;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.util.ClusterNameTags;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.Id;
import org.apache.gobblin.util.JobLauncherUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.initializer.WriterInitializerFactory;
/**
* An abstract implementation of {@link JobLauncher} that handles common tasks for launching and running a job.
*
* @author Yinan Li
*/
public abstract class AbstractJobLauncher implements JobLauncher {
static final Logger LOG = LoggerFactory.getLogger(AbstractJobLauncher.class);
public static final String TASK_STATE_STORE_TABLE_SUFFIX = ".tst";
public static final String JOB_STATE_FILE_NAME = "job.state";
public static final String WORK_UNIT_FILE_EXTENSION = ".wu";
public static final String MULTI_WORK_UNIT_FILE_EXTENSION = ".mwu";
public static final String GOBBLIN_JOB_TEMPLATE_KEY = "gobblin.template.uri";
public static final String NUM_WORKUNITS = "numWorkUnits";
/** Making {@link AbstractJobLauncher} capable of loading multiple job templates.
* Keep the original {@link #GOBBLIN_JOB_TEMPLATE_KEY} for backward-compatibility.
* TODO: Expand support to Gobblin-as-a-Service in FlowTemplateCatalog.
* */
public static final String GOBBLIN_JOB_MULTI_TEMPLATE_KEY = "gobblin.template.uris";
// Job configuration properties
protected final Properties jobProps;
// This contains all job context information
protected final JobContext jobContext;
// This (optional) JobLock is used to prevent the next scheduled run
// of the job from starting if the current run has not finished yet
protected Optional<JobLock> jobLockOptional = Optional.absent();
// A conditional variable for which the condition is satisfied if a cancellation is requested
protected final Object cancellationRequest = new Object();
// A flag indicating whether a cancellation has been requested or not
protected volatile boolean cancellationRequested = false;
// A conditional variable for which the condition is satisfied if the cancellation is executed
protected final Object cancellationExecution = new Object();
// A flag indicating whether a cancellation has been executed or not
protected volatile boolean cancellationExecuted = false;
// A single-thread executor for executing job cancellation
protected final ExecutorService cancellationExecutor;
// An MetricContext to track runtime metrics only if metrics are enabled.
protected final Optional<MetricContext> runtimeMetricContext;
// An EventBuilder with basic metadata.
protected final EventSubmitter eventSubmitter;
// This is for dispatching events related to job launching and execution to registered subscribers
protected final EventBus eventBus = new EventBus(AbstractJobLauncher.class.getSimpleName());
// A list of JobListeners that will be injected into the user provided JobListener
private final List<JobListener> mandatoryJobListeners = Lists.newArrayList();
// Used to generate additional metadata to emit in timing events
private final MultiEventMetadataGenerator multiEventMetadataGenerator;
/**
 * Convenience overload: delegates to the main constructor with no
 * pre-existing resource broker (one is created per job in that case).
 *
 * @param jobProps     job configuration properties; must contain job.name
 * @param metadataTags tags added to metrics/events emitted for this job
 * @throws Exception if job setup fails (e.g. the job lock cannot be acquired)
 */
public AbstractJobLauncher(Properties jobProps, List<? extends Tag<?>> metadataTags)
    throws Exception {
  this(jobProps, metadataTags, null);
}
/**
 * Main constructor. Validates the job name, resolves any configured job
 * template, acquires the job lock, and then builds the job context, event
 * plumbing, and metrics. If anything after lock acquisition fails, the lock
 * is released before the exception is rethrown.
 *
 * @param jobProps       job configuration properties; must contain job.name
 * @param metadataTags   tags added to metrics/events emitted for this job
 * @param instanceBroker optional shared-resources broker; when null a
 *                       job-scoped broker is created
 * @throws JobException if a previous instance of the job still holds the lock
 * @throws Exception    if any other part of job setup fails
 */
public AbstractJobLauncher(Properties jobProps, List<? extends Tag<?>> metadataTags,
    @Nullable SharedResourcesBroker<GobblinScopeTypes> instanceBroker)
    throws Exception {
  Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.JOB_NAME_KEY),
      "A job must have a job name specified by job.name");

  // Add clusterIdentifier tag so that it is added to any new TaskState created
  List<Tag<?>> clusterNameTags = Lists.newArrayList();
  clusterNameTags.addAll(Tag.fromMap(ClusterNameTags.getClusterNameTags()));
  GobblinMetrics.addCustomTagsToProperties(jobProps, clusterNameTags);

  // Make a copy for both the system and job configuration properties and resolve the job-template if any.
  this.jobProps = new Properties();
  this.jobProps.putAll(jobProps);
  resolveGobblinJobTemplateIfNecessary(this.jobProps);

  // Lock must be held before any further setup; failure here means a previous
  // run of the same job is still in progress.
  if (!tryLockJob(this.jobProps)) {
    throw new JobException(String.format("Previous instance of job %s is still running, skipping this scheduled run",
        this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY)));
  }

  try {
    setDefaultAuthenticator(this.jobProps);

    if (instanceBroker == null) {
      instanceBroker = createDefaultInstanceBroker(jobProps);
    }

    this.jobContext = new JobContext(this.jobProps, LOG, instanceBroker);
    this.eventBus.register(this.jobContext);

    // Single daemon thread dedicated to executing a requested cancellation.
    this.cancellationExecutor = Executors.newSingleThreadExecutor(
        ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOG), Optional.of("CancellationExecutor")));

    // Present only when metrics are enabled for this job.
    this.runtimeMetricContext =
        this.jobContext.getJobMetricsOptional().transform(new Function<JobMetrics, MetricContext>() {
          @Override
          public MetricContext apply(JobMetrics input) {
            return input.getMetricContext();
          }
        });
    this.eventSubmitter = buildEventSubmitter(metadataTags);

    // Add all custom tags to the JobState so that tags are added to any new TaskState created
    GobblinMetrics.addCustomTagToState(this.jobContext.getJobState(), metadataTags);

    JobExecutionEventSubmitter jobExecutionEventSubmitter = new JobExecutionEventSubmitter(this.eventSubmitter);
    this.mandatoryJobListeners.add(new JobExecutionEventSubmitterListener(jobExecutionEventSubmitter));

    this.multiEventMetadataGenerator = new MultiEventMetadataGenerator(
        PropertiesUtils.getPropAsList(jobProps, ConfigurationKeys.EVENT_METADATA_GENERATOR_CLASS_KEY,
            ConfigurationKeys.DEFAULT_EVENT_METADATA_GENERATOR_CLASS_KEY));
  } catch (Exception e) {
    // Setup failed after the lock was acquired: release it so the next
    // scheduled run is not blocked, then propagate the original failure.
    unlockJob();
    throw e;
  }
}
/**
 * Sets the JVM default {@link Authenticator} to the class named by
 * {@link ConfigurationKeys#DEFAULT_AUTHENTICATOR_CLASS}, constructed with the
 * provided {@link Properties}. No-op when the property is absent or empty.
 */
public static void setDefaultAuthenticator(Properties properties) {
  String authenticatorClass = properties.getProperty(ConfigurationKeys.DEFAULT_AUTHENTICATOR_CLASS);
  if (Strings.isNullOrEmpty(authenticatorClass)) {
    return; // Nothing configured; leave the JVM default untouched.
  }
  Authenticator.setDefault(
      GobblinConstructorUtils.invokeConstructor(Authenticator.class, authenticatorClass, properties));
}
/**
 * Resolves 'gobblin.template.uri' (single template) or 'gobblin.template.uris'
 * (comma-separated list) against the given job properties, merging the
 * resolved configuration back into {@code jobProps}. Public and static so any
 * JobLauncher implementation (including AzkabanJobLauncher) can reuse it.
 *
 * @param jobProps Gobblin Job-level properties, mutated in place when a
 *                 template key is present
 */
public static void resolveGobblinJobTemplateIfNecessary(Properties jobProps) throws IOException, URISyntaxException,
                                                                                    SpecNotFoundException,
                                                                                    JobTemplate.TemplateException {
  Config config = ConfigUtils.propertiesToConfig(jobProps);
  JobSpecResolver resolver = JobSpecResolver.builder(config).build();

  // Single-template key takes precedence over the multi-template key.
  JobSpec spec = null;
  if (jobProps.containsKey(GOBBLIN_JOB_TEMPLATE_KEY)) {
    spec = JobSpec.builder()
        .withConfig(config)
        .withTemplate(new URI(jobProps.getProperty(GOBBLIN_JOB_TEMPLATE_KEY)))
        .build();
  } else if (jobProps.containsKey(GOBBLIN_JOB_MULTI_TEMPLATE_KEY)) {
    List<URI> templateUris = new ArrayList<>();
    for (String uri : jobProps.getProperty(GOBBLIN_JOB_MULTI_TEMPLATE_KEY).split(",")) {
      templateUris.add(new URI(uri));
    }
    spec = JobSpec.builder().withConfig(config).withResourceTemplates(templateUris).build();
  }

  if (spec != null) {
    jobProps.putAll(ConfigUtils.configToProperties(resolver.resolveJobSpec(spec).getConfig()));
  }
}
/**
 * Builds a job-scoped {@link SharedResourcesBroker}; objects registered through it are shared
 * only within this job rather than across the whole process (hence the warning).
 */
private static SharedResourcesBroker<GobblinScopeTypes> createDefaultInstanceBroker(Properties jobProps) {
  LOG.warn("Creating a job specific {}. Objects will only be shared at the job level.",
      SharedResourcesBroker.class.getSimpleName());
  Config brokerConfig = ConfigFactory.parseProperties(jobProps);
  return SharedResourcesBrokerFactory.createDefaultTopLevelBroker(brokerConfig,
      GobblinScopeTypes.GLOBAL.defaultScopeInstance());
}
/**
 * The JobContext of the particular job.
 *
 * <p>Package-private accessor (no modifier) for the context backing this launcher.</p>
 *
 * @return {@link JobContext} of the job
 */
JobContext getJobContext() {
  return this.jobContext;
}
/**
 * A default implementation of {@link JobLauncher#cancelJob(JobListener)}.
 *
 * <p>
 * This implementation relies on two conditional variables: one for the condition that a cancellation
 * is requested, and the other for the condition that the cancellation is executed. Upon entrance, the
 * method notifies the cancellation executor started by {@link #startCancellationExecutor()} on the
 * first conditional variable to indicate that a cancellation has been requested so the executor is
 * unblocked. Then it waits on the second conditional variable for the cancellation to be executed.
 * </p>
 *
 * <p>
 * The actual execution of the cancellation is handled by the cancellation executor started by the
 * method {@link #startCancellationExecutor()} that uses the {@link #executeCancellation()} method
 * to execute the cancellation.
 * </p>
 *
 * {@inheritDoc JobLauncher#cancelJob(JobListener)}
 */
@Override
public void cancelJob(JobListener jobListener)
    throws JobException {
  synchronized (this.cancellationRequest) {
    if (this.cancellationRequested) {
      // Return immediately if a cancellation has already been requested
      return;
    }
    this.cancellationRequested = true;
    // Notify the cancellation executor that a cancellation has been requested
    this.cancellationRequest.notify();
  }
  synchronized (this.cancellationExecution) {
    try {
      while (!this.cancellationExecuted) {
        // Wait for the cancellation to be executed
        this.cancellationExecution.wait();
      }
      try {
        LOG.info("Current job state is: " + this.jobContext.getJobState().getState());
        // For commit policies that permit partial success, commit whatever completed before the
        // cancellation — unless the job already reached COMMITTED on its own.
        if (this.jobContext.getJobState().getState() != JobState.RunningState.COMMITTED && (
            this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS
            || this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_PARTIAL_SUCCESS)) {
          this.jobContext.finalizeJobStateBeforeCommit();
          this.jobContext.commit(true);
        }
        this.jobContext.close();
      } catch (IOException ioe) {
        // Best-effort: a close failure must not prevent listener notification below.
        LOG.error("Could not close job context.", ioe);
      }
      notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_CANCEL, new JobListenerAction() {
        @Override
        public void apply(JobListener jobListener, JobContext jobContext)
            throws Exception {
          jobListener.onJobCancellation(jobContext);
        }
      });
    } catch (InterruptedException ie) {
      // Preserve the interrupt status for callers higher up the stack.
      Thread.currentThread().interrupt();
    }
  }
}
/**
 * Predicate that drops {@link WorkUnit}s flagged with {@link ConfigurationKeys#WORK_UNIT_SKIP_KEY}.
 * Before a skipped work unit is filtered out, a SKIPPED task state for it is recorded on the
 * {@link JobState}. Neither a {@link MultiWorkUnit} nor any of its children may carry the skip flag.
 */
@RequiredArgsConstructor
private static class SkippedWorkUnitsFilter implements Predicate<WorkUnit> {
  private final JobState jobState;

  @Override
  public boolean apply(WorkUnit workUnit) {
    if (workUnit instanceof MultiWorkUnit) {
      Preconditions.checkArgument(!workUnit.contains(ConfigurationKeys.WORK_UNIT_SKIP_KEY),
          "Error: MultiWorkUnit cannot be skipped");
      for (WorkUnit inner : ((MultiWorkUnit) workUnit).getWorkUnits()) {
        Preconditions.checkArgument(!inner.contains(ConfigurationKeys.WORK_UNIT_SKIP_KEY),
            "Error: MultiWorkUnit cannot contain skipped WorkUnit");
      }
    }
    if (!workUnit.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_SKIP_KEY, false)) {
      return true;
    }
    // Record the skip on the job state, then filter the work unit out of the stream.
    WorkUnitState skippedState = new WorkUnitState(workUnit, this.jobState);
    skippedState.setWorkingState(WorkUnitState.WorkingState.SKIPPED);
    this.jobState.addSkippedTaskState(new TaskState(skippedState));
    return false;
  }
}
/**
 * Default implementation of {@link JobLauncher#launchJob(JobListener)}: drives the full job
 * lifecycle — work unit creation, preparation, execution (delegated to
 * {@link #runWorkUnitStream(WorkUnitStream)}), commit, staging-data cleanup, and listener/event
 * notification — with timing events emitted around every phase.
 */
@Override
public void launchJob(JobListener jobListener)
    throws JobException {
  String jobId = this.jobContext.getJobId();
  final JobState jobState = this.jobContext.getJobState();
  // Remembers the "no work units created" early-return case so the finally block below can emit
  // the matching completion/success events.
  boolean isWorkUnitsEmpty = false;
  try {
    MDC.put(ConfigurationKeys.JOB_NAME_KEY, this.jobContext.getJobName());
    MDC.put(ConfigurationKeys.JOB_KEY_KEY, this.jobContext.getJobKey());
    TimingEvent launchJobTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.FULL_JOB_EXECUTION);
    try (Closer closer = Closer.create()) {
      closer.register(this.jobContext);
      notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_PREPARE, new JobListenerAction() {
        @Override
        public void apply(JobListener jobListener, JobContext jobContext)
            throws Exception {
          jobListener.onJobPrepare(jobContext);
        }
      });
      if (this.jobContext.getSemantics() == DeliverySemantics.EXACTLY_ONCE) {
        // If exactly-once is used, commit sequences of the previous run must be successfully completed
        // before this run can make progress.
        executeUnfinishedCommitSequences(jobState.getJobName());
      }
      TimingEvent workUnitsCreationTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_CREATION);
      Source<?, ?> source = this.jobContext.getSource();
      WorkUnitStream workUnitStream;
      if (source instanceof WorkUnitStreamSource) {
        workUnitStream = ((WorkUnitStreamSource) source).getWorkunitStream(jobState);
      } else {
        workUnitStream = new BasicWorkUnitStream.Builder(source.getWorkunits(jobState)).build();
      }
      workUnitsCreationTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
          EventName.WORK_UNITS_CREATION));
      // Report work unit creation time (in seconds) as a gauge when a metric context is available.
      if (this.runtimeMetricContext.isPresent()) {
        String workunitCreationGaugeName = MetricRegistry
            .name(ServiceMetricNames.GOBBLIN_JOB_METRICS_PREFIX, TimingEvent.LauncherTimings.WORK_UNITS_CREATION,
                jobState.getJobName());
        long workUnitsCreationTime = workUnitsCreationTimer.getDuration() / TimeUnit.SECONDS.toMillis(1);
        ContextAwareGauge<Integer> workunitCreationGauge = this.runtimeMetricContext.get()
            .newContextAwareGauge(workunitCreationGaugeName, () -> (int) workUnitsCreationTime);
        this.runtimeMetricContext.get().register(workunitCreationGaugeName, workunitCreationGauge);
      }
      // The absence means there is something wrong getting the work units
      if (workUnitStream == null || workUnitStream.getWorkUnits() == null) {
        this.eventSubmitter.submit(JobEvent.WORK_UNITS_MISSING);
        jobState.setState(JobState.RunningState.FAILED);
        String errMsg = "Failed to get work units for job " + jobId;
        this.jobContext.getJobState().setJobFailureMessage(errMsg);
        this.jobContext.getJobState().setProp(NUM_WORKUNITS, 0);
        throw new JobException(errMsg);
      }
      // No work unit to run
      if (!workUnitStream.getWorkUnits().hasNext()) {
        this.eventSubmitter.submit(JobEvent.WORK_UNITS_EMPTY);
        LOG.warn("No work units have been created for job " + jobId);
        jobState.setState(JobState.RunningState.COMMITTED);
        isWorkUnitsEmpty = true;
        this.jobContext.getJobState().setProp(NUM_WORKUNITS, 0);
        return;
      }
      // Perform work needed before writing is done
      // (primitive boolean: canCleanStagingData returns boolean, so no boxing is needed)
      boolean canCleanUp = this.canCleanStagingData(this.jobContext.getJobState());
      closer.register(new DestinationDatasetHandlerService(jobState, canCleanUp, this.eventSubmitter))
          .executeHandlers(workUnitStream);
      // Initialize writer and converter(s)
      closer.register(WriterInitializerFactory.newInstace(jobState, workUnitStream)).initialize();
      closer.register(ConverterInitializerFactory.newInstance(jobState, workUnitStream)).initialize();
      TimingEvent stagingDataCleanTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_STAGING_DATA_CLEAN);
      // Cleanup left-over staging data possibly from the previous run. This is particularly
      // important if the current batch of WorkUnits include failed WorkUnits from the previous
      // run which may still have left-over staging data not cleaned up yet.
      cleanLeftoverStagingData(workUnitStream, jobState);
      stagingDataCleanTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
          EventName.MR_STAGING_DATA_CLEAN));
      long startTime = System.currentTimeMillis();
      jobState.setStartTime(startTime);
      jobState.setState(JobState.RunningState.RUNNING);
      try {
        LOG.info("Starting job " + jobId);
        notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_START, new JobListenerAction() {
          @Override
          public void apply(JobListener jobListener, JobContext jobContext)
              throws Exception {
            jobListener.onJobStart(jobContext);
          }
        });
        TimingEvent workUnitsPreparationTimer =
            this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_PREPARATION);
        // Add task ids
        workUnitStream = prepareWorkUnits(workUnitStream, jobState);
        // Remove skipped workUnits from the list of work units to execute.
        workUnitStream = workUnitStream.filter(new SkippedWorkUnitsFilter(jobState));
        // Add surviving tasks to jobState
        workUnitStream = workUnitStream.transform(new MultiWorkUnitForEach() {
          @Override
          public void forWorkUnit(WorkUnit workUnit) {
            jobState.incrementTaskCount();
            jobState.addTaskState(new TaskState(new WorkUnitState(workUnit, jobState)));
          }
        });
        // If it is a streaming source, workunits cannot be counted
        this.jobContext.getJobState().setProp(NUM_WORKUNITS,
            workUnitStream.isSafeToMaterialize() ? workUnitStream.getMaterializedWorkUnitCollection().size() : 0);
        // dump the work unit if tracking logs are enabled
        if (jobState.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_ENABLE_TRACKING_LOGS)) {
          workUnitStream = workUnitStream.transform(new Function<WorkUnit, WorkUnit>() {
            @Nullable
            @Override
            public WorkUnit apply(@Nullable WorkUnit input) {
              LOG.info("Work unit tracking log: {}", input);
              return input;
            }
          });
        }
        workUnitsPreparationTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
            EventName.WORK_UNITS_PREPARATION));
        // Write job execution info to the job history store before the job starts to run
        this.jobContext.storeJobExecutionInfo();
        TimingEvent jobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_RUN);
        // Start the job and wait for it to finish
        runWorkUnitStream(workUnitStream);
        jobRunTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_RUN));
        // Submit an event derived from the final running state, e.g. "JobCommitted"/"JobFailed".
        this.eventSubmitter
            .submit(CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, "JOB_" + jobState.getState()));
        // Check and set final job jobPropsState upon job completion
        if (jobState.getState() == JobState.RunningState.CANCELLED) {
          LOG.info(String.format("Job %s has been cancelled, aborting now", jobId));
          return;
        }
        TimingEvent jobCommitTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_COMMIT);
        this.jobContext.finalizeJobStateBeforeCommit();
        this.jobContext.commit();
        postProcessJobState(jobState);
        jobCommitTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_COMMIT));
      } finally {
        // Record end time / duration whether the run succeeded, failed or was cancelled.
        long endTime = System.currentTimeMillis();
        jobState.setEndTime(endTime);
        jobState.setDuration(endTime - jobState.getStartTime());
      }
    } catch (Throwable t) {
      jobState.setState(JobState.RunningState.FAILED);
      // FIX: added the missing space after "due to" so the failure message is readable.
      String errMsg = "Failed to launch and run job " + jobId + " due to " + t.getMessage();
      LOG.error(errMsg + ": " + t, t);
      this.jobContext.getJobState().setJobFailureException(t);
    } finally {
      try {
        TimingEvent jobCleanupTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_CLEANUP);
        cleanupStagingData(jobState);
        jobCleanupTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_CLEANUP));
        // Write job execution info to the job history store upon job termination
        this.jobContext.storeJobExecutionInfo();
      } finally {
        launchJobTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.FULL_JOB_EXECUTION));
        if (isWorkUnitsEmpty) {
          // If no WorkUnits are created, first send the JobCompleteTimer event.
          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobCompletion(jobContext);
            }
          });
          // Next, send the JobSucceededTimer event.
          // NOTE(review): this action invokes onJobFailure() even though the timing event is
          // JOB_SUCCEEDED — confirm against the JobListener contract whether this is intentional.
          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_SUCCEEDED, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobFailure(jobContext);
            }
          });
        } else {
          for (JobState.DatasetState datasetState : this.jobContext.getDatasetStatesByUrns().values()) {
            // Set the overall job state to FAILED if the job failed to process any dataset
            if (datasetState.getState() == JobState.RunningState.FAILED) {
              jobState.setState(JobState.RunningState.FAILED);
              LOG.warn("At least one dataset state is FAILED. Setting job state to FAILED.");
              break;
            }
          }
          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobCompletion(jobContext);
            }
          });
          if (jobState.getState() == JobState.RunningState.FAILED) {
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_FAILED, new JobListenerAction() {
              @Override
              public void apply(JobListener jobListener, JobContext jobContext)
                  throws Exception {
                jobListener.onJobFailure(jobContext);
              }
            });
            throw new JobException(String.format("Job %s failed", jobId));
          } else {
            // NOTE(review): as above, the JOB_SUCCEEDED action calls onJobFailure() — verify intent.
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_SUCCEEDED, new JobListenerAction() {
              @Override
              public void apply(JobListener jobListener, JobContext jobContext)
                  throws Exception {
                jobListener.onJobFailure(jobContext);
              }
            });
          }
        }
      }
    }
  } finally {
    // Stop metrics reporting
    if (this.jobContext.getJobMetricsOptional().isPresent()) {
      JobMetrics.remove(jobState);
    }
    MDC.remove(ConfigurationKeys.JOB_NAME_KEY);
    MDC.remove(ConfigurationKeys.JOB_KEY_KEY);
  }
}
/**
 * Executes, then deletes, any commit sequences left unfinished by a previous run of the given
 * job. Requires a commit sequence store to be present on the job context.
 */
private void executeUnfinishedCommitSequences(String jobName)
    throws IOException {
  Preconditions.checkState(this.jobContext.getCommitSequenceStore().isPresent());
  CommitSequenceStore store = this.jobContext.getCommitSequenceStore().get();
  for (String datasetUrn : store.get(jobName)) {
    Optional<CommitSequence> sequence = store.get(jobName, datasetUrn);
    if (sequence.isPresent()) {
      sequence.get().execute();
    }
    // Remove the entry either way so the sequence is never replayed.
    store.delete(jobName, datasetUrn);
  }
}
/**
 * Subclasses can override this method to do whatever processing on the {@link TaskState}s,
 * e.g., aggregate task-level metrics into job-level metrics.
 *
 * <p>The default implementation is a no-op.</p>
 *
 * @deprecated Use {@link #postProcessJobState(JobState)}
 */
@Deprecated
protected void postProcessTaskStates(@SuppressWarnings("unused") List<TaskState> taskStates) {
  // Do nothing
}
/**
 * Subclasses can override this method to do whatever processing on the {@link JobState} and its
 * associated {@link TaskState}s, e.g., aggregate task-level metrics into job-level metrics.
 *
 * <p>The default implementation delegates to the deprecated
 * {@link #postProcessTaskStates(List)} for backwards compatibility.</p>
 */
protected void postProcessJobState(JobState jobState) {
  postProcessTaskStates(jobState.getTaskStates());
}
/**
 * Shuts this launcher down: stops the cancellation executor, shuts down the source, removes job
 * metrics when metrics are enabled, and finally releases the job lock. The nested try/finally
 * structure guarantees each later step runs even if an earlier one throws.
 */
@Override
public void close()
    throws IOException {
  try {
    this.cancellationExecutor.shutdownNow();
    try {
      this.jobContext.getSource().shutdown(this.jobContext.getJobState());
    } finally {
      // Metrics removal happens whether or not the source shutdown succeeded.
      if (GobblinMetrics.isEnabled(this.jobProps)) {
        GobblinMetricsRegistry.getInstance().remove(this.jobContext.getJobId());
      }
    }
  } finally {
    // The job lock is always released, even when shutdown fails.
    unlockJob();
  }
}
/**
 * Run the given job.
 *
 * <p>
 * The contract between {@link AbstractJobLauncher#launchJob(JobListener)} and this method is that this method
 * is responsible for setting {@link JobState.RunningState} properly and upon returning from this method
 * (either normally or due to exceptions) whatever {@link JobState.RunningState} is set in this method is
 * used to determine if the job has finished.
 * </p>
 *
 * @param workUnits List of {@link WorkUnit}s of the job
 */
protected abstract void runWorkUnits(List<WorkUnit> workUnits)
    throws Exception;
/**
 * Run the given job.
 *
 * <p>
 * The contract between {@link AbstractJobLauncher#launchJob(JobListener)} and this method is that this method
 * is responsible for setting {@link JobState.RunningState} properly and upon returning from this method
 * (either normally or due to exceptions) whatever {@link JobState.RunningState} is set in this method is
 * used to determine if the job has finished.
 * </p>
 *
 * <p>The default implementation materializes the stream into a list and delegates to
 * {@link #runWorkUnits(List)}; infinite streams are therefore not supported by default.</p>
 *
 * @param workUnitStream stream of {@link WorkUnit}s of the job
 */
protected void runWorkUnitStream(WorkUnitStream workUnitStream) throws Exception {
  runWorkUnits(materializeWorkUnitList(workUnitStream));
}
/**
 * Materialize a {@link WorkUnitStream} into an in-memory list.
 *
 * @throws UnsupportedOperationException if the stream is not finite
 */
private List<WorkUnit> materializeWorkUnitList(WorkUnitStream workUnitStream) {
  if (workUnitStream.isFiniteStream()) {
    return Lists.newArrayList(workUnitStream.getWorkUnits());
  }
  throw new UnsupportedOperationException("Cannot materialize an infinite work unit stream.");
}
/**
 * Get a {@link JobLock} to be used for the job.
 *
 * @param properties the job properties
 * @param jobLockEventListener the listener for lock events.
 * @return {@link JobLock} to be used for the job
 * @throws JobLockException throw when the {@link JobLock} fails to initialize
 */
protected JobLock getJobLock(Properties properties, JobLockEventListener jobLockEventListener)
    throws JobLockException {
  // Delegates lock-implementation selection to the legacy factory manager.
  return LegacyJobLockFactoryManager.getJobLock(properties, jobLockEventListener);
}
/**
 * Execute the job cancellation.
 * The implementation should not throw any exceptions because that will kill the `Cancellation Executor` thread
 * and will create a deadlock: {@link #cancelJob(JobListener)} waits for the executor to signal
 * that cancellation has been executed.
 */
protected abstract void executeCancellation();
/**
 * Starts the background task that performs job cancellation.
 *
 * <p>The task blocks on {@code cancellationRequest} until {@link #cancelJob(JobListener)} flags a
 * request and notifies, then runs {@link #executeCancellation()}. Afterwards — even if it was
 * interrupted while waiting — it marks the job state {@link JobState.RunningState#CANCELLED},
 * sets the executed flag, and wakes every thread waiting on {@code cancellationExecution}.</p>
 */
protected void startCancellationExecutor() {
  this.cancellationExecutor.execute(() -> {
    synchronized (AbstractJobLauncher.this.cancellationRequest) {
      try {
        // Block until cancelJob() flips the flag and notifies.
        while (!AbstractJobLauncher.this.cancellationRequested) {
          AbstractJobLauncher.this.cancellationRequest.wait();
        }
        LOG.info("Cancellation has been requested for job " + AbstractJobLauncher.this.jobContext.getJobId());
        executeCancellation();
        LOG.info("Cancellation has been executed for job " + AbstractJobLauncher.this.jobContext.getJobId());
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
    synchronized (AbstractJobLauncher.this.cancellationExecution) {
      AbstractJobLauncher.this.cancellationExecuted = true;
      AbstractJobLauncher.this.jobContext.getJobState().setState(JobState.RunningState.CANCELLED);
      // Wake up every thread blocked in cancelJob().
      AbstractJobLauncher.this.cancellationExecution.notifyAll();
    }
  });
}
/**
 * Prepare the flattened {@link WorkUnit}s for execution by populating the job and task IDs.
 *
 * @param workUnits stream of work units to prepare
 * @param jobState  job state (unused here; retained to keep the signature stable)
 * @return the stream with a {@link WorkUnitPreparator} transform applied
 */
private WorkUnitStream prepareWorkUnits(WorkUnitStream workUnits, JobState jobState) {
  return workUnits.transform(new WorkUnitPreparator(this.jobContext.getJobId()));
}
/**
 * A {@link Function} that visits every flat {@link WorkUnit}: for a {@link MultiWorkUnit} each
 * child is visited, otherwise the work unit itself is. The input is always returned unchanged,
 * so this serves as an identity transform with side effects.
 */
private static abstract class MultiWorkUnitForEach implements Function<WorkUnit, WorkUnit> {

  @Nullable
  @Override
  public WorkUnit apply(WorkUnit input) {
    if (!(input instanceof MultiWorkUnit)) {
      forWorkUnit(input);
      return input;
    }
    for (WorkUnit child : ((MultiWorkUnit) input).getWorkUnits()) {
      forWorkUnit(child);
    }
    return input;
  }

  /** Callback invoked once for every non-multi work unit encountered. */
  protected abstract void forWorkUnit(WorkUnit workUnit);
}
/**
 * Assigns identifiers to each flat {@link WorkUnit}: the job ID, a sequentially generated task ID
 * (also used as the work unit's own ID), and the task key derived from that task ID.
 */
@RequiredArgsConstructor
private static class WorkUnitPreparator extends MultiWorkUnitForEach {
  // Monotonically increasing per-job sequence used to mint task IDs.
  private int taskIdSequence = 0;
  private final String jobId;

  @Override
  protected void forWorkUnit(WorkUnit workUnit) {
    final String taskId = JobLauncherUtils.newTaskId(this.jobId, this.taskIdSequence++);
    workUnit.setProp(ConfigurationKeys.JOB_ID_KEY, this.jobId);
    workUnit.setId(taskId);
    workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, taskId);
    workUnit.setProp(ConfigurationKeys.TASK_KEY_KEY, Long.toString(Id.Task.parse(taskId).getSequence()));
  }
}
/**
 * Try acquiring the job lock and return whether the lock is successfully locked. Locking can be
 * disabled entirely via {@link ConfigurationKeys#JOB_LOCK_ENABLED_KEY} (enabled by default), in
 * which case this returns {@code true} without creating a lock.
 *
 * @param properties the job properties
 */
private boolean tryLockJob(Properties properties) {
  try {
    boolean lockEnabled =
        Boolean.valueOf(properties.getProperty(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, Boolean.TRUE.toString()));
    if (lockEnabled) {
      this.jobLockOptional = Optional.of(getJobLock(properties, new JobLockEventListener() {
        @Override
        public void onLost() {
          // Losing the lock mid-run means another instance may take over; cancel this run.
          executeCancellation();
        }
      }));
    }
    return !this.jobLockOptional.isPresent() || this.jobLockOptional.get().tryLock();
  } catch (JobLockException e) {
    LOG.error(String.format("Failed to acquire job lock for job %s: %s", this.jobContext.getJobId(), e), e);
    return false;
  }
}
/**
 * Unlock a completed or failed job. Unlock and close failures are logged but never propagated,
 * and the lock reference is always cleared so a later call becomes a no-op.
 */
private void unlockJob() {
  if (this.jobLockOptional.isPresent()) {
    try {
      // Unlock so the next run of the same job can proceed
      this.jobLockOptional.get().unlock();
    } catch (JobLockException ioe) {
      LOG.error(String.format("Failed to unlock for job %s: %s", this.jobContext.getJobId(), ioe), ioe);
    } finally {
      // Close the lock even when unlocking failed.
      try {
        this.jobLockOptional.get().close();
      } catch (IOException e) {
        LOG.error(String.format("Failed to close job lock for job %s: %s", this.jobContext.getJobId(), e), e);
      } finally {
        // Drop the reference so subsequent unlock attempts do nothing.
        this.jobLockOptional = Optional.absent();
      }
    }
  }
}
/**
 * Combines the specified {@link JobListener} with the {@link #mandatoryJobListeners} for this job,
 * plus any listeners named by {@link ConfigurationKeys#JOB_LISTENERS_KEY}. Uses
 * {@link JobListeners#parallelJobListener(List)} to create a {@link CloseableJobListener} that will
 * execute all the {@link JobListener}s in parallel. A configured listener that cannot be
 * instantiated is logged and skipped rather than failing the job.
 */
private CloseableJobListener getParallelCombinedJobListener(JobState jobState, JobListener jobListener) {
  List<JobListener> jobListeners = Lists.newArrayList(this.mandatoryJobListeners);
  jobListeners.add(jobListener);
  Set<String> jobListenerClassNames = jobState.getPropAsSet(ConfigurationKeys.JOB_LISTENERS_KEY, StringUtils.EMPTY);
  for (String jobListenerClassName : jobListenerClassNames) {
    try {
      @SuppressWarnings("unchecked")
      Class<? extends JobListener> jobListenerClass =
          (Class<? extends JobListener>) Class.forName(jobListenerClassName);
      // Class.newInstance() is deprecated: it propagates checked constructor exceptions
      // undeclared. Invoke the no-arg constructor explicitly; ReflectiveOperationException
      // covers ClassNotFound/NoSuchMethod/Instantiation/IllegalAccess/InvocationTarget.
      jobListeners.add(jobListenerClass.getDeclaredConstructor().newInstance());
    } catch (ReflectiveOperationException e) {
      LOG.warn(String.format("JobListener could not be created due to %s", jobListenerClassName), e);
    }
  }
  return JobListeners.parallelJobListener(jobListeners);
}
/**
 * Takes a {@link List} of {@link Tag}s and returns a new immutable {@link List} with the original
 * {@link Tag}s as well as any additional {@link Tag}s returned by
 * {@link ClusterNameTags#getClusterNameTags()}.
 *
 * @see ClusterNameTags
 */
private static List<Tag<?>> addClusterNameTags(List<? extends Tag<?>> tags) {
  ImmutableList.Builder<Tag<?>> builder = ImmutableList.builder();
  builder.addAll(tags);
  builder.addAll(Tag.fromMap(ClusterNameTags.getClusterNameTags()));
  return builder.build();
}
/**
 * Build the {@link EventSubmitter} for this class, in the "gobblin.runtime" namespace, carrying
 * the given tags as event metadata.
 */
private EventSubmitter buildEventSubmitter(List<? extends Tag<?>> tags) {
  return new EventSubmitter.Builder(this.runtimeMetricContext, "gobblin.runtime")
      .addMetadata(Tag.toMap(Tag.tagValuesToString(tags))).build();
}
/**
 * Cleanup the left-over staging data possibly from the previous run of the job that may have failed
 * and not cleaned up its staging data.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobState)
    throws JobException {
  if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
    //Clean up will be done by initializer.
    return;
  }
  try {
    if (!canCleanStagingData(jobState)) {
      LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
      return;
    }
  } catch (IOException e) {
    throw new JobException("Failed to check unfinished commit sequences", e);
  }
  try {
    if (this.jobContext.shouldCleanupStagingDataPerTask()) {
      if (workUnits.isSafeToMaterialize()) {
        // Guava Closer rethrow/close pattern: resources registered during cleanup are closed
        // even when an error escapes the loop.
        Closer closer = Closer.create();
        Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
        try {
          for (WorkUnit workUnit : JobLauncherUtils.flattenWorkUnits(workUnits.getMaterializedWorkUnitCollection())) {
            JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(workUnit, jobState), LOG, closer, parallelRunners);
          }
        } catch (Throwable t) {
          throw closer.rethrow(t);
        } finally {
          closer.close();
        }
      } else {
        // Per-task cleanup requires materializing every work unit, which a pure stream cannot do.
        throw new RuntimeException("Work unit streams do not support cleaning staging data per task.");
      }
    } else {
      if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_OLD_JOBS_DATA, ConfigurationKeys.DEFAULT_CLEANUP_OLD_JOBS_DATA)) {
        JobLauncherUtils.cleanUpOldJobData(jobState, LOG, jobContext.getStagingDirProvided(), jobContext.getOutputDirProvided());
      }
      JobLauncherUtils.cleanJobStagingData(jobState, LOG);
    }
  } catch (Throwable t) {
    // Catch Throwable instead of just IOException to make sure failure of this won't affect the current run
    LOG.error("Failed to clean leftover staging data", t);
  }
}
/**
 * Returns the prefix of the given job ID: everything up to and including the last
 * {@code Id.Job.SEPARATOR}.
 */
private static String getJobIdPrefix(String jobId) {
  int separatorIndex = jobId.lastIndexOf(Id.Job.SEPARATOR);
  return jobId.substring(0, separatorIndex + 1);
}
/**
 * Cleanup the job's task staging data. This is not doing anything in case job succeeds
 * and data is successfully committed because the staging data has already been moved
 * to the job output directory. But in case the job fails and data is not committed,
 * we want the staging data to be cleaned up.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanupStagingData(JobState jobState)
    throws JobException {
  if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
    // The initializer owns staging-data cleanup; nothing to do here.
    return;
  }

  boolean cleanable;
  try {
    cleanable = canCleanStagingData(jobState);
  } catch (IOException e) {
    throw new JobException("Failed to check unfinished commit sequences", e);
  }
  if (!cleanable) {
    LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
    return;
  }

  if (this.jobContext.shouldCleanupStagingDataPerTask()) {
    cleanupStagingDataPerTask(jobState);
  } else {
    cleanupStagingDataForEntireJob(jobState);
  }
}
/**
 * Whether the job's source reported an early stop.
 *
 * @return the source's early-stop flag
 */
public boolean isEarlyStopped() {
  return this.jobContext.getSource().isEarlyStopped();
}
/**
 * Staging data cannot be cleaned if exactly-once semantics is used and the job still has
 * unfinished commit sequences recorded in the commit sequence store.
 */
private boolean canCleanStagingData(JobState jobState)
    throws IOException {
  if (this.jobContext.getSemantics() != DeliverySemantics.EXACTLY_ONCE) {
    return true;
  }
  return !this.jobContext.getCommitSequenceStore().get().exists(jobState.getJobName());
}
/**
 * Cleans staging data for each task state individually, sharing a {@link Closer} and a pool of
 * {@link ParallelRunner}s across tasks. Per-task failures are logged and do not abort the loop.
 */
private static void cleanupStagingDataPerTask(JobState jobState) {
  Closer closer = Closer.create();
  Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
  try {
    for (TaskState taskState : jobState.getTaskStates()) {
      try {
        JobLauncherUtils.cleanTaskStagingData(taskState, LOG, closer, parallelRunners);
      } catch (IOException e) {
        // Keep going: one task's failure must not block cleanup of the others.
        LOG.error(String.format("Failed to clean staging data for task %s: %s", taskState.getTaskId(), e), e);
      }
    }
  } finally {
    try {
      closer.close();
    } catch (IOException e) {
      LOG.error("Failed to clean staging data", e);
    }
  }
}
/**
 * Cleans the job-level staging data in one shot; failures are logged, not propagated.
 */
private static void cleanupStagingDataForEntireJob(JobState jobState) {
  try {
    JobLauncherUtils.cleanJobStagingData(jobState, LOG);
  } catch (IOException e) {
    LOG.error("Failed to clean staging data for job " + jobState.getJobId(), e);
  }
}
/**
 * Runs {@code action} against the combined listener set (mandatory listeners, the supplied
 * listener, and any configured via {@link ConfigurationKeys#JOB_LISTENERS_KEY}), executed in
 * parallel, and emits the named timing event when done.
 *
 * @throws JobException if any listener action fails
 */
private void notifyListeners(JobContext jobContext, JobListener jobListener, String timerEventName,
    JobListenerAction action)
    throws JobException {
  TimingEvent timer = this.eventSubmitter.getTimingEvent(timerEventName);
  try (CloseableJobListener parallelJobListener = getParallelCombinedJobListener(this.jobContext.getJobState(),
      jobListener)) {
    action.apply(parallelJobListener, jobContext);
  } catch (Exception e) {
    throw new JobException("Failed to execute all JobListeners", e);
  } finally {
    // The timing event is emitted even when a listener action fails.
    LOG.info("Submitting {}", timerEventName);
    timer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
        EventName.getEnumFromEventId(timerEventName)));
  }
}
/**
 * A single callback applied against a {@link JobListener}, used with
 * {@code notifyListeners(...)} so the timing-event and listener-combining boilerplate lives in
 * one place.
 */
@FunctionalInterface
private interface JobListenerAction {
  void apply(JobListener jobListener, JobContext jobContext)
      throws Exception;
}
}
|
/*
* msmwallet
*
* Created by Ed Gamble <ed@msmwallet.com> on 1/22/18.
* Copyright (c) 2018 msmwallet LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.msmwallet.core;
/**
 * A Java proxy for a native (JNI) Core address. The native reference's lifetime is managed by
 * the {@link BRCoreJniReference} base class; this class only adds address-specific factories
 * and native operations.
 */
public class BRCoreAddress extends BRCoreJniReference {
  // Null-safe factory: returns null for a null/empty string instead of constructing an instance.
  public static BRCoreAddress createAddress (String address) {
    return null == address || address.isEmpty()
        ? null
        : new BRCoreAddress (address);
  }
  public BRCoreAddress (String address) {
    this (createCoreAddress (address));
  }
  // Wraps an already-created native address reference.
  protected BRCoreAddress (long jniReferenceAddress) {
    super (jniReferenceAddress);
  }
  protected static native long createCoreAddress (String address);
  protected static native long createCoreAddressFromScriptPubKey (byte[] script);
  // Builds an address from a pubkey script (scriptPubKey) via the native layer.
  public static BRCoreAddress fromScriptPubKey (byte[] script) {
    return new BRCoreAddress (createCoreAddressFromScriptPubKey (script));
  }
  protected static native long createCoreAddressFromScriptSignature (byte[] script);
  // Builds an address from a signature script via the native layer.
  public static BRCoreAddress fromScriptSignature (byte[] script) {
    return new BRCoreAddress (createCoreAddressFromScriptSignature (script));
  }
  public native String stringify ();
  public native boolean isValid ();
  public native byte[] getPubKeyScript();
  /**
   * Decode a bitcash address into a bitcoin address.
   *
   * @param bcashAddr the bitcash address
   * @return the bitcoin address or NULL if unable to decode
   */
  public static native String bcashDecodeBitcoin (String bcashAddr);
  /**
   * Encode a bitcash address from a bitcoin address.
   *
   * @param bitcoinAddr the bitcoin address
   * @return a bitcash address or NULL if unable to encode
   */
  public static native String bcashEncodeBitcoin (String bitcoinAddr);
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.iottwinmaker.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.iottwinmaker.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * GetWorkspaceResult JSON Unmarshaller.
 *
 * <p>Walks the JSON token stream and copies the recognized top-level fields of a
 * GetWorkspace response into a {@link GetWorkspaceResult}. Generated code; the
 * token-handling order is significant and should not be rearranged by hand.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetWorkspaceResultJsonUnmarshaller implements Unmarshaller<GetWorkspaceResult, JsonUnmarshallerContext> {

    /**
     * Unmarshalls a GetWorkspace JSON response from {@code context} into a new
     * {@link GetWorkspaceResult}. A JSON {@code null} payload yields an empty result.
     */
    public GetWorkspaceResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        GetWorkspaceResult getWorkspaceResult = new GetWorkspaceResult();

        // Fields of this result object live exactly one level below the depth
        // at which parsing started; targetDepth is used to match them.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // Whole payload is JSON null: return the empty result object.
        if (token == VALUE_NULL) {
            return getWorkspaceResult;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("arn", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setArn(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("creationDateTime", targetDepth)) {
                    context.nextToken();
                    // Timestamps are wire-encoded as Unix epoch values.
                    getWorkspaceResult.setCreationDateTime(DateJsonUnmarshallerFactory.getInstance("unixTimestamp").unmarshall(context));
                }
                if (context.testExpression("description", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setDescription(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("role", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setRole(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("s3Location", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setS3Location(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("updateDateTime", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setUpdateDateTime(DateJsonUnmarshallerFactory.getInstance("unixTimestamp").unmarshall(context));
                }
                if (context.testExpression("workspaceId", targetDepth)) {
                    context.nextToken();
                    getWorkspaceResult.setWorkspaceId(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Leaving the object we started in means parsing is complete.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return getWorkspaceResult;
    }

    private static GetWorkspaceResultJsonUnmarshaller instance;

    /**
     * Returns the shared unmarshaller instance. Lazy init without locking is a
     * benign race here: the unmarshaller keeps no mutable state, so creating an
     * extra instance under contention is harmless.
     */
    public static GetWorkspaceResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new GetWorkspaceResultJsonUnmarshaller();
        return instance;
    }
}
|
/*
* ORY Keto
* Ory Keto is a cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs.
*
* The version of the OpenAPI document: v0.0.0-alpha.58
* Contact: hi@ory.sh
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package sh.ory.keto.model;
import com.google.gson.TypeAdapter;
import com.google.gson.annotations.JsonAdapter;
import com.google.gson.annotations.SerializedName;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
/**
 * Model tests for OryAccessControlPolicyAllowedInput.
 *
 * <p>Generated scaffold: every test below is an empty placeholder to be filled
 * in with real assertions against {@link OryAccessControlPolicyAllowedInput}.
 */
public class OryAccessControlPolicyAllowedInputTest {
    // Shared model instance available to the property tests below.
    private final OryAccessControlPolicyAllowedInput model = new OryAccessControlPolicyAllowedInput();

    /**
     * Model tests for OryAccessControlPolicyAllowedInput
     */
    @Test
    public void testOryAccessControlPolicyAllowedInput() {
        // TODO: test OryAccessControlPolicyAllowedInput
    }

    /**
     * Test the property 'action'
     */
    @Test
    public void actionTest() {
        // TODO: test action
    }

    /**
     * Test the property 'context'
     */
    @Test
    public void contextTest() {
        // TODO: test context
    }

    /**
     * Test the property 'resource'
     */
    @Test
    public void resourceTest() {
        // TODO: test resource
    }

    /**
     * Test the property 'subject'
     */
    @Test
    public void subjectTest() {
        // TODO: test subject
    }
}
|
package com.vignesh.howzat.api;
import com.vignesh.howzat.model.Handshake;
import com.vignesh.howzat.model.SignUpInfo;
import com.vignesh.howzat.model.UserKeys;
import com.vignesh.howzat.service.HowzatService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.Calendar;
@RequestMapping("api/v1")
@RestController
public class HowzatController {

    /** Business service backing every endpoint of this controller. */
    private final HowzatService howzatService;

    @Autowired
    public HowzatController(HowzatService howzatService) {
        this.howzatService = howzatService;
    }

    /**
     * Connectivity handshake for buyers: replies with the current wall-clock
     * time and a fixed confirmation message.
     */
    @GetMapping(path = "/buyer/hello")
    public Handshake sayHello() {
        return howzatService.sayHello(Calendar.getInstance().getTimeInMillis(), "connection established");
    }

    /** Generates one user key per team for the auctioneer to distribute. */
    @GetMapping(path = "/auctioneer/generateUserKeys/{no_of_teams}")
    public UserKeys generateUserKeys(@PathVariable("no_of_teams") int noOfTeams) {
        return howzatService.generateUserKeys(noOfTeams);
    }

    /** Registers a buyer; the pre-issued user key authorizes the sign-up. */
    @PostMapping(path = "/buyer/signUp")
    public SignUpInfo signUpUser(@RequestParam("username") String userName,
                                 @RequestParam("password") String password,
                                 @RequestParam("user_key") String userKey) {
        return howzatService.signUpBuyer(userName, password, userKey);
    }

    /** Registers an auctioneer with username/password credentials only. */
    @PostMapping(path = "/auctioneer/signUp")
    public SignUpInfo signUpAuctioneer(@RequestParam("username") String userName,
                                       @RequestParam("password") String password) {
        return howzatService.signUpAuctioneer(userName, password);
    }
}
|
/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.operators.flowable;
import java.util.concurrent.atomic.AtomicInteger;
import org.reactivestreams.*;
import io.reactivex.exceptions.Exceptions;
import io.reactivex.functions.BooleanSupplier;
import io.reactivex.internal.subscriptions.SubscriptionArbiter;
/**
 * Repeatedly resubscribes to the upstream source after each completion until
 * the supplied {@link BooleanSupplier} returns {@code true}.
 */
public final class FlowableRepeatUntil<T> extends AbstractFlowableWithUpstream<T, T> {
    /** Evaluated after each completion; returning {@code true} stops the repeats. */
    final BooleanSupplier until;

    public FlowableRepeatUntil(Publisher<T> source, BooleanSupplier until) {
        super(source);
        this.until = until;
    }

    @Override
    public void subscribeActual(Subscriber<? super T> s) {
        // The arbiter lets each resubscription install a fresh upstream
        // Subscription while preserving the downstream's requested amount.
        SubscriptionArbiter sa = new SubscriptionArbiter();
        s.onSubscribe(sa);

        RepeatSubscriber<T> rs = new RepeatSubscriber<T>(s, until, sa, source);
        rs.subscribeNext();
    }

    // FIXME update to a fresh Rsc algorithm
    /**
     * Subscriber that, on completion, consults the stop condition and either
     * completes the downstream or resubscribes to the source. Extends
     * {@link AtomicInteger} to use itself as the work-in-progress counter for
     * the trampoline in {@link #subscribeNext()}.
     */
    static final class RepeatSubscriber<T> extends AtomicInteger implements Subscriber<T> {
        private static final long serialVersionUID = -7098360935104053232L;

        final Subscriber<? super T> actual;
        final SubscriptionArbiter sa;
        final Publisher<? extends T> source;
        final BooleanSupplier stop;

        RepeatSubscriber(Subscriber<? super T> actual, BooleanSupplier until, SubscriptionArbiter sa, Publisher<? extends T> source) {
            this.actual = actual;
            this.sa = sa;
            this.source = source;
            this.stop = until;
        }

        @Override
        public void onSubscribe(Subscription s) {
            // Route the new upstream subscription through the shared arbiter.
            sa.setSubscription(s);
        }

        @Override
        public void onNext(T t) {
            actual.onNext(t);
            // Record delivery so the arbiter can adjust outstanding requests
            // across resubscriptions.
            sa.produced(1L);
        }

        @Override
        public void onError(Throwable t) {
            actual.onError(t);
        }

        @Override
        public void onComplete() {
            boolean b;

            try {
                b = stop.getAsBoolean();
            } catch (Throwable e) {
                // A throwing stop condition terminates the sequence with onError.
                Exceptions.throwIfFatal(e);
                actual.onError(e);
                return;
            }

            if (b) {
                actual.onComplete();
            } else {
                subscribeNext();
            }
        }

        /**
         * Subscribes to the source again via trampolining.
         */
        void subscribeNext() {
            // Only the caller that raises the counter from 0 runs the loop;
            // reentrant calls (synchronous completions) just increment and
            // are drained here, keeping the stack depth bounded.
            if (getAndIncrement() == 0) {
                int missed = 1;
                for (;;) {
                    source.subscribe(this);

                    missed = addAndGet(-missed);
                    if (missed == 0) {
                        break;
                    }
                }
            }
        }
    }
}
|
package com.udaan.flagsnag.logic;
import java.util.Locale;
/*
* This class provides the grammatically correct translations
*/
public class Translations {

    /**
     * Builds a grammatically correct, localized sentence announcing a score in
     * a given category.
     *
     * <p>Bug fixes: the Spanish branch used to compute the translation and then
     * immediately overwrite it with an English string, so Spanish locales never
     * received a Spanish sentence; the Spanish literal also contained
     * mojibake (encoding-corrupted characters), restored here to
     * "Marqué" / "categoría".
     *
     * @param score    the score achieved
     * @param category the category the score was achieved in
     * @param locale   the target locale; any language other than Spanish
     *                 ("es") falls back to English
     * @return the localized sentence
     */
    public static String scorePost(int score, String category, Locale locale) {
        if (locale.getLanguage().equals("es")) {
            return "Marqué " + score + " en la categoría \"" + category + "\"";
        }
        return "I scored " + score + " in \"" + category + "\" category";
    }
}
|
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codestarconnections;
import javax.annotation.Generated;
import com.amazonaws.*;
import com.amazonaws.regions.*;
import com.amazonaws.services.codestarconnections.model.*;
/**
* Interface for accessing AWS CodeStar connections.
* <p>
* <b>Note:</b> Do not directly implement this interface, new methods are added to it regularly. Extend from
* {@link com.amazonaws.services.codestarconnections.AbstractAWSCodeStarconnections} instead.
* </p>
* <p>
* <fullname>AWS CodeStar Connections</fullname>
* <p>
* This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types
* for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.
* </p>
* <p>
* <i>Connections</i> are configurations that you use to connect AWS resources to external code repositories. Each
* connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository
* such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a
* code change is made to your third-party code repository. Each connection is named and associated with a unique ARN
* that is used to reference the connection.
* </p>
* <p>
* When you create a connection, the console initiates a third-party connection handshake. <i>Installations</i> are the
* apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the
* Bitbucket app. When you create a connection, you can choose an existing installation or create one.
* </p>
* <p>
* When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a
* <i>host</i> for your connections.
* </p>
* <p>
* You can work with connections by calling:
* </p>
* <ul>
* <li>
* <p>
* <a>CreateConnection</a>, which creates a uniquely named connection that can be referenced by services such as
* CodePipeline.
* </p>
* </li>
* <li>
* <p>
* <a>DeleteConnection</a>, which deletes the specified connection.
* </p>
* </li>
* <li>
* <p>
* <a>GetConnection</a>, which returns information about the connection, including the connection status.
* </p>
* </li>
* <li>
* <p>
* <a>ListConnections</a>, which lists the connections associated with your account.
* </p>
* </li>
* </ul>
* <p>
* You can work with hosts by calling:
* </p>
* <ul>
* <li>
* <p>
* <a>CreateHost</a>, which creates a host that represents the infrastructure where your provider is installed.
* </p>
* </li>
* <li>
* <p>
* <a>DeleteHost</a>, which deletes the specified host.
* </p>
* </li>
* <li>
* <p>
* <a>GetHost</a>, which returns information about the host, including the setup status.
* </p>
* </li>
* <li>
* <p>
* <a>ListHosts</a>, which lists the hosts associated with your account.
* </p>
* </li>
* </ul>
* <p>
* You can work with tags in AWS CodeStar Connections by calling the following:
* </p>
* <ul>
* <li>
* <p>
* <a>ListTagsForResource</a>, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS
* CodeStar Connections.
* </p>
* </li>
* <li>
* <p>
* <a>TagResource</a>, which adds or updates tags for a resource in AWS CodeStar Connections.
* </p>
* </li>
* <li>
* <p>
* <a>UntagResource</a>, which removes tags for a resource in AWS CodeStar Connections.
* </p>
* </li>
* </ul>
* <p>
* For information about how to use AWS CodeStar Connections, see the <a
* href="https://docs.aws.amazon.com/dtconsole/latest/userguide/welcome-connections.html">Developer Tools User
* Guide</a>.
* </p>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public interface AWSCodeStarconnections {

    /**
     * The region metadata service name for computing region endpoints. You can use this value to retrieve metadata
     * (such as supported regions) of the service.
     *
     * @see RegionUtils#getRegionsForService(String)
     */
    String ENDPOINT_PREFIX = "codestar-connections";

    /**
     * <p>
     * Creates a connection that can then be given to other AWS services like CodePipeline so that it can access
     * third-party code repositories. The connection is in pending status until the third-party connection handshake is
     * completed from the console.
     * </p>
     *
     * @param createConnectionRequest
     * @return Result of the CreateConnection operation returned by the service.
     * @throws LimitExceededException
     *         Exceeded the maximum limit for connections.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws ResourceUnavailableException
     *         Resource not found. Verify the ARN for the host resource and try again.
     * @sample AWSCodeStarconnections.CreateConnection
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/CreateConnection"
     *      target="_top">AWS API Documentation</a>
     */
    CreateConnectionResult createConnection(CreateConnectionRequest createConnectionRequest);

    /**
     * <p>
     * Creates a resource that represents the infrastructure where a third-party provider is installed. The host is used
     * when you create connections to an installed third-party provider type, such as GitHub Enterprise Server. You
     * create one host for all connections to that provider.
     * </p>
     * <note>
     * <p>
     * A host created through the CLI or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE`
     * by setting up the host in the console.
     * </p>
     * </note>
     *
     * @param createHostRequest
     * @return Result of the CreateHost operation returned by the service.
     * @throws LimitExceededException
     *         Exceeded the maximum limit for connections.
     * @sample AWSCodeStarconnections.CreateHost
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/CreateHost"
     *      target="_top">AWS API Documentation</a>
     */
    CreateHostResult createHost(CreateHostRequest createHostRequest);

    /**
     * <p>
     * Deletes the specified connection.
     * </p>
     *
     * @param deleteConnectionRequest
     * @return Result of the DeleteConnection operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @sample AWSCodeStarconnections.DeleteConnection
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/DeleteConnection"
     *      target="_top">AWS API Documentation</a>
     */
    DeleteConnectionResult deleteConnection(DeleteConnectionRequest deleteConnectionRequest);

    /**
     * <p>
     * Deletes the specified host. Before you delete a host, all connections associated to the host must be deleted.
     * </p>
     * <note>
     * <p>
     * A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or VPC_CONFIG_DELETING state.
     * </p>
     * </note>
     *
     * @param deleteHostRequest
     * @return Result of the DeleteHost operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws ResourceUnavailableException
     *         Resource not found. Verify the ARN for the host resource and try again.
     * @sample AWSCodeStarconnections.DeleteHost
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/DeleteHost"
     *      target="_top">AWS API Documentation</a>
     */
    DeleteHostResult deleteHost(DeleteHostRequest deleteHostRequest);

    /**
     * <p>
     * Returns the connection ARN and details such as status, owner, and provider type.
     * </p>
     *
     * @param getConnectionRequest
     * @return Result of the GetConnection operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws ResourceUnavailableException
     *         Resource not found. Verify the ARN for the host resource and try again.
     * @sample AWSCodeStarconnections.GetConnection
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/GetConnection"
     *      target="_top">AWS API Documentation</a>
     */
    GetConnectionResult getConnection(GetConnectionRequest getConnectionRequest);

    /**
     * <p>
     * Returns the host ARN and details such as status, provider type, endpoint, and, if applicable, the VPC
     * configuration.
     * </p>
     *
     * @param getHostRequest
     * @return Result of the GetHost operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws ResourceUnavailableException
     *         Resource not found. Verify the ARN for the host resource and try again.
     * @sample AWSCodeStarconnections.GetHost
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/GetHost" target="_top">AWS
     *      API Documentation</a>
     */
    GetHostResult getHost(GetHostRequest getHostRequest);

    /**
     * <p>
     * Lists the connections associated with your account.
     * </p>
     *
     * @param listConnectionsRequest
     * @return Result of the ListConnections operation returned by the service.
     * @sample AWSCodeStarconnections.ListConnections
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/ListConnections"
     *      target="_top">AWS API Documentation</a>
     */
    ListConnectionsResult listConnections(ListConnectionsRequest listConnectionsRequest);

    /**
     * <p>
     * Lists the hosts associated with your account.
     * </p>
     *
     * @param listHostsRequest
     * @return Result of the ListHosts operation returned by the service.
     * @sample AWSCodeStarconnections.ListHosts
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/ListHosts" target="_top">AWS
     *      API Documentation</a>
     */
    ListHostsResult listHosts(ListHostsRequest listHostsRequest);

    /**
     * <p>
     * Gets the set of key-value pairs (metadata) that are used to manage the resource.
     * </p>
     *
     * @param listTagsForResourceRequest
     * @return Result of the ListTagsForResource operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @sample AWSCodeStarconnections.ListTagsForResource
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/ListTagsForResource"
     *      target="_top">AWS API Documentation</a>
     */
    ListTagsForResourceResult listTagsForResource(ListTagsForResourceRequest listTagsForResourceRequest);

    /**
     * <p>
     * Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.
     * </p>
     *
     * @param tagResourceRequest
     * @return Result of the TagResource operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws LimitExceededException
     *         Exceeded the maximum limit for connections.
     * @sample AWSCodeStarconnections.TagResource
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/TagResource"
     *      target="_top">AWS API Documentation</a>
     */
    TagResourceResult tagResource(TagResourceRequest tagResourceRequest);

    /**
     * <p>
     * Removes tags from an AWS resource.
     * </p>
     *
     * @param untagResourceRequest
     * @return Result of the UntagResource operation returned by the service.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @sample AWSCodeStarconnections.UntagResource
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/UntagResource"
     *      target="_top">AWS API Documentation</a>
     */
    UntagResourceResult untagResource(UntagResourceRequest untagResourceRequest);

    /**
     * <p>
     * Updates a specified host with the provided configurations.
     * </p>
     *
     * @param updateHostRequest
     * @return Result of the UpdateHost operation returned by the service.
     * @throws ConflictException
     *         Two conflicting operations have been made on the same resource.
     * @throws ResourceNotFoundException
     *         Resource not found. Verify the connection resource ARN and try again.
     * @throws ResourceUnavailableException
     *         Resource not found. Verify the ARN for the host resource and try again.
     * @throws UnsupportedOperationException
     *         The operation is not supported. Check the connection status and try again.
     * @sample AWSCodeStarconnections.UpdateHost
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codestar-connections-2019-12-01/UpdateHost"
     *      target="_top">AWS API Documentation</a>
     */
    UpdateHostResult updateHost(UpdateHostRequest updateHostRequest);

    /**
     * Shuts down this client object, releasing any resources that might be held open. This is an optional method, and
     * callers are not expected to call it, but can if they want to explicitly release any open resources. Once a client
     * has been shutdown, it should not be used to make any more requests.
     */
    void shutdown();

    /**
     * Returns additional metadata for a previously executed successful request, typically used for debugging issues
     * where a service isn't acting as expected. This data isn't considered part of the result data returned by an
     * operation, so it's available through this separate, diagnostic interface.
     * <p>
     * Response metadata is only cached for a limited period of time, so if you need to access this extra diagnostic
     * information for an executed request, you should use this method to retrieve it as soon as possible after
     * executing a request.
     *
     * @param request
     *        The originally executed request.
     *
     * @return The response metadata for the specified request, or null if none is available.
     */
    ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request);

}
|
package service_requests;
import org.junit.*;
import static org.junit.Assert.*;
import java.util.*;
import java.util.Date;
import java.util.stream.*;
import java.text.*;
import java.util.function.*;
import javax.persistence.*;
import javax.enterprise.context.*;
import javax.inject.*;
import javax.ejb.*;
import java.sql.*;
import util.PersistenceHelper;
import service_requests.*;
/**
 * CRUD round-trip tests for {@code SystemAdministratorService}.
 *
 * <p>Each test runs inside a transaction opened in {@link #initEM()} and rolled
 * back in {@link #tearDown()}, so tests never leave data behind.
 *
 * <p>Fix: {@code delete()} now calls {@code PersistenceHelper.flush(true)}
 * between writes and read-backs, consistent with the other tests in this class
 * (create/retrieve/update all flush before verifying).
 */
public class SystemAdministratorCRUDTest {

    private SystemAdministratorService systemAdministratorService;
    private EntityManager em;
    private EntityTransaction tx;

    /** Creates a fresh test schema and opens the per-test transaction. */
    @Before
    public void initEM() {
        this.em = util.PersistenceHelper.createTestSchema();
        util.PersistenceHelper.setEntityManager(em);
        this.tx = this.em.getTransaction();
        this.tx.begin();
        this.systemAdministratorService = new SystemAdministratorService();
    }

    /** Rolls back the test transaction and closes the entity manager. */
    @After
    public void tearDown() {
        if (tx != null)
            tx.rollback();
        if (em != null)
            em.close();
    }

    /** Created entity gets an id and can be read back with identical fields. */
    @Test
    public void create() {
        SystemAdministrator toCreate = new SystemAdministrator();
        toCreate.setFirstName("");
        toCreate.setLastName("");
        SystemAdministrator created = systemAdministratorService.create(toCreate);
        Object id = created.getId();
        assertNotNull(id);
        PersistenceHelper.flush(true);
        SystemAdministrator retrieved = systemAdministratorService.find(id);
        assertNotNull(retrieved);
        assertEquals(id, retrieved.getId());
        assertEquals(created.getFirstName(), retrieved.getFirstName());
        assertEquals(created.getLastName(), retrieved.getLastName());
    }

    /** Two created entities are retrievable independently by their own ids. */
    @Test
    public void retrieve() {
        SystemAdministrator toCreate1 = new SystemAdministrator();
        toCreate1.setFirstName("");
        toCreate1.setLastName("");
        systemAdministratorService.create(toCreate1);
        SystemAdministrator toCreate2 = new SystemAdministrator();
        toCreate2.setFirstName("");
        toCreate2.setLastName("");
        systemAdministratorService.create(toCreate2);
        PersistenceHelper.flush(true);
        SystemAdministrator retrieved1 = systemAdministratorService.find(toCreate1.getId());
        assertNotNull(retrieved1);
        assertEquals(toCreate1.getId(), retrieved1.getId());
        SystemAdministrator retrieved2 = systemAdministratorService.find(toCreate2.getId());
        assertNotNull(retrieved2);
        assertEquals(toCreate2.getId(), retrieved2.getId());
    }

    /** A field change made through update() is visible on the next read. */
    @Test
    public void update() {
        SystemAdministrator toCreate = new SystemAdministrator();
        toCreate.setFirstName("");
        toCreate.setLastName("");
        Object id = systemAdministratorService.create(toCreate).getId();
        PersistenceHelper.flush(true);
        SystemAdministrator retrieved = systemAdministratorService.find(id);
        String originalValue = retrieved.getFirstName();
        retrieved.setFirstName("A string value");
        systemAdministratorService.update(retrieved);
        PersistenceHelper.flush(true);
        SystemAdministrator updated = systemAdministratorService.find(id);
        assertNotEquals(originalValue, updated.getFirstName());
    }

    /** A deleted entity is no longer found. */
    @Test
    public void delete() {
        SystemAdministrator toDelete = new SystemAdministrator();
        toDelete.setFirstName("");
        toDelete.setLastName("");
        Object id = systemAdministratorService.create(toDelete).getId();
        // Flush before the read-back, consistent with the sibling tests above.
        PersistenceHelper.flush(true);
        assertNotNull(systemAdministratorService.find(id));
        systemAdministratorService.delete(id);
        // Flush so the delete is pushed to the database before verification.
        PersistenceHelper.flush(true);
        assertNull(systemAdministratorService.find(id));
    }
}
|
/*
* This class is distributed as part of the Botania Mod.
* Get the Source Code in github:
* https://github.com/Vazkii/Botania
*
* Botania is Open Source and distributed under the
* Botania License: http://botaniamod.net/license.php
*/
package vazkii.botania.common.block.subtile.generating;
import net.minecraft.core.BlockPos;
import net.minecraft.sounds.SoundSource;
import net.minecraft.tags.FluidTags;
import net.minecraft.tags.Tag;
import net.minecraft.world.level.block.state.BlockState;
import net.minecraft.world.level.material.Fluid;
import vazkii.botania.client.fx.WispParticleData;
import vazkii.botania.common.block.ModSubtiles;
import vazkii.botania.common.core.handler.ModSounds;
/**
 * Thermalily flower: the lava-consuming counterpart of the Hydroangeas
 * (searches for {@link FluidTags#LAVA} instead of water).
 */
public class SubTileThermalily extends SubTileHydroangeas {

    public SubTileThermalily(BlockPos pos, BlockState state) {
        super(ModSubtiles.THERMALILY, pos, state);
    }

    @Override
    public int getColor() {
        // Deep orange petal color.
        return 0xD03C00;
    }

    @Override
    public void doBurnParticles() {
        // Reddish wisp jittered slightly around the flower's effective position.
        WispParticleData wisp = WispParticleData.wisp((float) Math.random() / 6, 0.7F, 0.05F, 0.05F, 1);
        double x = getEffectivePos().getX() + 0.55 + Math.random() * 0.2 - 0.1;
        double y = getEffectivePos().getY() + 0.9 + Math.random() * 0.2 - 0.1;
        double z = getEffectivePos().getZ() + 0.5;
        level.addParticle(wisp, x, y, z, 0, (float) Math.random() / 60, 0);
    }

    @Override
    public Tag<Fluid> getMaterialToSearchFor() {
        return FluidTags.LAVA;
    }

    @Override
    public void playSound() {
        getLevel().playSound(null, getEffectivePos(), ModSounds.thermalily, SoundSource.BLOCKS, 1F, 1F);
    }

    @Override
    public int getDelayBetweenPassiveGeneration() {
        return 1;
    }

    @Override
    public int getBurnTime() {
        return 900;
    }

    @Override
    public int getValueForPassiveGeneration() {
        return 20;
    }

    @Override
    public int getMaxMana() {
        return 500;
    }

    @Override
    public int getCooldown() {
        return 6000;
    }

    @Override
    public boolean isPassiveFlower() {
        return false;
    }
}
|
package io.ebean;
import javax.persistence.PersistenceException;
/**
 * Captures and wraps IOException's occurring during ElasticSearch processing etc.
 *
 * <p>Extends {@link PersistenceException} so callers that already handle
 * persistence failures also catch wrapped IO failures.
 */
public class PersistenceIOException extends PersistenceException {

  private static final long serialVersionUID = -7630050437148176148L;

  /**
   * Create with a contextual message and the underlying cause.
   */
  public PersistenceIOException(String msg, Exception cause) {
    super(msg, cause);
  }

  /**
   * Create wrapping the underlying cause; the cause's message is used.
   */
  public PersistenceIOException(Exception cause) {
    super(cause);
  }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.configuration;
import java.io.Serializable;
import java.lang.management.ManagementFactory;
import java.util.Map;
import java.util.UUID;
import javax.cache.configuration.Factory;
import javax.cache.event.CacheEntryListener;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.integration.CacheLoader;
import javax.cache.processor.EntryProcessor;
import javax.management.MBeanServer;
import javax.net.ssl.SSLContext;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.IgniteSystemProperties;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheKeyConfiguration;
import org.apache.ignite.cache.store.CacheStoreSessionListener;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeTask;
import org.apache.ignite.events.Event;
import org.apache.ignite.events.EventType;
import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lifecycle.LifecycleBean;
import org.apache.ignite.lifecycle.LifecycleEventType;
import org.apache.ignite.marshaller.Marshaller;
import org.apache.ignite.plugin.PluginConfiguration;
import org.apache.ignite.plugin.PluginProvider;
import org.apache.ignite.plugin.segmentation.SegmentationPolicy;
import org.apache.ignite.plugin.segmentation.SegmentationResolver;
import org.apache.ignite.services.ServiceConfiguration;
import org.apache.ignite.spi.checkpoint.CheckpointSpi;
import org.apache.ignite.spi.checkpoint.noop.NoopCheckpointSpi;
import org.apache.ignite.spi.collision.CollisionSpi;
import org.apache.ignite.spi.collision.noop.NoopCollisionSpi;
import org.apache.ignite.spi.communication.CommunicationSpi;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.apache.ignite.spi.deployment.DeploymentSpi;
import org.apache.ignite.spi.deployment.local.LocalDeploymentSpi;
import org.apache.ignite.spi.discovery.DiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.eventstorage.EventStorageSpi;
import org.apache.ignite.spi.eventstorage.memory.MemoryEventStorageSpi;
import org.apache.ignite.spi.failover.FailoverSpi;
import org.apache.ignite.spi.failover.always.AlwaysFailoverSpi;
import org.apache.ignite.spi.indexing.IndexingSpi;
import org.apache.ignite.spi.loadbalancing.LoadBalancingSpi;
import org.apache.ignite.spi.loadbalancing.roundrobin.RoundRobinLoadBalancingSpi;
import org.apache.ignite.spi.swapspace.SwapSpaceSpi;
import org.apache.ignite.spi.swapspace.file.FileSwapSpaceSpi;
import org.apache.ignite.ssl.SslContextFactory;
import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.STOP;
/**
* This class defines grid runtime configuration. This configuration is passed to
* {@link Ignition#start(IgniteConfiguration)} method. It defines all configuration
* parameters required to start a grid instance. Usually, a special
* class called "loader" will create an instance of this interface and apply
* {@link Ignition#start(IgniteConfiguration)} method to initialize Ignite instance.
* <p>
* Note that you should only set values that differ from defaults, as grid
* will automatically pick default values for all values that are not set.
* <p>
* For more information about grid configuration and startup refer to {@link Ignition}
* documentation.
*/
public class IgniteConfiguration {
/** Courtesy notice log category. */
public static final String COURTESY_LOGGER_NAME = "org.apache.ignite.CourtesyConfigNotice";
/**
* Default flag for peer class loading. By default the value is {@code false}
* which means that peer class loading is disabled.
*/
public static final boolean DFLT_P2P_ENABLED = false;
/** Default metrics history size (value is {@code 10000}). */
public static final int DFLT_METRICS_HISTORY_SIZE = 10000;
/** Default metrics update frequency. */
public static final long DFLT_METRICS_UPDATE_FREQ = 2000;
/**
* Default metrics expire time. The value is {@link Long#MAX_VALUE} which
* means that metrics never expire.
*/
public static final long DFLT_METRICS_EXPIRE_TIME = Long.MAX_VALUE;
/** Default maximum timeout to wait for network responses in milliseconds (value is {@code 5,000ms}). */
public static final long DFLT_NETWORK_TIMEOUT = 5000;
/** Default interval between message send retries. */
public static final long DFLT_SEND_RETRY_DELAY = 1000;
/** Default message send retries count. */
public static final int DFLT_SEND_RETRY_CNT = 3;
/** Default number of clock sync samples. */
public static final int DFLT_CLOCK_SYNC_SAMPLES = 8;
/** Default clock synchronization frequency. */
public static final int DFLT_CLOCK_SYNC_FREQUENCY = 120000;
/** Default discovery startup delay in milliseconds (value is {@code 60,000ms}). */
public static final long DFLT_DISCOVERY_STARTUP_DELAY = 60000;
/** Default deployment mode (value is {@link DeploymentMode#SHARED}). */
public static final DeploymentMode DFLT_DEPLOYMENT_MODE = DeploymentMode.SHARED;
/** Default cache size for missed resources. */
public static final int DFLT_P2P_MISSED_RESOURCES_CACHE_SIZE = 100;
/** Default time server port base. */
public static final int DFLT_TIME_SERVER_PORT_BASE = 31100;
/** Default time server port range. */
public static final int DFLT_TIME_SERVER_PORT_RANGE = 100;
/** Number of processors available to the JVM; used to size the default thread pools below. */
public static final int AVAILABLE_PROC_CNT = Runtime.getRuntime().availableProcessors();
/** Default core size of public thread pool. */
public static final int DFLT_PUBLIC_THREAD_CNT = Math.max(8, AVAILABLE_PROC_CNT) * 2;
/** Default keep alive time for public thread pool. */
public static final long DFLT_PUBLIC_KEEP_ALIVE_TIME = 0;
/** Default limit of threads used for rebalance. */
public static final int DFLT_REBALANCE_THREAD_POOL_SIZE = 1;
/** Default max queue capacity of public thread pool. */
public static final int DFLT_PUBLIC_THREADPOOL_QUEUE_CAP = Integer.MAX_VALUE;
/** Default size of system thread pool. */
public static final int DFLT_SYSTEM_CORE_THREAD_CNT = DFLT_PUBLIC_THREAD_CNT;
/** Default max size of system thread pool. */
public static final int DFLT_SYSTEM_MAX_THREAD_CNT = DFLT_PUBLIC_THREAD_CNT;
/** Default keep alive time for system thread pool. */
public static final long DFLT_SYSTEM_KEEP_ALIVE_TIME = 0;
/** Default keep alive time for utility thread pool. */
public static final long DFLT_UTILITY_KEEP_ALIVE_TIME = 10_000;
/** Default max queue capacity of system thread pool. */
public static final int DFLT_SYSTEM_THREADPOOL_QUEUE_CAP = Integer.MAX_VALUE;
/** Default size of peer class loading thread pool. */
public static final int DFLT_P2P_THREAD_CNT = 2;
/** Default size of management thread pool. */
public static final int DFLT_MGMT_THREAD_CNT = 4;
/** Default segmentation policy. */
public static final SegmentationPolicy DFLT_SEG_PLC = STOP;
/** Default value for wait for segment on startup flag. */
public static final boolean DFLT_WAIT_FOR_SEG_ON_START = true;
/** Default value for all segmentation resolvers pass required. */
public static final boolean DFLT_ALL_SEG_RESOLVERS_PASS_REQ = true;
/** Default value segmentation resolve attempts count. */
public static final int DFLT_SEG_RESOLVE_ATTEMPTS = 2;
/** Default segment check frequency in discovery manager. */
public static final long DFLT_SEG_CHK_FREQ = 10000;
/** Default frequency of metrics log print out. */
public static final long DFLT_METRICS_LOG_FREQ = 60000;
/** Default TCP server port. */
public static final int DFLT_TCP_PORT = 11211;
/** Default marshal local jobs flag. */
public static final boolean DFLT_MARSHAL_LOCAL_JOBS = false;
/** Default value for cache sanity check enabled flag. */
public static final boolean DFLT_CACHE_SANITY_CHECK_ENABLED = true;
/** Default failure detection timeout in millis. */
@SuppressWarnings("UnnecessaryBoxing")
// NOTE(review): boxing via the constructor (rather than autoboxing/Long.valueOf) looks deliberate,
// presumably so this default instance can be distinguished from a user-supplied value by reference
// identity — confirm before "simplifying" to an autoboxed literal.
public static final Long DFLT_FAILURE_DETECTION_TIMEOUT = new Long(10_000);
/** Optional grid name. */
private String gridName;
/** User attributes. */
private Map<String, ?> userAttrs;
/** Logger. */
private IgniteLogger log;
/** Public pool size. */
private int pubPoolSize = DFLT_PUBLIC_THREAD_CNT;
/** System pool size. */
private int sysPoolSize = DFLT_SYSTEM_CORE_THREAD_CNT;
/** Management pool size. */
private int mgmtPoolSize = DFLT_MGMT_THREAD_CNT;
/** IGFS pool size. */
private int igfsPoolSize = AVAILABLE_PROC_CNT;
/** Utility cache pool size. */
private int utilityCachePoolSize = DFLT_SYSTEM_CORE_THREAD_CNT;
/** Utility cache pool keep alive time. */
private long utilityCacheKeepAliveTime = DFLT_UTILITY_KEEP_ALIVE_TIME;
/** Marshaller pool size. */
private int marshCachePoolSize = DFLT_SYSTEM_CORE_THREAD_CNT;
/** Marshaller pool keep alive time. */
private long marshCacheKeepAliveTime = DFLT_UTILITY_KEEP_ALIVE_TIME;
/** P2P pool size. */
private int p2pPoolSize = DFLT_P2P_THREAD_CNT;
/** Ignite installation folder. */
private String ggHome;
/** Ignite work folder. */
private String ggWork;
/** MBean server. */
private MBeanServer mbeanSrv;
/** Local node ID. */
private UUID nodeId;
/** Marshaller. */
private Marshaller marsh;
/** Marshal local jobs. */
private boolean marshLocJobs = DFLT_MARSHAL_LOCAL_JOBS;
/** Daemon flag. */
private boolean daemon;
/** Whether or not peer class loading is enabled. */
private boolean p2pEnabled = DFLT_P2P_ENABLED;
/** List of package prefixes from the system class path that should be P2P loaded. */
private String[] p2pLocClsPathExcl;
/** Events of these types should be recorded. */
private int[] inclEvtTypes;
/** Maximum network requests timeout. */
private long netTimeout = DFLT_NETWORK_TIMEOUT;
/** Interval between message send retries. */
private long sndRetryDelay = DFLT_SEND_RETRY_DELAY;
/** Message send retries count. */
private int sndRetryCnt = DFLT_SEND_RETRY_CNT;
/** Number of samples for clock synchronization. */
private int clockSyncSamples = DFLT_CLOCK_SYNC_SAMPLES;
/** Clock synchronization frequency. */
private long clockSyncFreq = DFLT_CLOCK_SYNC_FREQUENCY;
/** Metrics history size. */
private int metricsHistSize = DFLT_METRICS_HISTORY_SIZE;
/** Metrics update frequency. */
private long metricsUpdateFreq = DFLT_METRICS_UPDATE_FREQ;
/** Metrics expire time. */
private long metricsExpTime = DFLT_METRICS_EXPIRE_TIME;
/** Collection of life-cycle beans. */
private LifecycleBean[] lifecycleBeans;
/** Discovery SPI. */
private DiscoverySpi discoSpi;
/** Segmentation policy. */
private SegmentationPolicy segPlc = DFLT_SEG_PLC;
/** Segmentation resolvers. */
private SegmentationResolver[] segResolvers;
/** Segmentation resolve attempts count. */
private int segResolveAttempts = DFLT_SEG_RESOLVE_ATTEMPTS;
/** Wait for segment on startup flag. */
private boolean waitForSegOnStart = DFLT_WAIT_FOR_SEG_ON_START;
/** All segmentation resolvers pass required flag. */
private boolean allResolversPassReq = DFLT_ALL_SEG_RESOLVERS_PASS_REQ;
/** Segment check frequency. */
private long segChkFreq = DFLT_SEG_CHK_FREQ;
/** Communication SPI. */
private CommunicationSpi commSpi;
/** Event storage SPI. */
private EventStorageSpi evtSpi;
/** Collision SPI. */
private CollisionSpi colSpi;
/** Deployment SPI. */
private DeploymentSpi deploySpi;
/** Checkpoint SPI. */
private CheckpointSpi[] cpSpi;
/** Failover SPI. */
private FailoverSpi[] failSpi;
/** Load balancing SPI. */
private LoadBalancingSpi[] loadBalancingSpi;
/** Swap space SPI. */
private SwapSpaceSpi swapSpaceSpi;
/** Indexing SPI. */
private IndexingSpi indexingSpi;
/** Address resolver. */
private AddressResolver addrRslvr;
/** Cache configurations. */
private CacheConfiguration[] cacheCfg;
/** Client mode flag. */
private Boolean clientMode;
/** Rebalance thread pool size. */
private int rebalanceThreadPoolSize = DFLT_REBALANCE_THREAD_POOL_SIZE;
/** Transactions configuration. */
private TransactionConfiguration txCfg = new TransactionConfiguration();
/** */
private PluginConfiguration[] pluginCfgs;
/** Flag indicating whether cache sanity check is enabled. */
private boolean cacheSanityCheckEnabled = DFLT_CACHE_SANITY_CHECK_ENABLED;
/** Discovery startup delay. */
private long discoStartupDelay = DFLT_DISCOVERY_STARTUP_DELAY;
/** Tasks classes sharing mode. */
private DeploymentMode deployMode = DFLT_DEPLOYMENT_MODE;
/** Cache size of missed resources. */
private int p2pMissedCacheSize = DFLT_P2P_MISSED_RESOURCES_CACHE_SIZE;
/** Local host. */
private String locHost;
/** Base port number for time server. */
private int timeSrvPortBase = DFLT_TIME_SERVER_PORT_BASE;
/** Port number range for time server. */
private int timeSrvPortRange = DFLT_TIME_SERVER_PORT_RANGE;
/** Failure detection timeout. */
private Long failureDetectionTimeout = DFLT_FAILURE_DETECTION_TIMEOUT;
/** Property names to include into node attributes. */
private String[] includeProps;
/** Frequency of metrics log print out. */
@SuppressWarnings("RedundantFieldInitialization")
private long metricsLogFreq = DFLT_METRICS_LOG_FREQ;
/** Local event listeners. */
private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
/** IGFS configuration. */
private FileSystemConfiguration[] igfsCfg;
/** Service configuration. */
private ServiceConfiguration[] svcCfgs;
/** Hadoop configuration. */
private HadoopConfiguration hadoopCfg;
/** Client access configuration. */
private ConnectorConfiguration connectorCfg = new ConnectorConfiguration();
/** Warmup closure. Will be invoked before actual grid start. */
private IgniteInClosure<IgniteConfiguration> warmupClos;
/** */
private AtomicConfiguration atomicCfg = new AtomicConfiguration();
/** User's class loader. */
private ClassLoader classLdr;
/** Cache store session listeners. */
private Factory<CacheStoreSessionListener>[] storeSesLsnrs;
/** Consistent globally unique node ID which survives node restarts. */
private Serializable consistentId;
/** SSL connection factory. */
private Factory<SSLContext> sslCtxFactory;
/** Platform configuration. */
private PlatformConfiguration platformCfg;
/** Cache key configuration. */
private CacheKeyConfiguration[] cacheKeyCfg;
/** */
private BinaryConfiguration binaryCfg;
/**
 * Creates valid grid configuration with all default values.
 * <p>
 * All defaults are supplied by the field initializers of this class, so no
 * further setup is required here.
 */
public IgniteConfiguration() {
// No-op.
}
/**
 * Creates grid configuration by copying all configuration properties from
 * given configuration.
 * <p>
 * Note that this is a <b>shallow</b> copy: object-valued properties (SPI
 * instances, cache configurations, listeners, factories, etc.) are shared
 * with {@code cfg}, not cloned.
 *
 * @param cfg Grid configuration to copy from.
 */
public IgniteConfiguration(IgniteConfiguration cfg) {
assert cfg != null; // Programming-error guard only; has no effect unless assertions (-ea) are enabled.
// SPIs.
discoSpi = cfg.getDiscoverySpi();
commSpi = cfg.getCommunicationSpi();
deploySpi = cfg.getDeploymentSpi();
evtSpi = cfg.getEventStorageSpi();
cpSpi = cfg.getCheckpointSpi();
colSpi = cfg.getCollisionSpi();
failSpi = cfg.getFailoverSpi();
loadBalancingSpi = cfg.getLoadBalancingSpi();
indexingSpi = cfg.getIndexingSpi();
swapSpaceSpi = cfg.getSwapSpaceSpi();
/*
 * Order alphabetically for maintenance purposes.
 */
addrRslvr = cfg.getAddressResolver();
allResolversPassReq = cfg.isAllSegmentationResolversPassRequired();
atomicCfg = cfg.getAtomicConfiguration();
binaryCfg = cfg.getBinaryConfiguration();
daemon = cfg.isDaemon();
cacheCfg = cfg.getCacheConfiguration();
cacheKeyCfg = cfg.getCacheKeyConfiguration();
cacheSanityCheckEnabled = cfg.isCacheSanityCheckEnabled();
connectorCfg = cfg.getConnectorConfiguration();
classLdr = cfg.getClassLoader();
clientMode = cfg.isClientMode();
clockSyncFreq = cfg.getClockSyncFrequency();
clockSyncSamples = cfg.getClockSyncSamples();
consistentId = cfg.getConsistentId();
deployMode = cfg.getDeploymentMode();
discoStartupDelay = cfg.getDiscoveryStartupDelay();
failureDetectionTimeout = cfg.getFailureDetectionTimeout();
ggHome = cfg.getIgniteHome();
ggWork = cfg.getWorkDirectory();
gridName = cfg.getGridName();
igfsCfg = cfg.getFileSystemConfiguration();
igfsPoolSize = cfg.getIgfsThreadPoolSize();
hadoopCfg = cfg.getHadoopConfiguration();
inclEvtTypes = cfg.getIncludeEventTypes();
includeProps = cfg.getIncludeProperties();
lifecycleBeans = cfg.getLifecycleBeans();
locHost = cfg.getLocalHost();
log = cfg.getGridLogger();
lsnrs = cfg.getLocalEventListeners();
marsh = cfg.getMarshaller();
marshLocJobs = cfg.isMarshalLocalJobs();
marshCacheKeepAliveTime = cfg.getMarshallerCacheKeepAliveTime();
marshCachePoolSize = cfg.getMarshallerCacheThreadPoolSize();
mbeanSrv = cfg.getMBeanServer();
metricsHistSize = cfg.getMetricsHistorySize();
metricsExpTime = cfg.getMetricsExpireTime();
metricsLogFreq = cfg.getMetricsLogFrequency();
metricsUpdateFreq = cfg.getMetricsUpdateFrequency();
mgmtPoolSize = cfg.getManagementThreadPoolSize();
netTimeout = cfg.getNetworkTimeout();
nodeId = cfg.getNodeId();
p2pEnabled = cfg.isPeerClassLoadingEnabled();
p2pLocClsPathExcl = cfg.getPeerClassLoadingLocalClassPathExclude();
p2pMissedCacheSize = cfg.getPeerClassLoadingMissedResourcesCacheSize();
p2pPoolSize = cfg.getPeerClassLoadingThreadPoolSize();
platformCfg = cfg.getPlatformConfiguration();
pluginCfgs = cfg.getPluginConfigurations();
pubPoolSize = cfg.getPublicThreadPoolSize();
rebalanceThreadPoolSize = cfg.getRebalanceThreadPoolSize();
segChkFreq = cfg.getSegmentCheckFrequency();
segPlc = cfg.getSegmentationPolicy();
segResolveAttempts = cfg.getSegmentationResolveAttempts();
segResolvers = cfg.getSegmentationResolvers();
sndRetryCnt = cfg.getNetworkSendRetryCount();
sndRetryDelay = cfg.getNetworkSendRetryDelay();
sslCtxFactory = cfg.getSslContextFactory();
storeSesLsnrs = cfg.getCacheStoreSessionListenerFactories();
svcCfgs = cfg.getServiceConfiguration();
sysPoolSize = cfg.getSystemThreadPoolSize();
timeSrvPortBase = cfg.getTimeServerPortBase();
timeSrvPortRange = cfg.getTimeServerPortRange();
txCfg = cfg.getTransactionConfiguration();
userAttrs = cfg.getUserAttributes();
utilityCacheKeepAliveTime = cfg.getUtilityCacheKeepAliveTime();
utilityCachePoolSize = cfg.getUtilityCacheThreadPoolSize();
waitForSegOnStart = cfg.isWaitForSegmentOnStart();
warmupClos = cfg.getWarmupClosure();
}
/**
* Gets optional grid name. Returns {@code null} if non-default grid name was not
* provided.
*
* @return Optional grid name. Can be {@code null}, which is default grid name, if
* non-default grid name was not provided.
*/
public String getGridName() {
return gridName;
}
/**
 * Whether or not this node should be a daemon node.
 * <p>
 * Daemon nodes are the usual grid nodes that participate in topology but are not
 * visible on the main APIs, i.e. they are not part of any cluster groups. The only
 * way to see daemon nodes is to use {@link ClusterGroup#forDaemons()} method.
 * <p>
 * Daemon nodes are used primarily for management and monitoring functionality that
 * is built on Ignite and needs to participate in the topology, but also needs to be
 * excluded from the "normal" topology, so that it won't participate in the task execution
 * or in-memory data grid storage.
 *
 * @return {@code True} if this node should be a daemon node, {@code false} otherwise.
 * @see ClusterGroup#forDaemons()
 */
public boolean isDaemon() {
return daemon;
}
/**
 * Sets daemon flag.
 * <p>
 * Daemon nodes are the usual grid nodes that participate in topology but are not
 * visible on the main APIs, i.e. they are not part of any cluster group. The only
 * way to see daemon nodes is to use {@link ClusterGroup#forDaemons()} method.
 * <p>
 * Daemon nodes are used primarily for management and monitoring functionality that
 * is built on Ignite and needs to participate in the topology, but also needs to be
 * excluded from the "normal" topology, so that it won't participate in the task execution
 * or in-memory data grid storage.
 *
 * @param daemon Daemon flag.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setDaemon(boolean daemon) {
this.daemon = daemon;
return this;
}
/**
* Sets grid name. Note that {@code null} is a default grid name.
*
* @param gridName Grid name to set. Can be {@code null}, which is default
* grid name.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setGridName(String gridName) {
this.gridName = gridName;
return this;
}
/**
* Sets consistent globally unique node ID which survives node restarts.
*
* @param consistentId Node consistent ID.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setConsistentId(Serializable consistentId) {
this.consistentId = consistentId;
return this;
}
/**
* Gets consistent globally unique node ID which survives node restarts.
*
* @return Node consistent ID.
*/
public Serializable getConsistentId() {
return consistentId;
}
/**
* Should return any user-defined attributes to be added to this node. These attributes can
* then be accessed on nodes by calling {@link ClusterNode#attribute(String)} or
* {@link ClusterNode#attributes()} methods.
* <p>
* Note that system adds the following (among others) attributes automatically:
* <ul>
* <li>{@code {@link System#getProperties()}} - All system properties.</li>
* <li>{@code {@link System#getenv(String)}} - All environment properties.</li>
* </ul>
* <p>
* Note that grid will add all System properties and environment properties
* to grid node attributes also. SPIs may also add node attributes that are
* used for SPI implementation.
* <p>
* <b>NOTE:</b> attributes names starting with {@code org.apache.ignite} are reserved
* for internal use.
*
* @return User defined attributes for this node.
*/
public Map<String, ?> getUserAttributes() {
return userAttrs;
}
/**
* Sets user attributes for this node.
*
* @param userAttrs User attributes for this node.
* @see IgniteConfiguration#getUserAttributes()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setUserAttributes(Map<String, ?> userAttrs) {
this.userAttrs = userAttrs;
return this;
}
/**
* Should return an instance of logger to use in grid. If not provided,
* {@ignitelink org.apache.ignite.logger.log4j.Log4JLogger}
* will be used.
*
* @return Logger to use in grid.
*/
public IgniteLogger getGridLogger() {
return log;
}
/**
* Sets logger to use within grid.
*
* @param log Logger to use within grid.
* @see IgniteConfiguration#getGridLogger()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setGridLogger(IgniteLogger log) {
this.log = log;
return this;
}
/**
* Should return a thread pool size to be used in grid.
* This executor service will be in charge of processing {@link ComputeJob GridJobs}
* and user messages sent to node.
* <p>
* If not provided, executor service will have size {@link #DFLT_PUBLIC_THREAD_CNT}.
*
* @return Thread pool size to be used in grid to process job execution
* requests and user messages sent to the node.
*/
public int getPublicThreadPoolSize() {
return pubPoolSize;
}
/**
* Size of thread pool that is in charge of processing internal system messages.
* <p>
* If not provided, executor service will have size {@link #DFLT_SYSTEM_CORE_THREAD_CNT}.
*
* @return Thread pool size to be used in grid for internal system messages.
*/
public int getSystemThreadPoolSize() {
return sysPoolSize;
}
/**
* Size of thread pool that is in charge of processing internal and Visor
* {@link ComputeJob GridJobs}.
* <p>
* If not provided, executor service will have size {@link #DFLT_MGMT_THREAD_CNT}
*
* @return Thread pool size to be used in grid for internal and Visor
* jobs processing.
*/
public int getManagementThreadPoolSize() {
return mgmtPoolSize;
}
/**
 * Size of thread pool which is in charge of peer class loading requests/responses. If you don't use
 * peer class loading and use GAR deployment only, we recommend decreasing
 * the value of total threads to {@code 1}.
 * <p>
 * If not provided, executor service will have size {@link #DFLT_P2P_THREAD_CNT}.
 *
 * @return Thread pool size to be used for peer class loading
 * requests handling.
 */
public int getPeerClassLoadingThreadPoolSize() {
return p2pPoolSize;
}
/**
 * Size of thread pool that is in charge of processing outgoing IGFS messages.
 * <p>
 * If not provided, executor service will have size equal to the number of processors available in system
 * (see {@link #AVAILABLE_PROC_CNT}).
 *
 * @return Thread pool size to be used for IGFS outgoing message sending.
 */
public int getIgfsThreadPoolSize() {
return igfsPoolSize;
}
/**
* Default size of thread pool that is in charge of processing utility cache messages.
* <p>
* If not provided, executor service will have size {@link #DFLT_SYSTEM_CORE_THREAD_CNT}.
*
* @return Default thread pool size to be used in grid for utility cache messages.
*/
public int getUtilityCacheThreadPoolSize() {
return utilityCachePoolSize;
}
/**
* Keep alive time of thread pool that is in charge of processing utility cache messages.
* <p>
* If not provided, executor service will have keep alive time {@link #DFLT_UTILITY_KEEP_ALIVE_TIME}.
*
* @return Thread pool keep alive time (in milliseconds) to be used in grid for utility cache messages.
*/
public long getUtilityCacheKeepAliveTime() {
return utilityCacheKeepAliveTime;
}
/**
* Default size of thread pool that is in charge of processing marshaller messages.
* <p>
* If not provided, executor service will have size {@link #DFLT_SYSTEM_CORE_THREAD_CNT}.
*
* @return Default thread pool size to be used in grid for marshaller messages.
*/
public int getMarshallerCacheThreadPoolSize() {
return marshCachePoolSize;
}
/**
* Keep alive time of thread pool that is in charge of processing marshaller messages.
* <p>
* If not provided, executor service will have keep alive time {@link #DFLT_UTILITY_KEEP_ALIVE_TIME}.
*
* @return Thread pool keep alive time (in milliseconds) to be used in grid for marshaller messages.
*/
public long getMarshallerCacheKeepAliveTime() {
return marshCacheKeepAliveTime;
}
/**
* Sets thread pool size to use within grid.
*
* @param poolSize Thread pool size to use within grid.
* @see IgniteConfiguration#getPublicThreadPoolSize()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setPublicThreadPoolSize(int poolSize) {
pubPoolSize = poolSize;
return this;
}
/**
* Sets system thread pool size to use within grid.
*
* @param poolSize Thread pool size to use within grid.
* @see IgniteConfiguration#getSystemThreadPoolSize()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSystemThreadPoolSize(int poolSize) {
sysPoolSize = poolSize;
return this;
}
/**
* Sets management thread pool size to use within grid.
*
* @param poolSize Thread pool size to use within grid.
* @see IgniteConfiguration#getManagementThreadPoolSize()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setManagementThreadPoolSize(int poolSize) {
mgmtPoolSize = poolSize;
return this;
}
/**
* Sets thread pool size to use for peer class loading.
*
* @param poolSize Thread pool size to use within grid.
* @see IgniteConfiguration#getPeerClassLoadingThreadPoolSize()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setPeerClassLoadingThreadPoolSize(int poolSize) {
p2pPoolSize = poolSize;
return this;
}
/**
* Set thread pool size that will be used to process outgoing IGFS messages.
*
* @param poolSize Executor service to use for outgoing IGFS messages.
* @see IgniteConfiguration#getIgfsThreadPoolSize()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setIgfsThreadPoolSize(int poolSize) {
igfsPoolSize = poolSize;
return this;
}
/**
* Sets default thread pool size that will be used to process utility cache messages.
*
* @param poolSize Default executor service size to use for utility cache messages.
* @see IgniteConfiguration#getUtilityCacheThreadPoolSize()
* @see IgniteConfiguration#getUtilityCacheKeepAliveTime()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setUtilityCachePoolSize(int poolSize) {
utilityCachePoolSize = poolSize;
return this;
}
/**
* Sets keep alive time of thread pool size that will be used to process utility cache messages.
*
* @param keepAliveTime Keep alive time of executor service to use for utility cache messages.
* @see IgniteConfiguration#getUtilityCacheThreadPoolSize()
* @see IgniteConfiguration#getUtilityCacheKeepAliveTime()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setUtilityCacheKeepAliveTime(long keepAliveTime) {
utilityCacheKeepAliveTime = keepAliveTime;
return this;
}
/**
* Sets default thread pool size that will be used to process marshaller messages.
*
* @param poolSize Default executor service size to use for marshaller messages.
* @see IgniteConfiguration#getMarshallerCacheThreadPoolSize()
* @see IgniteConfiguration#getMarshallerCacheKeepAliveTime()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMarshallerCachePoolSize(int poolSize) {
marshCachePoolSize = poolSize;
return this;
}
/**
 * Sets keep alive time of thread pool that will be used to process marshaller messages.
 *
 * @param keepAliveTime Keep alive time of executor service to use for marshaller messages.
 * @see IgniteConfiguration#getMarshallerCacheThreadPoolSize()
 * @see IgniteConfiguration#getMarshallerCacheKeepAliveTime()
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setMarshallerCacheKeepAliveTime(long keepAliveTime) {
marshCacheKeepAliveTime = keepAliveTime;
return this;
}
/**
* Should return Ignite installation home folder. If not provided, the system will check
* {@code IGNITE_HOME} system property and environment variable in that order. If
* {@code IGNITE_HOME} still could not be obtained, then grid will not start and exception
* will be thrown.
*
* @return Ignite installation home or {@code null} to make the system attempt to
* infer it automatically.
* @see IgniteSystemProperties#IGNITE_HOME
*/
public String getIgniteHome() {
return ggHome;
}
/**
* Sets Ignite installation folder.
*
* @param ggHome {@code Ignition} installation folder.
* @see IgniteConfiguration#getIgniteHome()
* @see IgniteSystemProperties#IGNITE_HOME
* @return {@code this} for chaining.
*/
public IgniteConfiguration setIgniteHome(String ggHome) {
this.ggHome = ggHome;
return this;
}
/**
* Gets Ignite work folder. If not provided, the method will use work folder under
* {@code IGNITE_HOME} specified by {@link IgniteConfiguration#setIgniteHome(String)} or
* {@code IGNITE_HOME} environment variable or system property.
* <p>
* If {@code IGNITE_HOME} is not provided, then system temp folder is used.
*
* @return Ignite work folder or {@code null} to make the system attempt to infer it automatically.
* @see IgniteConfiguration#getIgniteHome()
* @see IgniteSystemProperties#IGNITE_HOME
*/
public String getWorkDirectory() {
return ggWork;
}
/**
* Sets Ignite work folder.
*
* @param ggWork {@code Ignite} work folder.
* @see IgniteConfiguration#getWorkDirectory()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setWorkDirectory(String ggWork) {
this.ggWork = ggWork;
return this;
}
/**
* Should return MBean server instance. If not provided, the system will use default
* platform MBean server.
*
* @return MBean server instance or {@code null} to make the system create a default one.
* @see ManagementFactory#getPlatformMBeanServer()
*/
public MBeanServer getMBeanServer() {
return mbeanSrv;
}
/**
* Sets initialized and started MBean server.
*
* @param mbeanSrv Initialized and started MBean server.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMBeanServer(MBeanServer mbeanSrv) {
this.mbeanSrv = mbeanSrv;
return this;
}
/**
 * Unique identifier for this node within grid.
 *
 * @return Unique identifier for this node within grid.
 * @deprecated Use {@link #getConsistentId()} instead.
 */
@Deprecated
public UUID getNodeId() {
return nodeId;
}
/**
* Sets unique identifier for local node.
*
* @param nodeId Unique identifier for local node.
* @see IgniteConfiguration#getNodeId()
* @return {@code this} for chaining.
* @deprecated Use {@link #setConsistentId(Serializable)} instead.
*/
@Deprecated
public IgniteConfiguration setNodeId(UUID nodeId) {
this.nodeId = nodeId;
return this;
}
/**
* Should return an instance of marshaller to use in grid. If not provided,
* default marshaller implementation that allows to read object field values
* without deserialization will be used.
*
* @return Marshaller to use in grid.
*/
public Marshaller getMarshaller() {
return marsh;
}
/**
* Sets marshaller to use within grid.
*
* @param marsh Marshaller to use within grid.
* @see IgniteConfiguration#getMarshaller()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMarshaller(Marshaller marsh) {
this.marsh = marsh;
return this;
}
/**
* Returns {@code true} if peer class loading is enabled, {@code false}
* otherwise. Default value is {@code false} specified by {@link #DFLT_P2P_ENABLED}.
* <p>
* When peer class loading is enabled and task is not deployed on local node,
* local node will try to load classes from the node that initiated task
* execution. This way, a task can be physically deployed only on one node
* and then internally penetrate to all other nodes.
* <p>
* See {@link ComputeTask} documentation for more information about task deployment.
*
* @return {@code true} if peer class loading is enabled, {@code false}
* otherwise.
*/
public boolean isPeerClassLoadingEnabled() {
return p2pEnabled;
}
/**
* If this flag is set to {@code true}, jobs mapped to local node will be
* marshalled as if it was remote node.
* <p>
* If not provided, default value is defined by {@link #DFLT_MARSHAL_LOCAL_JOBS}.
*
* @return {@code True} if local jobs should be marshalled.
*/
public boolean isMarshalLocalJobs() {
return marshLocJobs;
}
/**
* Sets marshal local jobs flag.
*
* @param marshLocJobs {@code True} if local jobs should be marshalled.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMarshalLocalJobs(boolean marshLocJobs) {
this.marshLocJobs = marshLocJobs;
return this;
}
/**
* Enables/disables peer class loading.
*
* @param p2pEnabled {@code true} if peer class loading is
* enabled, {@code false} otherwise.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setPeerClassLoadingEnabled(boolean p2pEnabled) {
this.p2pEnabled = p2pEnabled;
return this;
}
/**
* Should return list of packages from the system classpath that need to
* be peer-to-peer loaded from task originating node.
* '*' is supported at the end of the package name which means
* that all sub-packages and their classes are included like in Java
* package import clause.
*
* @return List of peer-to-peer loaded package names.
*/
public String[] getPeerClassLoadingLocalClassPathExclude() {
return p2pLocClsPathExcl;
}
/**
* Sets list of packages in a system class path that should be P2P
* loaded even if they exist locally.
*
* @param p2pLocClsPathExcl List of P2P loaded packages. Package
* name supports '*' at the end like in package import clause.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setPeerClassLoadingLocalClassPathExclude(String... p2pLocClsPathExcl) {
this.p2pLocClsPathExcl = p2pLocClsPathExcl;
return this;
}
/**
* Number of node metrics to keep in memory to calculate totals and averages.
* If not provided (value is {@code 0}), then default value
* {@link #DFLT_METRICS_HISTORY_SIZE} is used.
*
* @return Metrics history size.
* @see #DFLT_METRICS_HISTORY_SIZE
*/
public int getMetricsHistorySize() {
return metricsHistSize;
}
/**
* Sets number of metrics kept in history to compute totals and averages.
* If not explicitly set, then default value is {@code 10,000}.
*
* @param metricsHistSize Number of metrics kept in history to use for
* metric totals and averages calculations.
* @see #DFLT_METRICS_HISTORY_SIZE
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMetricsHistorySize(int metricsHistSize) {
this.metricsHistSize = metricsHistSize;
return this;
}
/**
* Gets job metrics update frequency in milliseconds.
* <p>
* Updating metrics too frequently may have negative performance impact.
* <p>
* The following values are accepted:
* <ul>
* <li>{@code -1} job metrics are never updated.</li>
* <li>{@code 0} job metrics are updated on each job start and finish.</li>
* <li>Positive value defines the actual update frequency. If not provided, then default value
* {@link #DFLT_METRICS_UPDATE_FREQ} is used.</li>
* </ul>
* If not provided, then default value {@link #DFLT_METRICS_UPDATE_FREQ} is used.
*
* @return Job metrics update frequency in milliseconds.
* @see #DFLT_METRICS_UPDATE_FREQ
*/
public long getMetricsUpdateFrequency() {
return metricsUpdateFreq;
}
/**
* Sets job metrics update frequency in milliseconds.
* <p>
* If set to {@code -1} job metrics are never updated.
* If set to {@code 0} job metrics are updated on each job start and finish.
* Positive value defines the actual update frequency.
* If not provided, then default value
* {@link #DFLT_METRICS_UPDATE_FREQ} is used.
*
* @param metricsUpdateFreq Job metrics update frequency in milliseconds.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMetricsUpdateFrequency(long metricsUpdateFreq) {
this.metricsUpdateFreq = metricsUpdateFreq;
return this;
}
/**
* Elapsed time in milliseconds after which node metrics are considered expired.
* If not provided, then default value
* {@link #DFLT_METRICS_EXPIRE_TIME} is used.
*
* @return Metrics expire time.
* @see #DFLT_METRICS_EXPIRE_TIME
*/
public long getMetricsExpireTime() {
return metricsExpTime;
}
/**
* Sets time in milliseconds after which a certain metric value is considered expired.
* If not set explicitly, then default value is {@code 600,000} milliseconds (10 minutes).
*
* @param metricsExpTime The metricsExpTime to set.
* @see #DFLT_METRICS_EXPIRE_TIME
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMetricsExpireTime(long metricsExpTime) {
this.metricsExpTime = metricsExpTime;
return this;
}
/**
* Maximum timeout in milliseconds for network requests.
* <p>
* If not provided, then default value
* {@link #DFLT_NETWORK_TIMEOUT} is used.
*
* @return Maximum timeout for network requests.
* @see #DFLT_NETWORK_TIMEOUT
*/
public long getNetworkTimeout() {
return netTimeout;
}
/**
* Maximum timeout in milliseconds for network requests.
* <p>
* If not provided (value is {@code 0}), then default value
* {@link #DFLT_NETWORK_TIMEOUT} is used.
*
* @param netTimeout Maximum timeout for network requests.
* @see #DFLT_NETWORK_TIMEOUT
* @return {@code this} for chaining.
*/
public IgniteConfiguration setNetworkTimeout(long netTimeout) {
this.netTimeout = netTimeout;
return this;
}
/**
* Interval in milliseconds between message send retries.
* <p>
* If not provided, then default value
* {@link #DFLT_SEND_RETRY_DELAY} is used.
*
* @return Interval between message send retries.
* @see #getNetworkSendRetryCount()
* @see #DFLT_SEND_RETRY_DELAY
*/
public long getNetworkSendRetryDelay() {
return sndRetryDelay;
}
/**
* Sets interval in milliseconds between message send retries.
* <p>
* If not provided, then default value
* {@link #DFLT_SEND_RETRY_DELAY} is used.
*
* @param sndRetryDelay Interval between message send retries.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setNetworkSendRetryDelay(long sndRetryDelay) {
this.sndRetryDelay = sndRetryDelay;
return this;
}
/**
* Message send retries count.
* <p>
* If not provided, then default value
* {@link #DFLT_SEND_RETRY_CNT} is used.
*
* @return Message send retries count.
* @see #getNetworkSendRetryDelay()
* @see #DFLT_SEND_RETRY_CNT
*/
public int getNetworkSendRetryCount() {
return sndRetryCnt;
}
/**
* Sets message send retries count.
* <p>
* If not provided, then default value
* {@link #DFLT_SEND_RETRY_CNT} is used.
*
* @param sndRetryCnt Message send retries count.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setNetworkSendRetryCount(int sndRetryCnt) {
this.sndRetryCnt = sndRetryCnt;
return this;
}
/**
* Gets number of samples used to synchronize clocks between different nodes.
* <p>
* Clock synchronization is used for cache version assignment in {@code CLOCK} order mode.
*
* @return Number of samples for one synchronization round.
*/
public int getClockSyncSamples() {
return clockSyncSamples;
}
/**
* Sets number of samples used for clock synchronization.
*
* @param clockSyncSamples Number of samples.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setClockSyncSamples(int clockSyncSamples) {
this.clockSyncSamples = clockSyncSamples;
return this;
}
/**
* Gets frequency at which clock is synchronized between nodes, in milliseconds.
* <p>
* Clock synchronization is used for cache version assignment in {@code CLOCK} order mode.
*
* @return Clock synchronization frequency, in milliseconds.
*/
public long getClockSyncFrequency() {
return clockSyncFreq;
}
/**
* Sets clock synchronization frequency in milliseconds.
*
* @param clockSyncFreq Clock synchronization frequency.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setClockSyncFrequency(long clockSyncFreq) {
this.clockSyncFreq = clockSyncFreq;
return this;
}
    /**
     * Gets the maximum number of threads that can be used for rebalancing.
     * Minimum is 1.
     * @return Maximum rebalance thread pool size.
     */
    public int getRebalanceThreadPoolSize() {
        return rebalanceThreadPoolSize;
    }
    /**
     * Sets the maximum number of threads that can be used for rebalancing.
     *
     * Default is {@code 1} which has minimal impact on the operation of the grid.
     *
     * @param rebalanceThreadPoolSize Number of system threads that will be assigned for partition transfer during
     *      rebalancing.
     * @return {@code this} for chaining.
     */
    public IgniteConfiguration setRebalanceThreadPoolSize(int rebalanceThreadPoolSize) {
        this.rebalanceThreadPoolSize = rebalanceThreadPoolSize;
        return this;
    }
/**
* Returns a collection of life-cycle beans. These beans will be automatically
* notified of grid life-cycle events. Use life-cycle beans whenever you
* want to perform certain logic before and after grid startup and stopping
* routines.
*
* @return Collection of life-cycle beans.
* @see LifecycleBean
* @see LifecycleEventType
*/
public LifecycleBean[] getLifecycleBeans() {
return lifecycleBeans;
}
/**
* Sets a collection of lifecycle beans. These beans will be automatically
* notified of grid lifecycle events. Use lifecycle beans whenever you
* want to perform certain logic before and after grid startup and stopping
* routines.
*
* @param lifecycleBeans Collection of lifecycle beans.
* @see LifecycleEventType
* @return {@code this} for chaining.
*/
public IgniteConfiguration setLifecycleBeans(LifecycleBean... lifecycleBeans) {
this.lifecycleBeans = lifecycleBeans;
return this;
}
    /**
     * Sets SSL context factory that will be used for creating a secure socket layer.
     *
     * @param sslCtxFactory Ssl context factory.
     * @see SslContextFactory
     * @return {@code this} for chaining.
     */
    public IgniteConfiguration setSslContextFactory(Factory<SSLContext> sslCtxFactory) {
        this.sslCtxFactory = sslCtxFactory;
        return this;
    }
/**
* Returns SSL context factory that will be used for creating a secure socket layer.
*
* @return SSL connection factory.
* @see SslContextFactory
*/
public Factory<SSLContext> getSslContextFactory() {
return sslCtxFactory;
}
/**
* Should return fully configured event SPI implementation. If not provided,
* {@link MemoryEventStorageSpi} will be used.
*
* @return Grid event SPI implementation or {@code null} to use default implementation.
*/
public EventStorageSpi getEventStorageSpi() {
return evtSpi;
}
/**
* Sets fully configured instance of {@link EventStorageSpi}.
*
* @param evtSpi Fully configured instance of {@link EventStorageSpi}.
* @see IgniteConfiguration#getEventStorageSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setEventStorageSpi(EventStorageSpi evtSpi) {
this.evtSpi = evtSpi;
return this;
}
/**
* Should return fully configured discovery SPI implementation. If not provided,
* {@link TcpDiscoverySpi} will be used by default.
*
* @return Grid discovery SPI implementation or {@code null} to use default implementation.
*/
public DiscoverySpi getDiscoverySpi() {
return discoSpi;
}
/**
* Sets fully configured instance of {@link DiscoverySpi}.
*
* @param discoSpi Fully configured instance of {@link DiscoverySpi}.
* @see IgniteConfiguration#getDiscoverySpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setDiscoverySpi(DiscoverySpi discoSpi) {
this.discoSpi = discoSpi;
return this;
}
/**
* Returns segmentation policy. Default is {@link #DFLT_SEG_PLC}.
*
* @return Segmentation policy.
*/
public SegmentationPolicy getSegmentationPolicy() {
return segPlc;
}
/**
* Sets segmentation policy.
*
* @param segPlc Segmentation policy.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSegmentationPolicy(SegmentationPolicy segPlc) {
this.segPlc = segPlc;
return this;
}
/**
* Gets wait for segment on startup flag. Default is {@link #DFLT_WAIT_FOR_SEG_ON_START}.
* <p>
* Returns {@code true} if node should wait for correct segment on start.
* If node detects that segment is incorrect on startup and this method
* returns {@code true}, node waits until segment becomes correct.
* If segment is incorrect on startup and this method returns {@code false},
* exception is thrown.
*
* @return {@code True} to wait for segment on startup, {@code false} otherwise.
*/
public boolean isWaitForSegmentOnStart() {
return waitForSegOnStart;
}
/**
* Sets wait for segment on start flag.
*
* @param waitForSegOnStart {@code True} to wait for segment on start.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setWaitForSegmentOnStart(boolean waitForSegOnStart) {
this.waitForSegOnStart = waitForSegOnStart;
return this;
}
/**
* Gets all segmentation resolvers pass required flag.
* <p>
* Returns {@code true} if all segmentation resolvers should succeed
* for node to be in correct segment.
* Returns {@code false} if at least one segmentation resolver should succeed
* for node to be in correct segment.
* <p>
* Default is {@link #DFLT_ALL_SEG_RESOLVERS_PASS_REQ}.
*
* @return {@code True} if all segmentation resolvers should succeed,
* {@code false} if only one is enough.
*/
public boolean isAllSegmentationResolversPassRequired() {
return allResolversPassReq;
}
/**
* Sets all segmentation resolvers pass required flag.
*
* @param allResolversPassReq {@code True} if all segmentation resolvers should
* succeed for node to be in the correct segment.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setAllSegmentationResolversPassRequired(boolean allResolversPassReq) {
this.allResolversPassReq = allResolversPassReq;
return this;
}
/**
* Gets segmentation resolve attempts. Each configured resolver will have
* this attempts number to pass segmentation check prior to check failure.
*
* Default is {@link #DFLT_SEG_RESOLVE_ATTEMPTS}.
*
* @return Segmentation resolve attempts.
*/
public int getSegmentationResolveAttempts() {
return segResolveAttempts;
}
/**
* Sets segmentation resolve attempts count.
*
* @param segResolveAttempts Segmentation resolve attempts.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSegmentationResolveAttempts(int segResolveAttempts) {
this.segResolveAttempts = segResolveAttempts;
return this;
}
/**
* Returns a collection of segmentation resolvers.
* <p>
* If array is {@code null} or empty, periodical and on-start network
* segment checks do not happen.
*
* @return Segmentation resolvers.
*/
public SegmentationResolver[] getSegmentationResolvers() {
return segResolvers;
}
/**
* Sets segmentation resolvers.
*
* @param segResolvers Segmentation resolvers.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSegmentationResolvers(SegmentationResolver... segResolvers) {
this.segResolvers = segResolvers;
return this;
}
/**
* Returns frequency of network segment check by discovery manager.
* <p>
* if 0, periodic segment check is disabled and segment is checked only
* on topology changes (if segmentation resolvers are configured).
* <p>
* Default is {@link #DFLT_SEG_CHK_FREQ}.
*
* @return Segment check frequency.
*/
public long getSegmentCheckFrequency() {
return segChkFreq;
}
/**
* Sets network segment check frequency.
*
* @param segChkFreq Segment check frequency.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSegmentCheckFrequency(long segChkFreq) {
this.segChkFreq = segChkFreq;
return this;
}
/**
* Should return fully configured SPI communication implementation. If not provided,
* {@link TcpCommunicationSpi} will be used by default.
*
* @return Grid communication SPI implementation or {@code null} to use default implementation.
*/
public CommunicationSpi getCommunicationSpi() {
return commSpi;
}
/**
* Sets fully configured instance of {@link CommunicationSpi}.
*
* @param commSpi Fully configured instance of {@link CommunicationSpi}.
* @see IgniteConfiguration#getCommunicationSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setCommunicationSpi(CommunicationSpi commSpi) {
this.commSpi = commSpi;
return this;
}
/**
* Should return fully configured collision SPI implementation. If not provided,
* {@link NoopCollisionSpi} is used and jobs get activated immediately
* on arrive to mapped node. This approach suits well for large amount of small
* jobs (which is a wide-spread use case). User still can control the number
* of concurrent jobs by setting maximum thread pool size defined by
* IgniteConfiguration.getPublicThreadPoolSize() configuration property.
*
* @return Grid collision SPI implementation or {@code null} to use default implementation.
*/
public CollisionSpi getCollisionSpi() {
return colSpi;
}
/**
* Sets fully configured instance of {@link CollisionSpi}.
*
* @param colSpi Fully configured instance of {@link CollisionSpi} or
* {@code null} if no SPI provided.
* @see IgniteConfiguration#getCollisionSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setCollisionSpi(CollisionSpi colSpi) {
this.colSpi = colSpi;
return this;
}
/**
* Should return fully configured deployment SPI implementation. If not provided,
* {@link LocalDeploymentSpi} will be used.
*
* @return Grid deployment SPI implementation or {@code null} to use default implementation.
*/
public DeploymentSpi getDeploymentSpi() {
return deploySpi;
}
/**
* Sets fully configured instance of {@link DeploymentSpi}.
*
* @param deploySpi Fully configured instance of {@link DeploymentSpi}.
* @see IgniteConfiguration#getDeploymentSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setDeploymentSpi(DeploymentSpi deploySpi) {
this.deploySpi = deploySpi;
return this;
}
/**
* Should return fully configured checkpoint SPI implementation. If not provided,
* {@link NoopCheckpointSpi} will be used.
*
* @return Grid checkpoint SPI implementation or {@code null} to use default implementation.
*/
public CheckpointSpi[] getCheckpointSpi() {
return cpSpi;
}
/**
* Sets fully configured instance of {@link CheckpointSpi}.
*
* @param cpSpi Fully configured instance of {@link CheckpointSpi}.
* @see IgniteConfiguration#getCheckpointSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setCheckpointSpi(CheckpointSpi... cpSpi) {
this.cpSpi = cpSpi;
return this;
}
/**
* Should return fully configured failover SPI implementation. If not provided,
* {@link AlwaysFailoverSpi} will be used.
*
* @return Grid failover SPI implementation or {@code null} to use default implementation.
*/
public FailoverSpi[] getFailoverSpi() {
return failSpi;
}
/**
* Sets fully configured instance of {@link FailoverSpi}.
*
* @param failSpi Fully configured instance of {@link FailoverSpi} or
* {@code null} if no SPI provided.
* @see IgniteConfiguration#getFailoverSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setFailoverSpi(FailoverSpi... failSpi) {
this.failSpi = failSpi;
return this;
}
/**
* Returns failure detection timeout used by {@link TcpDiscoverySpi} and {@link TcpCommunicationSpi}.
* <p>
* Default is {@link #DFLT_FAILURE_DETECTION_TIMEOUT}.
*
* @see #setFailureDetectionTimeout(long)
* @return Failure detection timeout in milliseconds.
*/
public Long getFailureDetectionTimeout() {
return failureDetectionTimeout;
}
/**
* Sets failure detection timeout to use in {@link TcpDiscoverySpi} and {@link TcpCommunicationSpi}.
* <p>
* Failure detection timeout is used to determine how long the communication or discovery SPIs should wait before
* considering a remote connection failed.
*
* @param failureDetectionTimeout Failure detection timeout in milliseconds.
*/
public void setFailureDetectionTimeout(long failureDetectionTimeout) {
this.failureDetectionTimeout = failureDetectionTimeout;
}
/**
* Should return fully configured load balancing SPI implementation. If not provided,
* {@link RoundRobinLoadBalancingSpi} will be used.
*
* @return Grid load balancing SPI implementation or {@code null} to use default implementation.
*/
public LoadBalancingSpi[] getLoadBalancingSpi() {
return loadBalancingSpi;
}
/**
* This value is used to expire messages from waiting list whenever node
* discovery discrepancies happen.
* <p>
* During startup, it is possible for some SPIs to have a small time window when
* <tt>Node A</tt> has discovered <tt>Node B</tt>, but <tt>Node B</tt>
* has not discovered <tt>Node A</tt> yet. Such time window is usually very small,
* a matter of milliseconds, but certain JMS providers, for example, may be very slow
* and hence have larger discovery delay window.
* <p>
* The default value of this property is {@code 60,000} specified by
* {@link #DFLT_DISCOVERY_STARTUP_DELAY}. This should be good enough for vast
* majority of configurations. However, if you do anticipate an even larger
* delay, you should increase this value.
*
* @return Time in milliseconds for when nodes can be out-of-sync.
*/
public long getDiscoveryStartupDelay() {
return discoStartupDelay;
}
    /**
     * Sets the time in milliseconds for which nodes are allowed to be out-of-sync
     * during startup (see {@link #getDiscoveryStartupDelay()}).
     * If not set explicitly, then default value is {@code 60,000} milliseconds, specified
     * by {@link #DFLT_DISCOVERY_STARTUP_DELAY}.
     *
     * @param discoStartupDelay Time in milliseconds for when nodes
     *      can be out-of-sync during startup.
     * @return {@code this} for chaining.
     */
    public IgniteConfiguration setDiscoveryStartupDelay(long discoStartupDelay) {
        this.discoStartupDelay = discoStartupDelay;
        return this;
    }
/**
* Sets fully configured instance of {@link LoadBalancingSpi}.
*
* @param loadBalancingSpi Fully configured instance of {@link LoadBalancingSpi} or
* {@code null} if no SPI provided.
* @see IgniteConfiguration#getLoadBalancingSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setLoadBalancingSpi(LoadBalancingSpi... loadBalancingSpi) {
this.loadBalancingSpi = loadBalancingSpi;
return this;
}
/**
* Sets fully configured instances of {@link SwapSpaceSpi}.
*
* @param swapSpaceSpi Fully configured instances of {@link SwapSpaceSpi} or
* <tt>null</tt> if no SPI provided.
* @see IgniteConfiguration#getSwapSpaceSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setSwapSpaceSpi(SwapSpaceSpi swapSpaceSpi) {
this.swapSpaceSpi = swapSpaceSpi;
return this;
}
/**
* Should return fully configured swap space SPI implementation. If not provided,
* {@link FileSwapSpaceSpi} will be used.
* <p>
* Note that user can provide one or multiple instances of this SPI (and select later which one
* is used in a particular context).
*
* @return Grid swap space SPI implementation or <tt>null</tt> to use default implementation.
*/
public SwapSpaceSpi getSwapSpaceSpi() {
return swapSpaceSpi;
}
/**
* Sets fully configured instances of {@link IndexingSpi}.
*
* @param indexingSpi Fully configured instance of {@link IndexingSpi}.
* @see IgniteConfiguration#getIndexingSpi()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setIndexingSpi(IndexingSpi indexingSpi) {
this.indexingSpi = indexingSpi;
return this;
}
/**
* Should return fully configured indexing SPI implementations.
*
* @return Indexing SPI implementation.
*/
public IndexingSpi getIndexingSpi() {
return indexingSpi;
}
/**
* Gets address resolver for addresses mapping determination.
*
* @return Address resolver.
*/
public AddressResolver getAddressResolver() {
return addrRslvr;
}
/**
* Sets address resolver for addresses mapping determination.
*
* @param addrRslvr Address resolver.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setAddressResolver(AddressResolver addrRslvr) {
this.addrRslvr = addrRslvr;
return this;
}
/**
* Sets task classes and resources sharing mode.
*
* @param deployMode Task classes and resources sharing mode.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setDeploymentMode(DeploymentMode deployMode) {
this.deployMode = deployMode;
return this;
}
/**
* Gets deployment mode for deploying tasks and other classes on this node.
* Refer to {@link DeploymentMode} documentation for more information.
*
* @return Deployment mode.
*/
public DeploymentMode getDeploymentMode() {
return deployMode;
}
/**
* Sets size of missed resources cache. Set 0 to avoid
* missed resources caching.
*
* @param p2pMissedCacheSize Size of missed resources cache.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setPeerClassLoadingMissedResourcesCacheSize(int p2pMissedCacheSize) {
this.p2pMissedCacheSize = p2pMissedCacheSize;
return this;
}
/**
* Returns missed resources cache size. If size greater than {@code 0}, missed
* resources will be cached and next resource request ignored. If size is {@code 0},
* then request for the resource will be sent to the remote node every time this
* resource is requested.
*
* @return Missed resources cache size.
*/
public int getPeerClassLoadingMissedResourcesCacheSize() {
return p2pMissedCacheSize;
}
/**
* Gets configuration (descriptors) for all caches.
*
* @return Array of fully initialized cache descriptors.
*/
public CacheConfiguration[] getCacheConfiguration() {
return cacheCfg;
}
    /**
     * Sets cache configurations.
     *
     * @param cacheCfg Cache configurations.
     * @return {@code this} for chaining.
     */
    @SuppressWarnings({"ZeroLengthArrayAllocation"})
    public IgniteConfiguration setCacheConfiguration(CacheConfiguration... cacheCfg) {
        // Normalize null to an empty array so getCacheConfiguration() never returns null.
        this.cacheCfg = cacheCfg == null ? new CacheConfiguration[0] : cacheCfg;
        return this;
    }
/**
* Gets client mode flag. Client node cannot hold data in the caches. It's recommended to use
* {@link DiscoverySpi} in client mode if this property is {@code true}.
*
* @return Client mode flag.
* @see TcpDiscoverySpi#setForceServerMode(boolean)
*/
public Boolean isClientMode() {
return clientMode;
}
/**
* Sets client mode flag.
*
* @param clientMode Client mode flag.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setClientMode(boolean clientMode) {
this.clientMode = clientMode;
return this;
}
/**
* Gets cache key configuration.
*
* @return Cache key configuration.
*/
public CacheKeyConfiguration[] getCacheKeyConfiguration() {
return cacheKeyCfg;
}
    /**
     * Sets cache key configuration.
     * Cache key configuration defines per-type cache key settings (presumably affinity
     * key mapping — see {@link CacheKeyConfiguration} to confirm).
     *
     * @param cacheKeyCfg Cache key configuration.
     * @return {@code this} for chaining.
     */
    public IgniteConfiguration setCacheKeyConfiguration(CacheKeyConfiguration... cacheKeyCfg) {
        this.cacheKeyCfg = cacheKeyCfg;
        return this;
    }
/**
* Gets configuration for Ignite Binary objects.
*
* @return Binary configuration object.
*/
public BinaryConfiguration getBinaryConfiguration() {
return binaryCfg;
}
    /**
     * Sets configuration for Ignite Binary objects.
     *
     * @param binaryCfg Binary configuration object.
     * @return {@code this} for chaining.
     */
    public IgniteConfiguration setBinaryConfiguration(BinaryConfiguration binaryCfg) {
        this.binaryCfg = binaryCfg;
        return this;
    }
/**
* Gets flag indicating whether cache sanity check is enabled. If enabled, then Ignite
* will perform the following checks and throw an exception if check fails:
* <ul>
* <li>Cache entry is not externally locked with {@code lock(...)} or {@code lockAsync(...)}
* methods when entry is enlisted to transaction.</li>
* <li>Each entry in affinity group-lock transaction has the same affinity key as was specified on
* affinity transaction start.</li>
* <li>Each entry in partition group-lock transaction belongs to the same partition as was specified
* on partition transaction start.</li>
* </ul>
* <p>
* These checks are not required for cache operation, but help to find subtle bugs. Disabling of this checks
* usually yields a noticeable performance gain.
* <p>
* If not provided, default value is {@link #DFLT_CACHE_SANITY_CHECK_ENABLED}.
*
* @return {@code True} if group lock sanity check is enabled.
*/
public boolean isCacheSanityCheckEnabled() {
return cacheSanityCheckEnabled;
}
/**
* Sets cache sanity check flag.
*
* @param cacheSanityCheckEnabled {@code True} if cache sanity check is enabled.
* @see #isCacheSanityCheckEnabled()
* @return {@code this} for chaining.
*/
public IgniteConfiguration setCacheSanityCheckEnabled(boolean cacheSanityCheckEnabled) {
this.cacheSanityCheckEnabled = cacheSanityCheckEnabled;
return this;
}
/**
* Gets array of event types, which will be recorded.
* <p>
* Note that by default all events in Ignite are disabled. Ignite can and often does generate thousands
* events per seconds under the load and therefore it creates a significant additional load on the system.
* If these events are not needed by the application this load is unnecessary and leads to significant
* performance degradation. So it is <b>highly recommended</b> to enable only those events that your
* application logic requires. Note that certain events are required for Ignite's internal operations
* and such events will still be generated but not stored by event storage SPI if they are disabled
* in Ignite configuration.
*
* @return Include event types.
*/
public int[] getIncludeEventTypes() {
return inclEvtTypes;
}
/**
* Sets array of event types, which will be recorded by {@link GridEventStorageManager#record(Event)}.
* Note, that either the include event types or the exclude event types can be established.
*
* @param inclEvtTypes Include event types.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setIncludeEventTypes(int... inclEvtTypes) {
this.inclEvtTypes = inclEvtTypes;
return this;
}
/**
* Sets system-wide local address or host for all Ignite components to bind to. If provided it will
* override all default local bind settings within Ignite or any of its SPIs.
*
* @param locHost Local IP address or host to bind to.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setLocalHost(String locHost) {
this.locHost = locHost;
return this;
}
/**
* Gets system-wide local address or host for all Ignite components to bind to. If provided it will
* override all default local bind settings within Ignite or any of its SPIs.
* <p>
* If {@code null} then Ignite tries to use local wildcard address. That means that
* all services will be available on all network interfaces of the host machine.
* <p>
* It is strongly recommended to set this parameter for all production environments.
* <p>
* If not provided, default is {@code null}.
*
* @return Local address or host to bind to.
*/
public String getLocalHost() {
return locHost;
}
    /**
     * Gets base UDP port number for grid time server. Time server will be started on one of free ports in range
     * {@code [timeServerPortBase, timeServerPortBase + timeServerPortRange - 1]}.
     * <p>
     * Time server provides clock synchronization between nodes.
     *
     * @return Time server port base.
     */
    public int getTimeServerPortBase() {
        return timeSrvPortBase;
    }
/**
* Sets time server port base.
*
* @param timeSrvPortBase Time server port base.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setTimeServerPortBase(int timeSrvPortBase) {
this.timeSrvPortBase = timeSrvPortBase;
return this;
}
/**
* Defines port range to try for time server start.
*
* @return Number of ports to try before server initialization fails.
*/
public int getTimeServerPortRange() {
return timeSrvPortRange;
}
/**
* Sets time server port range.
*
* @param timeSrvPortRange Time server port range.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setTimeServerPortRange(int timeSrvPortRange) {
this.timeSrvPortRange = timeSrvPortRange;
return this;
}
/**
* Gets array of system or environment properties to include into node attributes.
* If this array is {@code null}, which is default, then all system and environment
* properties will be included. If this array is empty, then none will be included.
* Otherwise, for every name provided, first a system property will be looked up,
* and then, if it is not found, environment property will be looked up.
*
* @return Array of system or environment properties to include into node attributes.
*/
public String[] getIncludeProperties() {
return includeProps;
}
/**
* Sets array of system or environment property names to include into node attributes.
* See {@link #getIncludeProperties()} for more info.
*
* @param includeProps Array of system or environment property names to include into node attributes.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setIncludeProperties(String... includeProps) {
this.includeProps = includeProps;
return this;
}
/**
* Gets frequency of metrics log print out.
* <p>
* If {@code 0}, metrics print out is disabled.
* <p>
* If not provided, then default value {@link #DFLT_METRICS_LOG_FREQ} is used.
*
* @return Frequency of metrics log print out.
*/
public long getMetricsLogFrequency() {
return metricsLogFreq;
}
/**
* Sets frequency of metrics log print out.
* <p>
* If {@code 0}, metrics print out is disabled.
* <p>
* If not provided, then default value {@link #DFLT_METRICS_LOG_FREQ} is used.
*
* @param metricsLogFreq Frequency of metrics log print out.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setMetricsLogFrequency(long metricsLogFreq) {
this.metricsLogFreq = metricsLogFreq;
return this;
}
/**
* Gets IGFS (Ignite In-Memory File System) configurations.
*
* @return IGFS configurations.
*/
public FileSystemConfiguration[] getFileSystemConfiguration() {
return igfsCfg;
}
/**
* Sets IGFS (Ignite In-Memory File System) configurations.
*
* @param igfsCfg IGFS configurations.
* @return {@code this} for chaining.
*/
public IgniteConfiguration setFileSystemConfiguration(FileSystemConfiguration... igfsCfg) {
this.igfsCfg = igfsCfg;
return this;
}
/**
 * Gets hadoop configuration.
 *
 * @return Hadoop configuration.
 */
public HadoopConfiguration getHadoopConfiguration() {
    return hadoopCfg;
}

/**
 * Sets hadoop configuration.
 *
 * @param hadoopCfg Hadoop configuration.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setHadoopConfiguration(HadoopConfiguration hadoopCfg) {
    this.hadoopCfg = hadoopCfg;
    return this;
}

/**
 * Gets connector configuration.
 *
 * @return Connector configuration.
 */
public ConnectorConfiguration getConnectorConfiguration() {
    return connectorCfg;
}

/**
 * Sets connector configuration.
 *
 * @param connectorCfg Connector configuration.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setConnectorConfiguration(ConnectorConfiguration connectorCfg) {
    this.connectorCfg = connectorCfg;
    return this;
}

/**
 * Gets configurations for services to be deployed on the grid.
 *
 * @return Configurations for services to be deployed on the grid.
 */
public ServiceConfiguration[] getServiceConfiguration() {
    // NOTE(review): internal array is returned as-is (no defensive copy).
    return svcCfgs;
}

/**
 * Sets configurations for services to be deployed on the grid.
 *
 * @param svcCfgs Configurations for services to be deployed on the grid.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setServiceConfiguration(ServiceConfiguration... svcCfgs) {
    this.svcCfgs = svcCfgs;
    return this;
}
/**
 * Gets map of pre-configured local event listeners.
 * Each listener is mapped to array of event types.
 *
 * @return Pre-configured event listeners map.
 * @see EventType
 */
public Map<IgnitePredicate<? extends Event>, int[]> getLocalEventListeners() {
    // NOTE(review): the internal map is exposed directly, not wrapped unmodifiable.
    return lsnrs;
}

/**
 * Sets map of pre-configured local event listeners.
 * Each listener is mapped to array of event types.
 *
 * @param lsnrs Pre-configured event listeners map.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setLocalEventListeners(Map<IgnitePredicate<? extends Event>, int[]> lsnrs) {
    // Stored by reference; later mutations of the supplied map affect this config.
    this.lsnrs = lsnrs;
    return this;
}

/**
 * Gets grid warmup closure. This closure will be executed before actual grid instance start. Configuration of
 * a starting instance will be passed to the closure so it can decide what operations to warm up.
 *
 * @return Warmup closure to execute.
 */
public IgniteInClosure<IgniteConfiguration> getWarmupClosure() {
    return warmupClos;
}

/**
 * Sets warmup closure to execute before grid startup.
 *
 * @param warmupClos Warmup closure to execute.
 * @see #getWarmupClosure()
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setWarmupClosure(IgniteInClosure<IgniteConfiguration> warmupClos) {
    this.warmupClos = warmupClos;
    return this;
}
/**
 * Gets transactions configuration.
 *
 * @return Transactions configuration.
 */
public TransactionConfiguration getTransactionConfiguration() {
    return txCfg;
}

/**
 * Sets transactions configuration.
 *
 * @param txCfg Transactions configuration.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setTransactionConfiguration(TransactionConfiguration txCfg) {
    this.txCfg = txCfg;
    return this;
}

/**
 * Gets plugin configurations.
 *
 * @return Plugin configurations.
 * @see PluginProvider
 */
public PluginConfiguration[] getPluginConfigurations() {
    // NOTE(review): internal array is returned without a defensive copy.
    return pluginCfgs;
}

/**
 * Sets plugin configurations.
 *
 * @param pluginCfgs Plugin configurations.
 * @return {@code this} for chaining.
 * @see PluginProvider
 */
public IgniteConfiguration setPluginConfigurations(PluginConfiguration... pluginCfgs) {
    this.pluginCfgs = pluginCfgs;
    return this;
}

/**
 * Gets atomic data structures configuration.
 *
 * @return Atomic data structures configuration.
 */
public AtomicConfiguration getAtomicConfiguration() {
    return atomicCfg;
}

/**
 * Sets atomic data structures configuration.
 *
 * @param atomicCfg Atomic data structures configuration.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setAtomicConfiguration(AtomicConfiguration atomicCfg) {
    this.atomicCfg = atomicCfg;
    return this;
}
/**
 * Sets loader which will be used for instantiating execution context ({@link EntryProcessor EntryProcessors},
 * {@link CacheEntryListener CacheEntryListeners}, {@link CacheLoader CacheLoaders} and
 * {@link ExpiryPolicy ExpiryPolicys}).
 *
 * @param classLdr Class loader.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setClassLoader(ClassLoader classLdr) {
    this.classLdr = classLdr;
    return this;
}

/**
 * Gets the user's class loader previously set via {@link #setClassLoader(ClassLoader)}.
 *
 * @return User's class loader.
 */
public ClassLoader getClassLoader() {
    return classLdr;
}

/**
 * Gets cache store session listener factories.
 *
 * @return Cache store session listener factories.
 * @see CacheStoreSessionListener
 */
public Factory<CacheStoreSessionListener>[] getCacheStoreSessionListenerFactories() {
    // NOTE(review): internal array is returned without a defensive copy.
    return storeSesLsnrs;
}

/**
 * Cache store session listener factories.
 * <p>
 * These are global store session listeners, so they are applied to
 * all caches. If you need to override listeners for a
 * particular cache, use {@link CacheConfiguration#setCacheStoreSessionListenerFactories(Factory[])}
 * configuration property.
 *
 * @param storeSesLsnrs Cache store session listener factories.
 * @return {@code this} for chaining.
 * @see CacheStoreSessionListener
 */
public IgniteConfiguration setCacheStoreSessionListenerFactories(
        Factory<CacheStoreSessionListener>... storeSesLsnrs) {
    // Generic varargs array is stored as-is (not copied).
    this.storeSesLsnrs = storeSesLsnrs;
    return this;
}
/**
 * Gets platform configuration.
 *
 * @return Platform configuration.
 */
public PlatformConfiguration getPlatformConfiguration() {
    return platformCfg;
}

/**
 * Sets platform configuration.
 * <p>
 * FIX: previously returned {@code void}, unlike every other setter in this
 * class which returns {@code this} for fluent chaining. Returning the
 * configuration is source-compatible with existing callers (they may simply
 * ignore the result) and restores API consistency.
 *
 * @param platformCfg Platform configuration.
 * @return {@code this} for chaining.
 */
public IgniteConfiguration setPlatformConfiguration(PlatformConfiguration platformCfg) {
    this.platformCfg = platformCfg;
    return this;
}
/** {@inheritDoc} */
@Override public String toString() {
    // Delegates to the project utility S.toString for the string representation.
    return S.toString(IgniteConfiguration.class, this);
}
}
|
package com.benny.openlauncher.fragment;
import android.content.SharedPreferences;
import android.graphics.PorterDuff;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.support.v7.preference.Preference;
import android.support.v7.preference.PreferenceFragmentCompat;
import android.support.v7.preference.PreferenceGroup;
import android.support.v7.widget.Toolbar;
import android.util.TypedValue;
import com.benny.openlauncher.R;
import com.benny.openlauncher.util.AppSettings;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Base class for settings screens. Binds to the "app" shared preferences file,
 * keeps the toolbar title and preference icons in sync while resumed, and
 * marks the app as requiring a restart when a preference outside the
 * no-restart set changes.
 */
public abstract class SettingsBaseFragment extends PreferenceFragmentCompat implements SharedPreferences.OnSharedPreferenceChangeListener {

    // String-resource ids of preference keys whose change takes effect
    // immediately and therefore must NOT trigger an app restart.
    private static final List<Integer> noRestart = new ArrayList<>(Arrays.asList(
            R.string.pref_key__gesture_double_tap,
            R.string.pref_key__gesture_swipe_up,
            R.string.pref_key__gesture_swipe_down,
            R.string.pref_key__gesture_pinch_in,
            R.string.pref_key__gesture_pinch_out
    ));

    @Override
    public void onCreatePreferences(Bundle savedInstanceState, String rootKey) {
        // All settings screens share the "app" preferences file.
        getPreferenceManager().setSharedPreferencesName("app");
    }

    @Override
    public void onResume() {
        super.onResume();
        Toolbar toolbar = getActivity().findViewById(R.id.toolbar);
        if (toolbar != null) {
            toolbar.setTitle(getPreferenceScreen().getTitle());
        }
        SharedPreferences sharedPreferences = AppSettings.get().getDefaultPreferences();
        sharedPreferences.registerOnSharedPreferenceChangeListener(this);
        updateIcons(getPreferenceScreen());
        updateSummaries();
    }

    @Override
    public void onPause() {
        super.onPause();
        SharedPreferences sharedPreferences = AppSettings.get().getDefaultPreferences();
        sharedPreferences.unregisterOnSharedPreferenceChangeListener(this);
    }

    @Override
    public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
        updateSummaries();
        // BUG FIX: 'noRestart' holds Integer string-resource ids while 'key' is the
        // preference key String, so the old noRestart.contains(key) could never
        // match and EVERY preference change forced a restart. Resolve each id to
        // its key string before comparing.
        if (!isNoRestartKey(key)) {
            AppSettings.get().setAppRestartRequired(true);
        }
    }

    /** Returns whether {@code key} is one of the keys that do not need a restart. */
    private boolean isNoRestartKey(String key) {
        // getString() requires an attached fragment; if detached, fall back to
        // "restart required" (the conservative behavior).
        if (key == null || !isAdded()) {
            return false;
        }
        for (Integer resId : noRestart) {
            if (key.equals(getString(resId))) {
                return true;
            }
        }
        return false;
    }

    /** Hook for subclasses to refresh preference summaries; default is a no-op. */
    public void updateSummaries() {
        // override in fragments
    }

    /**
     * Recursively tints the icons of all preferences in {@code prefGroup} using
     * the theme's {@code android.R.attr.textColor}.
     */
    public void updateIcons(PreferenceGroup prefGroup) {
        if (prefGroup != null && isAdded()) {
            int prefCount = prefGroup.getPreferenceCount();
            for (int i = 0; i < prefCount; i++) {
                Preference preference = prefGroup.getPreference(i);
                if (preference != null) {
                    Drawable drawable = preference.getIcon();
                    if (drawable != null) {
                        TypedValue color = new TypedValue();
                        getContext().getTheme().resolveAttribute(android.R.attr.textColor, color, true);
                        drawable.mutate().setColorFilter(getResources().getColor(color.resourceId), PorterDuff.Mode.SRC_IN);
                    }
                    if (preference instanceof PreferenceGroup) {
                        updateIcons((PreferenceGroup) preference);
                    }
                }
            }
        }
    }
}
|
public class Book implements Comparable<Book>{
private String bookName;
private int pages;
public Book(String bookName, int pages){
this.bookName = bookName;
this.pages = pages;
}
public String getBookName(){
return this.bookName;
}
public int getPages(){
return this.pages;
}
@Override
public int compareTo(Book comparedBook) {
return this.bookName.compareTo(comparedBook.getBookName());
//return this.bookName.compareTo(comparedBook.getBookName()) < 0 ? -1 : 1;
}
@Override
public String toString(){
return this.bookName + " : " + this.pages;
}
}
|
/*
Freeware License, some rights reserved
Copyright (c) 2019 Iuliana Cosmina
Permission is hereby granted, free of charge, to anyone obtaining a copy
of this software and associated documentation files (the "Software"),
to work with the Software within the limits of freeware distribution and fair use.
This includes the rights to use, copy, and modify the Software for personal use.
Users are also allowed and encouraged to submit corrections and modifications
to the Software for the benefit of other users.
It is not allowed to reuse, modify, or redistribute the Software for
commercial use in any way, or for a user's educational materials such as books
or blog articles without prior permission from the copyright holder.
The above copyright notice and this permission notice need to be included
in all copies or substantial portions of the software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS OR APRESS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.apress.cems.ex;
/**
 * Unchecked exception thrown when an operation fails in a way the caller is
 * not expected to recover from.
 *
 * @author Iuliana Cosmina
 * @since 1.0
 */
public class UnexpectedException extends RuntimeException {
    // FIX: exceptions are Serializable; declare serialVersionUID explicitly so
    // recompilation cannot silently break serialization compatibility.
    private static final long serialVersionUID = 1L;

    /**
     * Creates an exception with a detail message.
     *
     * @param message the detail message
     */
    public UnexpectedException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and the underlying cause.
     *
     * @param message the detail message
     * @param cause   the original exception, preserved for stack traces
     */
    public UnexpectedException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
/*
* 文件名称:BuyerService.java
* 系统名称:[系统名称]
* 模块名称:[模块名称]
* 软件版权:Copyright (c) 2011-2018, liming20110711@163.com All Rights Reserved.
* 功能说明:[请在此处输入功能说明]
* 开发人员:Rushing0711
* 创建日期:20180216 11:06
* 修改记录:
* <Version> <DateSerial> <Author> <Description>
* 1.0.0 20180216-01 Rushing0711 M201802161106 新建文件
********************************************************************************/
package com.coding.sell.service;
import com.coding.sell.DTO.OrderDTO;
/**
 * Buyer-side order operations.
 *
 * <p>Created: <font style="color:#00FFFF">20180216 11:06</font><br>
 * Service interface exposed to buyers for querying and cancelling their own orders.
 *
 * @author Rushing0711
 * @version 1.0.0
 * @since 1.0.0
 */
public interface BuyerService {
    /**
     * Looks up a single order.
     *
     * <p>Created: <font style="color:#00FFFF">20180216 11:09</font><br>
     *
     * @param openid buyer identifier (presumably a WeChat openid — TODO confirm with callers)
     * @param orderId id of the order to fetch
     * @return OrderDTO the matching order
     * @author Rushing0711
     * @since 1.0.0
     */
    OrderDTO findOrderOne(String openid, String orderId);

    /**
     * Cancels an order.
     *
     * <p>Created: <font style="color:#00FFFF">20180216 11:09</font><br>
     *
     * @param openid buyer identifier (presumably a WeChat openid — TODO confirm with callers)
     * @param orderId id of the order to cancel
     * @return OrderDTO the order after cancellation
     * @author Rushing0711
     * @version 1.0.0
     * @since 1.0.0
     */
    OrderDTO cancelOrder(String openid, String orderId);
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.analysis;
import org.apache.doris.catalog.AggregateFunction;
import org.apache.doris.catalog.Catalog;
import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.Database;
import org.apache.doris.catalog.OlapTable;
import org.apache.doris.catalog.Table.TableType;
import org.apache.doris.catalog.Type;
import org.apache.doris.cluster.ClusterNamespace;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.ColumnAliasGenerator;
import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.ErrorReport;
import org.apache.doris.common.Pair;
import org.apache.doris.common.TableAliasGenerator;
import org.apache.doris.common.TreeNode;
import org.apache.doris.common.UserException;
import org.apache.doris.common.util.SqlUtils;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.rewrite.ExprRewriter;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicates;
import com.google.common.base.Strings;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
/**
* Representation of a single select block, including GROUP BY, ORDER BY and HAVING
* clauses.
*/
public class SelectStmt extends QueryStmt {
private final static Logger LOG = LogManager.getLogger(SelectStmt.class);
// Unique id of this statement; preserved across clone() (see copy constructor).
private UUID id = UUID.randomUUID();
// ///////////////////////////////////////
// BEGIN: Members that need to be reset()
// Items of the SELECT clause (exprs, aliases, star items).
protected SelectList selectList;
private final ArrayList<String> colLabels; // lower case column labels
// Tables / views referenced in the FROM clause.
protected final FromClause fromClause_;
// GROUP BY clause, or null if none.
protected GroupByClause groupByClause;
// Snapshot of resultExprs taken before analysis rewrites; only populated when
// needToSql is set (see analyze()).
private List<Expr> originalExpr;
//
private Expr havingClause; // original having clause
// WHERE predicate, or null if none.
protected Expr whereClause;
// havingClause with aliases and agg output resolved
private Expr havingPred;
// set if we have any kind of aggregation operation, include SELECT DISTINCT
private AggregateInfo aggInfo;
// set if we have analytic function
private AnalyticInfo analyticInfo;
// substitutes all exprs in this select block to reference base tables
// directly
private ExprSubstitutionMap baseTblSmap = new ExprSubstitutionMap();
// Rows supplied via a VALUES-style value list, or null.
private ValueList valueList;
// if we have grouping extensions like cube or rollup or grouping sets
private GroupingInfo groupingInfo;
// having clause which has been analyzed
// For example: select k1, sum(k2) a from t group by k1 having a>1;
// this parameter: sum(t.k2) > 1
private Expr havingClauseAfterAnaylzed;
// END: Members that need to be reset()
// ///////////////////////////////////////
// SQL string of this SelectStmt before inline-view expression substitution.
// Set in analyze().
protected String sqlString_;
// Table alias generator used during query rewriting.
private TableAliasGenerator tableAliasGenerator = null;
/**
 * Creates a SELECT statement backed by a value list (VALUES-style statement),
 * with optional ORDER BY / LIMIT. The select list and FROM clause start empty.
 */
public SelectStmt(ValueList valueList, ArrayList<OrderByElement> orderByElement, LimitElement limitElement) {
    super(orderByElement, limitElement);
    this.valueList = valueList;
    this.selectList = new SelectList();
    this.fromClause_ = new FromClause();
    this.colLabels = Lists.newArrayList();
}
/**
 * Creates a SELECT statement from parsed clauses.
 *
 * @param selectList      SELECT items
 * @param fromClause      FROM clause; an empty clause is substituted when null
 * @param wherePredicate  WHERE predicate, or null
 * @param groupByClause   GROUP BY clause, or null
 * @param havingPredicate HAVING predicate, or null
 * @param orderByElements ORDER BY elements, or null
 * @param limitElement    LIMIT/OFFSET element, or null
 */
SelectStmt(
        SelectList selectList,
        FromClause fromClause,
        Expr wherePredicate,
        GroupByClause groupByClause,
        Expr havingPredicate,
        ArrayList<OrderByElement> orderByElements,
        LimitElement limitElement) {
    super(orderByElements, limitElement);
    this.selectList = selectList;
    if (fromClause == null) {
        fromClause_ = new FromClause();
    } else {
        fromClause_ = fromClause;
    }
    this.whereClause = wherePredicate;
    this.groupByClause = groupByClause;
    this.havingClause = havingPredicate;
    this.colLabels = Lists.newArrayList();
    // Analysis outputs start out unset; populated by analyze().
    this.havingPred = null;
    this.aggInfo = null;
    this.sortInfo = null;
    this.groupingInfo = null;
}
/**
 * Copy constructor used by {@link #clone()}. Deep-clones mutable members and
 * keeps the same statement id as {@code other}.
 */
protected SelectStmt(SelectStmt other) {
    super(other);
    this.id = other.id;
    selectList = other.selectList.clone();
    fromClause_ = other.fromClause_.clone();
    whereClause = (other.whereClause != null) ? other.whereClause.clone() : null;
    groupByClause = (other.groupByClause != null) ? other.groupByClause.clone() : null;
    havingClause = (other.havingClause != null) ? other.havingClause.clone() : null;
    colLabels = Lists.newArrayList(other.colLabels);
    aggInfo = (other.aggInfo != null) ? other.aggInfo.clone() : null;
    analyticInfo = (other.analyticInfo != null) ? other.analyticInfo.clone() : null;
    sqlString_ = (other.sqlString_ != null) ? other.sqlString_ : null;
    baseTblSmap = other.baseTblSmap.clone();
    // Grouping info is an analysis output; it is not carried over into the copy.
    groupingInfo = null;
}
/** Restores this statement to its pre-analysis state so it can be re-analyzed. */
@Override
public void reset() {
    super.reset();
    selectList.reset();
    colLabels.clear();
    fromClause_.reset();
    if (whereClause != null) {
        whereClause.reset();
    }
    if (groupByClause != null) {
        groupByClause.reset();
    }
    if (havingClause != null) {
        havingClause.reset();
    }
    // Drop all analysis outputs.
    havingClauseAfterAnaylzed = null;
    havingPred = null;
    aggInfo = null;
    analyticInfo = null;
    baseTblSmap.clear();
    groupingInfo = null;
}
@Override
public QueryStmt clone() {
    return new SelectStmt(this);
}

/** @return the unique id of this statement (shared with its clones). */
public UUID getId() {
    return id;
}

/**
 * @return the original select list items from the query
 */
public SelectList getSelectList() {
    return selectList;
}

/** Replaces the select list (used during rewriting). */
public void setSelectList(SelectList selectList) {
    this.selectList = selectList;
}

/** @return the value list rows, or null if this is not a VALUES-style statement. */
public ValueList getValueList() {
    return valueList;
}

/**
 * @return the HAVING clause post-analysis and with aliases resolved
 */
public Expr getHavingPred() {
    return havingPred;
}

/** @return the analyzed HAVING clause, e.g. {@code sum(t.k2) > 1}; null before analysis. */
public Expr getHavingClauseAfterAnaylzed() {
    return havingClauseAfterAnaylzed;
}

/** @return the table references of the FROM clause. */
public List<TableRef> getTableRefs() {
    return fromClause_.getTableRefs();
}

/** @return the WHERE predicate, or null if none. */
public Expr getWhereClause() {
    return whereClause;
}

public void setWhereClause(Expr whereClause) {
    this.whereClause = whereClause;
}

/** @return aggregation info, or null when the statement has no aggregation. */
public AggregateInfo getAggInfo() {
    return aggInfo;
}

/** @return grouping-extension info (grouping sets/cube/rollup), or null. */
public GroupingInfo getGroupingInfo() {
    return groupingInfo;
}

public GroupByClause getGroupByClause() {
    return groupByClause;
}

/** @return analytic (window) function info, or null. */
public AnalyticInfo getAnalyticInfo() {
    return analyticInfo;
}

public boolean hasAnalyticInfo() {
    return analyticInfo != null;
}

public boolean hasHavingClause() {
    return havingClause != null;
}

public void removeHavingClause() {
    havingClause = null;
}

@Override
public SortInfo getSortInfo() {
    return sortInfo;
}

@Override
public ArrayList<String> getColLabels() {
    return colLabels;
}

/** @return substitution map rewriting exprs to reference base tables directly. */
public ExprSubstitutionMap getBaseTblSmap() {
    return baseTblSmap;
}
/**
 * Collects every database referenced by this statement (WITH clause, inline
 * views, and base tables) into {@code dbs}, keyed by fully-qualified db name.
 * Also verifies the current user holds SELECT privilege on each referenced
 * table.
 *
 * @throws AnalysisException if a db name is missing/unknown or access is denied
 */
@Override
public void getDbs(Analyzer analyzer, Map<String, Database> dbs) throws AnalysisException {
    getWithClauseDbs(analyzer, dbs);
    for (TableRef tblRef : fromClause_) {
        if (tblRef instanceof InlineViewRef) {
            // Inline view reference: recurse into the view's own statement.
            QueryStmt inlineStmt = ((InlineViewRef) tblRef).getViewStmt();
            inlineStmt.getDbs(analyzer, dbs);
        } else {
            String dbName = tblRef.getName().getDb();
            if (Strings.isNullOrEmpty(dbName)) {
                // Unqualified table name: fall back to the session's default db.
                dbName = analyzer.getDefaultDb();
            } else {
                dbName = ClusterNamespace.getFullName(analyzer.getClusterName(), tblRef.getName().getDb());
            }
            if (Strings.isNullOrEmpty(dbName)) {
                ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
            }
            Database db = analyzer.getCatalog().getDb(dbName);
            if (db == null) {
                ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
            }
            // check auth
            if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbName,
                    tblRef.getName().getTbl(),
                    PrivPredicate.SELECT)) {
                ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "SELECT",
                        ConnectContext.get().getQualifiedUser(),
                        ConnectContext.get().getRemoteIP(),
                        tblRef.getName().getTbl());
            }
            dbs.put(dbName, db);
        }
    }
}
// Column alias generator used during query rewriting.
private ColumnAliasGenerator columnAliasGenerator = null;

/**
 * Lazily creates the column alias generator used during query rewriting.
 * No synchronization — assumes single-threaded analysis (TODO confirm).
 */
public ColumnAliasGenerator getColumnAliasGenerator() {
    if (columnAliasGenerator == null) {
        columnAliasGenerator = new ColumnAliasGenerator(colLabels, null);
    }
    return columnAliasGenerator;
}

/** Lazily creates the table alias generator used during query rewriting. */
public TableAliasGenerator getTableAliasGenerator() {
    if (tableAliasGenerator == null) {
        tableAliasGenerator = new TableAliasGenerator(analyzer, null);
    }
    return tableAliasGenerator;
}

public void setTableAliasGenerator(TableAliasGenerator tableAliasGenerator) {
    this.tableAliasGenerator = tableAliasGenerator;
}
/**
 * Performs semantic analysis of this select block: analyzes the FROM clause,
 * expands '*' items, resolves select-list aliases, rewrites count(distinct)
 * over bitmap/HLL, validates GROUPING usage, analyzes WHERE / ORDER BY /
 * aggregation / analytic functions, and finally reorders tables and resolves
 * inline-view references. Idempotent: returns immediately if already analyzed.
 *
 * @throws AnalysisException on semantic errors (e.g. subquery in select list,
 *         aggregates in WHERE, analytic exprs without FROM)
 * @throws UserException propagated from lower-level analysis
 */
public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
    if (isAnalyzed()) {
        return;
    }
    super.analyze(analyzer);
    fromClause_.setNeedToSql(needToSql);
    fromClause_.analyze(analyzer);
    // Generate !empty() predicates to filter out empty collections.
    // Skip this step when analyzing a WITH-clause because CollectionTableRefs
    // do not register collection slots in their parent in that context
    // (see CollectionTableRef.analyze()).
    if (!analyzer.isWithClause()) {
        registerIsNotEmptyPredicates(analyzer);
    }
    // populate selectListExprs, aliasSMap, groupingSmap and colNames
    for (SelectListItem item : selectList.getItems()) {
        if (item.isStar()) {
            TableName tblName = item.getTblName();
            if (tblName == null) {
                expandStar(analyzer);
            } else {
                expandStar(analyzer, tblName);
            }
        } else {
            // Analyze the resultExpr before generating a label to ensure enforcement
            // of expr child and depth limits (toColumn() label may call toSql()).
            item.getExpr().analyze(analyzer);
            if (!(item.getExpr() instanceof CaseExpr) &&
                    item.getExpr().contains(Predicates.instanceOf(Subquery.class))) {
                throw new AnalysisException("Subquery is not supported in the select list.");
            }
            Expr expr = rewriteCountDistinctForBitmapOrHLL(item.getExpr(), analyzer);
            resultExprs.add(expr);
            SlotRef aliasRef = new SlotRef(null, item.toColumnLabel());
            Expr existingAliasExpr = aliasSMap.get(aliasRef);
            if (existingAliasExpr != null && !existingAliasExpr.equals(item.getExpr())) {
                // If we have already seen this alias, it refers to more than one column and
                // therefore is ambiguous.
                ambiguousAliasList.add(aliasRef);
            }
            aliasSMap.put(aliasRef, item.getExpr().clone());
            colLabels.add(item.toColumnLabel());
        }
    }
    // Validate and set up grouping extensions (GROUPING SETS / CUBE / ROLLUP).
    if (groupByClause != null && groupByClause.isGroupByExtension()) {
        for (SelectListItem item : selectList.getItems()) {
            if (item.getExpr() instanceof FunctionCallExpr && item.getExpr().fn instanceof AggregateFunction) {
                for (Expr expr: groupByClause.getGroupingExprs()) {
                    if (item.getExpr().contains(expr)) {
                        throw new AnalysisException("column: " + expr.toSql() + " cannot both in select list and "
                                + "aggregate functions when using GROUPING SETS/CUBE/ROLLUP, please use union"
                                + " instead.");
                    }
                }
            }
        }
        groupingInfo = new GroupingInfo(analyzer, groupByClause.getGroupingType());
        groupingInfo.substituteGroupingFn(resultExprs, analyzer);
    } else {
        for (Expr expr : resultExprs) {
            if (checkGroupingFn(expr)) {
                throw new AnalysisException(
                        "cannot use GROUPING functions without [grouping sets|rollup|cube] "
                                + "clause or grouping sets only have one element.");
            }
        }
    }
    if (valueList != null) {
        if (!fromInsert) {
            valueList.analyzeForSelect(analyzer);
        }
        for (Expr expr : valueList.getFirstRow()) {
            if (expr instanceof DefaultValueExpr) {
                resultExprs.add(new IntLiteral(1));
            } else {
                resultExprs.add(expr);
            }
            colLabels.add(expr.toColumnLabel());
        }
    }
    // analyze valueList if exists
    if (needToSql) {
        originalExpr = Expr.cloneList(resultExprs);
    }
    // analyze selectListExprs
    Expr.analyze(resultExprs, analyzer);
    if (TreeNode.contains(resultExprs, AnalyticExpr.class)) {
        if (fromClause_.isEmpty()) {
            throw new AnalysisException("Analytic expressions require FROM clause.");
        }
        // do this here, not after analyzeAggregation(), otherwise the AnalyticExprs
        // will get substituted away
        if (selectList.isDistinct()) {
            throw new AnalysisException(
                    "cannot combine SELECT DISTINCT with analytic functions");
        }
    }
    if (whereClause != null) {
        whereClauseRewrite();
        if (checkGroupingFn(whereClause)) {
            throw new AnalysisException("grouping operations are not allowed in WHERE.");
        }
        whereClause.analyze(analyzer);
        if (whereClause.containsAggregate()) {
            ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_GROUP_FUNC_USE);
        }
        whereClause.checkReturnsBool("WHERE clause", false);
        Expr e = whereClause.findFirstOf(AnalyticExpr.class);
        if (e != null) {
            throw new AnalysisException(
                    "WHERE clause must not contain analytic expressions: " + e.toSql());
        }
        analyzer.registerConjuncts(whereClause, false, getTableRefIds());
    }
    createSortInfo(analyzer);
    if (sortInfo != null && CollectionUtils.isNotEmpty(sortInfo.getOrderingExprs())) {
        if (groupingInfo != null) {
            // Substitute grouping functions in ORDER BY exprs as well.
            groupingInfo.substituteGroupingFn(sortInfo.getOrderingExprs(), analyzer);
        }
    }
    analyzeAggregation(analyzer);
    createAnalyticInfo(analyzer);
    if (evaluateOrderBy) {
        createSortTupleInfo(analyzer);
    }
    if (needToSql) {
        sqlString_ = toSql();
    }
    reorderTable(analyzer);
    resolveInlineViewRefs(analyzer);
    if (analyzer.hasEmptySpjResultSet() && aggInfo == null) {
        analyzer.setHasEmptyResultSet();
    }
    if (aggInfo != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("post-analysis " + aggInfo.debugString());
        }
    }
}
/** Collects the tuple id of every table reference in the FROM clause. */
public List<TupleId> getTableRefIds() {
    List<TupleId> tupleIds = new ArrayList<>();
    for (TableRef tableRef : fromClause_) {
        tupleIds.add(tableRef.getId());
    }
    return tupleIds;
}
/**
 * Gathers every tuple id referenced by this statement: result exprs first,
 * then FROM-clause tables, the WHERE clause, and the analyzed HAVING clause.
 */
@Override
public List<TupleId> collectTupleIds() {
    List<TupleId> tupleIds = Lists.newArrayList();
    for (Expr resultExpr : resultExprs) {
        resultExpr.getIds(tupleIds, null);
    }
    tupleIds.addAll(getTableRefIds());
    if (whereClause != null) {
        whereClause.getIds(tupleIds, null);
    }
    if (havingClauseAfterAnaylzed != null) {
        havingClauseAfterAnaylzed.getIds(tupleIds, null);
    }
    return tupleIds;
}
/**
 * Normalizes the WHERE clause before analysis: pulls common terms out of OR
 * branches (see {@link #deduplicateOrs}) and folds a bare integer literal into
 * the equivalent boolean literal (0 -> false, non-zero -> true).
 */
private void whereClauseRewrite() {
    Expr rewritten = deduplicateOrs(whereClause);
    if (rewritten != null) {
        whereClause = rewritten;
    }
    if (whereClause instanceof IntLiteral) {
        boolean truthy = ((IntLiteral) whereClause).getLongValue() != 0;
        whereClause = new BoolLiteral(truthy);
    }
}
/**
 * Handles only clauses of the shape (a AND b AND c) OR (d AND e AND f):
 * flattens each OR branch into its list of conjuncts, e.g.
 * [[a, b, c], [d, e, f]]. Nested ORs are flattened recursively; a child that
 * is neither AND nor OR becomes a singleton conjunct list.
 */
private List<List<Expr>> extractDuplicateOrs(CompoundPredicate expr) {
    List<List<Expr>> disjuncts = new ArrayList<>();
    for (Expr child : expr.getChildren()) {
        CompoundPredicate.Operator childOp = (child instanceof CompoundPredicate)
                ? ((CompoundPredicate) child).getOp() : null;
        if (childOp == CompoundPredicate.Operator.OR) {
            disjuncts.addAll(extractDuplicateOrs((CompoundPredicate) child));
        } else if (childOp == CompoundPredicate.Operator.AND) {
            disjuncts.add(flatAndExpr(child));
        } else {
            disjuncts.add(Arrays.asList(child));
        }
    }
    return disjuncts;
}
/**
 * Attempts to apply the inverse OR distributive law:
 * ((A AND B) OR (A AND C)) => (A AND (B OR C)).
 * An OR node is rewritten in place of itself when common terms are found; for
 * any other node the children are rewritten recursively.
 */
private Expr deduplicateOrs(Expr expr) {
    boolean isOr = expr instanceof CompoundPredicate
            && ((CompoundPredicate) expr).getOp() == CompoundPredicate.Operator.OR;
    if (isOr) {
        Expr pulledUp = processDuplicateOrs(extractDuplicateOrs((CompoundPredicate) expr));
        return (pulledUp != null) ? pulledUp : expr;
    }
    for (int i = 0; i < expr.getChildren().size(); i++) {
        Expr rewrittenChild = deduplicateOrs(expr.getChild(i));
        if (rewrittenChild != null) {
            expr.setChild(i, rewrittenChild);
        }
    }
    return expr;
}
/**
 * Flattens a conjunction into its operand list: a AND b AND c => [a, b, c].
 * A non-AND expr yields a singleton list.
 */
private List<Expr> flatAndExpr(Expr expr) {
    List<Expr> conjuncts = new ArrayList<>();
    boolean isAnd = expr instanceof CompoundPredicate
            && ((CompoundPredicate) expr).getOp() == CompoundPredicate.Operator.AND;
    if (!isAnd) {
        conjuncts.add(expr);
        return conjuncts;
    }
    conjuncts.addAll(flatAndExpr(expr.getChild(0)));
    conjuncts.addAll(flatAndExpr(expr.getChild(1)));
    return conjuncts;
}
/**
 * Applies the inverse OR distributive law to a disjunction of conjunctions.
 * <p>
 * The input is a list of lists: each inner list holds AND-connected exprs, the
 * outer list is OR-connected. For example {@code (a AND b AND c) OR (a AND e AND f)}
 * arrives (via {@link #extractDuplicateOrs}) as {@code [[a, b, c], [a, e, f]]};
 * after pulling out the common term the result is {@code a AND ((b AND c) OR (e AND f))}.
 *
 * @return the rewritten expr, or null when there are fewer than two disjuncts
 *         (nothing to rewrite)
 */
private Expr processDuplicateOrs(List<List<Expr>> exprs) {
    if (exprs.size() < 2) {
        return null;
    }
    // 1. remove duplicated elements [[a,a], [a, b], [a,b]] => [[a], [a,b]]
    Set<Set<Expr>> set = new LinkedHashSet<>();
    for (List<Expr> ex : exprs) {
        Set<Expr> es = new LinkedHashSet<>();
        es.addAll(ex);
        set.add(es);
    }
    List<List<Expr>> clearExprs = new ArrayList<>();
    for (Set<Expr> es : set) {
        List<Expr> el = new ArrayList<>();
        el.addAll(es);
        clearExprs.add(el);
    }
    if (clearExprs.size() == 1) {
        // All disjuncts were identical: the OR collapses to one conjunction.
        return makeCompound(clearExprs.get(0), CompoundPredicate.Operator.AND);
    }
    // 2. find terms duplicated across the clauses
    List<Expr> cloneExprs = new ArrayList<>(clearExprs.get(0));
    for (int i = 1; i < clearExprs.size(); ++i) {
        cloneExprs.retainAll(clearExprs.get(i));
    }
    List<Expr> temp = new ArrayList<>();
    if (CollectionUtils.isNotEmpty(cloneExprs)) {
        temp.add(makeCompound(cloneExprs, CompoundPredicate.Operator.AND));
    }
    // NOTE(review): if a disjunct consists solely of the common terms (e.g.
    // (a) OR (a AND b)), its list becomes empty here and makeCompound returns
    // null, which then flows into a CompoundPredicate operand — TODO confirm
    // whether this case can reach here and handle absorption explicitly.
    for (List<Expr> exprList : clearExprs) {
        exprList.removeAll(cloneExprs);
        temp.add(makeCompound(exprList, CompoundPredicate.Operator.AND));
    }
    // Rebuild the CompoundPredicate: if a duplicate predicate was found, build
    // (predicate) AND (.. OR ..); otherwise just (.. OR ..).
    Expr result = CollectionUtils.isNotEmpty(cloneExprs) ? new CompoundPredicate(CompoundPredicate.Operator.AND,
            temp.get(0), makeCompound(temp.subList(1, temp.size()), CompoundPredicate.Operator.OR))
            : makeCompound(temp, CompoundPredicate.Operator.OR);
    // FIX: guard the debug log so the string concatenation and result.toSql()
    // are not evaluated on every rewrite when debug logging is disabled;
    // matches the LOG.isDebugEnabled() pattern used in analyze().
    if (LOG.isDebugEnabled()) {
        LOG.debug("rewrite ors: " + result.toSql());
    }
    return result;
}
/**
 * Rebuilds a CompoundPredicate from a flat operand list by left-folding under
 * the given operator, e.g. [a, e, f] with AND => ((a AND e) AND f).
 * Returns null for an empty list, and the sole operand for a singleton list.
 */
private Expr makeCompound(List<Expr> exprs, CompoundPredicate.Operator op) {
    if (CollectionUtils.isEmpty(exprs)) {
        return null;
    }
    if (exprs.size() == 1) {
        return exprs.get(0);
    }
    Expr combined = new CompoundPredicate(op, exprs.get(0), exprs.get(1));
    for (Expr operand : exprs.subList(2, exprs.size())) {
        combined = new CompoundPredicate(op, combined.clone(), operand);
    }
    return combined;
}
/**
 * Generates and registers !empty() predicates to filter out empty collections directly
 * in the parent scan of collection table refs. This is a performance optimization to
 * avoid the expensive processing of empty collections inside a subplan that would
 * yield an empty result set.
 * <p>
 * For correctness purposes, the predicates are generated in cases where we can ensure
 * that they will be assigned only to the parent scan, and no other plan node.
 * <p>
 * The conditions are as follows:
 * - collection table ref is relative and non-correlated
 * - collection table ref represents the rhs of an inner/cross/semi join
 * - collection table ref's parent tuple is not outer joined
 * <p>
 * TODO: In some cases, it is possible to generate !empty() predicates for a correlated
 * table ref, but in general, that is not correct for non-trivial query blocks.
 * For example, if the block with the correlated ref has an aggregation then adding a
 * !empty() predicate would incorrectly discard rows from the final result set.
 * TODO: Evaluating !empty() predicates at non-scan nodes interacts poorly with our BE
 * projection of collection slots. For example, rows could incorrectly be filtered if
 * a !empty() predicate is assigned to a plan node that comes after the unnest of the
 * collection that also performs the projection.
 */
private void registerIsNotEmptyPredicates(Analyzer analyzer) throws AnalysisException {
    // NOTE(review): the entire implementation below is commented out, so this
    // method is currently a no-op; the body is retained as documentation of the
    // intended (Impala-derived) behavior.
    /*
    for (TableRef tblRef: fromClause_.getTableRefs()) {
        Preconditions.checkState(tblRef.isResolved());
        if (!(tblRef instanceof CollectionTableRef)) continue;
        CollectionTableRef ref = (CollectionTableRef) tblRef;
        // Skip non-relative and correlated refs.
        if (!ref.isRelative() || ref.isCorrelated()) continue;
        // Skip outer and anti joins.
        if (ref.getJoinOp().isOuterJoin() || ref.getJoinOp().isAntiJoin()) continue;
        // Do not generate a predicate if the parent tuple is outer joined.
        if (analyzer.isOuterJoined(ref.getResolvedPath().getRootDesc().getId())) continue;
        IsNotEmptyPredicate isNotEmptyPred =
            new IsNotEmptyPredicate(ref.getCollectionExpr().clone());
        isNotEmptyPred.analyze(analyzer);
        // Register the predicate as an On-clause conjunct because it should only
        // affect the result of this join and not the whole FROM clause.
        analyzer.registerOnClauseConjuncts(
            Lists.<Expr>newArrayList(isNotEmptyPred), ref);
    }
    */
}
    /**
     * Marks all slots required for execution as materialized: unassigned join
     * predicates, plus the exprs referenced by sortInfo, analyticInfo and aggInfo.
     * Ordering matters: ordering exprs before analytic exprs before agg exprs,
     * because each earlier group may reference exprs of the later groups.
     */
    public void materializeRequiredSlots(Analyzer analyzer) throws AnalysisException {
        // Mark unassigned join predicates. Some predicates that must be evaluated by a join
        // can also be safely evaluated below the join (picked up by getBoundPredicates()).
        // Such predicates will be marked twice and that is ok.
        List<Expr> unassigned =
                analyzer.getUnassignedConjuncts(getTableRefIds(), true);
        List<Expr> unassignedJoinConjuncts = Lists.newArrayList();
        for (Expr e : unassigned) {
            if (analyzer.evalAfterJoin(e)) {
                unassignedJoinConjuncts.add(e);
            }
        }
        // Substitute to base-table exprs so slot marking hits the physical slots.
        List<Expr> baseTblJoinConjuncts =
                Expr.trySubstituteList(unassignedJoinConjuncts, baseTblSmap, analyzer, false);
        analyzer.materializeSlots(baseTblJoinConjuncts);
        if (evaluateOrderBy) {
            // mark ordering exprs before marking agg/analytic exprs because they could contain
            // agg/analytic exprs that are not referenced anywhere but the ORDER BY clause
            sortInfo.materializeRequiredSlots(analyzer, baseTblSmap);
        }
        if (hasAnalyticInfo()) {
            // Mark analytic exprs before marking agg exprs because they could contain agg
            // exprs that are not referenced anywhere but the analytic expr.
            // Gather unassigned predicates and mark their slots. It is not desirable
            // to account for propagated predicates because if an analytic expr is only
            // referenced by a propagated predicate, then it's better to not materialize the
            // analytic expr at all.
            ArrayList<TupleId> tids = Lists.newArrayList();
            getMaterializedTupleIds(tids); // includes the analytic tuple
            List<Expr> conjuncts = analyzer.getUnassignedConjuncts(tids);
            analyzer.materializeSlots(conjuncts);
            analyticInfo.materializeRequiredSlots(analyzer, baseTblSmap);
        }
        if (aggInfo != null) {
            // mark all agg exprs needed for HAVING pred and binding predicates as materialized
            // before calling AggregateInfo.materializeRequiredSlots(), otherwise they won't
            // show up in AggregateInfo.getMaterializedAggregateExprs()
            ArrayList<Expr> havingConjuncts = Lists.newArrayList();
            if (havingPred != null) {
                havingConjuncts.add(havingPred);
            }
            // Binding predicates are assigned to the final output tuple of the aggregation,
            // which is the tuple of the 2nd phase agg for distinct aggs.
            // TODO(zc):
            // ArrayList<Expr> bindingPredicates =
            //         analyzer.getBoundPredicates(aggInfo.getResultTupleId(), groupBySlots, false);
            // havingConjuncts.addAll(bindingPredicates);
            havingConjuncts.addAll(
                analyzer.getUnassignedConjuncts(aggInfo.getResultTupleId().asList()));
            materializeSlots(analyzer, havingConjuncts);
            aggInfo.materializeRequiredSlots(analyzer, baseTblSmap);
        }
    }
protected void reorderTable(Analyzer analyzer) throws AnalysisException {
List<Pair<TableRef, Long>> candidates = Lists.newArrayList();
// New pair of table ref and row count
for (TableRef tblRef : fromClause_) {
if (tblRef.getJoinOp() != JoinOperator.INNER_JOIN || tblRef.hasJoinHints()) {
// Unsupported reorder outer join
return;
}
long rowCount = 0;
if (tblRef.getTable().getType() == TableType.OLAP) {
rowCount = ((OlapTable) (tblRef.getTable())).getRowCount();
LOG.debug("tableName={} rowCount={}", tblRef.getAlias(), rowCount);
}
candidates.add(new Pair(tblRef, rowCount));
}
// give InlineView row count
long last = 0;
for (int i = candidates.size() - 1; i >= 0; --i) {
Pair<TableRef, Long> candidate = candidates.get(i);
if (candidate.first instanceof InlineViewRef) {
candidate.second = last;
}
last = candidate.second + 1;
}
// order oldRefList by row count
Collections.sort(candidates, (a, b) -> b.second.compareTo(a.second));
for (Pair<TableRef, Long> candidate : candidates) {
if (reorderTable(analyzer, candidate.first)) {
// as long as one scheme success, we return this scheme immediately.
// in this scheme, candidate.first will be consider to be the big table in star schema.
// this scheme might not be fit for snowflake schema.
return;
}
}
// can not get AST only with equal join, MayBe cross join can help
fromClause_.clear();
for (Pair<TableRef, Long> candidate : candidates) {
fromClause_.add(candidate.first);
}
}
    // reorder select table
    /**
     * Tries to build a join order that starts at firstRef and only appends a
     * table once every tuple referenced by its equi-join conjuncts is already
     * placed (or is the table itself). On failure the original FROM-clause
     * order is restored.
     *
     * @param firstRef the table ref to place first (the presumed big table)
     * @return true if a fully connected order was found, false otherwise
     */
    protected boolean reorderTable(Analyzer analyzer, TableRef firstRef)
            throws AnalysisException {
        List<TableRef> tmpRefList = Lists.newArrayList();
        Map<TupleId, TableRef> tableRefMap = Maps.newHashMap();
        // set Map and push list (tmpRefList keeps the original order for rollback)
        for (TableRef tblRef : fromClause_) {
            tableRefMap.put(tblRef.getId(), tblRef);
            tmpRefList.add(tblRef);
        }
        // clear tableRefList
        fromClause_.clear();
        // mark first table
        fromClause_.add(firstRef);
        tableRefMap.remove(firstRef.getId());
        // record TupleIds that have been added successfully
        Set<TupleId> validTupleId = Sets.newHashSet();
        validTupleId.add(firstRef.getId());
        // BFS over already-placed tables, appending join-connected candidates
        int i = 0;
        while (i < fromClause_.size()) {
            TableRef tblRef = fromClause_.get(i);
            // get all equi-join conjuncts touching this table
            List<Expr> eqJoinPredicates = analyzer.getEqJoinConjuncts(tblRef.getId());
            List<TupleId> tuple_list = Lists.newArrayList();
            Expr.getIds(eqJoinPredicates, tuple_list, null);
            for (TupleId tid : tuple_list) {
                if (validTupleId.contains(tid)) {
                    // tid is already in validTupleId, ignore it
                    continue;
                }
                TableRef candidateTableRef = tableRefMap.get(tid);
                if (candidateTableRef != null) {
                    // When sorting table according to the rows, you must ensure
                    // that all tables On-conjuncts referenced has been added or
                    // is being added.
                    // NOTE(review): reference equality (==) on TupleId — this assumes
                    // the analyzer hands out the same TupleId instances as
                    // tblRef.getId(); confirm, otherwise this checkState could fire
                    // even when the ids are logically equal.
                    Preconditions.checkState(tid == candidateTableRef.getId());
                    List<Expr> candidateEqJoinPredicates = analyzer.getEqJoinConjunctsExcludeAuxPredicates(tid);
                    List<TupleId> candidateTupleList = Lists.newArrayList();
                    Expr.getIds(candidateEqJoinPredicates, candidateTupleList, null);
                    int count = candidateTupleList.size();
                    for (TupleId tupleId : candidateTupleList) {
                        if (validTupleId.contains(tupleId) || tid == tupleId) {
                            count--;
                        }
                    }
                    // count == 0: every referenced tuple is placed (or is tid itself)
                    if (count == 0) {
                        fromClause_.add(candidateTableRef);
                        validTupleId.add(tid);
                        tableRefMap.remove(tid);
                    }
                }
            }
            i++;
        }
        // find path failed: roll back to the original FROM-clause order.
        if (0 != tableRefMap.size()) {
            fromClause_.clear();
            fromClause_.addAll(tmpRefList);
            return false;
        }
        return true;
    }
/**
* Populates baseTblSmap_ with our combined inline view smap and creates
* baseTblResultExprs.
*/
protected void resolveInlineViewRefs(Analyzer analyzer) throws AnalysisException {
// Gather the inline view substitution maps from the enclosed inline views
for (TableRef tblRef : fromClause_) {
if (tblRef instanceof InlineViewRef) {
InlineViewRef inlineViewRef = (InlineViewRef) tblRef;
baseTblSmap = ExprSubstitutionMap.combine(baseTblSmap, inlineViewRef.getBaseTblSmap());
}
}
baseTblResultExprs = Expr.trySubstituteList(resultExprs, baseTblSmap, analyzer, false);
if (LOG.isDebugEnabled()) {
LOG.debug("baseTblSmap_: " + baseTblSmap.debugString());
LOG.debug("resultExprs: " + Expr.debugString(resultExprs));
LOG.debug("baseTblResultExprs: " + Expr.debugString(baseTblResultExprs));
}
}
/**
* Expand "*" select list item.
*/
private void expandStar(Analyzer analyzer) throws AnalysisException {
if (fromClause_.isEmpty()) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_TABLES_USED);
}
// expand in From clause order
for (TableRef tableRef : fromClause_) {
if (analyzer.isSemiJoined(tableRef.getId())) {
continue;
}
expandStar(new TableName(null, tableRef.getAlias()), tableRef.getDesc());
}
}
/**
* Expand "<tbl>.*" select list item.
*/
private void expandStar(Analyzer analyzer, TableName tblName) throws AnalysisException {
Collection<TupleDescriptor> descs = analyzer.getDescriptor(tblName);
if (descs == null || descs.isEmpty()) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_TABLE_ERROR, tblName.getTbl());
}
for (TupleDescriptor desc : descs) {
expandStar(tblName, desc);
}
}
/**
* Expand "*" for a particular tuple descriptor by appending
* refs for each column to selectListExprs.
*/
private void expandStar(TableName tblName, TupleDescriptor desc) {
for (Column col : desc.getTable().getBaseSchema()) {
resultExprs.add(new SlotRef(tblName, col.getName()));
colLabels.add(col.getName());
}
}
    /**
     * Analyzes aggregation-relevant components of the select block (GROUP BY
     * clause, select list, ORDER BY clause): validates the HAVING clause,
     * creates the AggregationInfo (including the agg output tuple), and
     * transforms all post-agg exprs via AggregationInfo's smap so they point
     * at the aggregation output tuple.
     *
     * @throws AnalysisException on any semantic violation (DISTINCT combined
     *         with aggregates/GROUP BY, '*' with GROUP BY, unbound post-agg
     *         exprs, correlated HAVING subqueries, hll/bitmap in ORDER BY, ...)
     */
    private void analyzeAggregation(Analyzer analyzer) throws AnalysisException {
        // check having clause
        if (havingClause != null) {
            Expr ambiguousAlias = getFirstAmbiguousAlias(havingClause);
            if (ambiguousAlias != null) {
                ErrorReport.reportAnalysisException(ErrorCode.ERR_NON_UNIQ_ERROR, ambiguousAlias.toColumnLabel());
            }
            /*
             * The having clause needs to be substituted by aliasSMap,
             * and it is analyzed after the substitution.
             * For example:
             * Query: select k1 a, sum(k2) b from table group by k1 having a > 1;
             * Having clause: a > 1
             * aliasSMap: <a, table.k1> <b, sum(table.k2)>
             * After substitute: a > 1 changed to table.k1 > 1
             * Analyzer: check column and other subquery in having clause
             * having predicate: table.k1 > 1
             */
            /*
             * TODO(ml): support substituting outer columns in correlated subqueries.
             * For example: select k1 key, sum(k1) sum_k1 from table a group by k1
             * having k1 >
             *     (select min(k1) from table b where a.key=b.k2);
             * TODO: the a.key should be replaced by a.k1 instead of unknown column 'key' in 'a'
             */
            havingClauseAfterAnaylzed = havingClause.substitute(aliasSMap, analyzer, false);
            havingClauseAfterAnaylzed = rewriteCountDistinctForBitmapOrHLL(havingClauseAfterAnaylzed, analyzer);
            havingClauseAfterAnaylzed.checkReturnsBool("HAVING clause", true);
            // can't contain analytic exprs
            Expr analyticExpr = havingClauseAfterAnaylzed.findFirstOf(AnalyticExpr.class);
            if (analyticExpr != null) {
                throw new AnalysisException(
                        "HAVING clause must not contain analytic expressions: "
                                + analyticExpr.toSql());
            }
        }
        // Fast path: no aggregation anywhere in the statement.
        if (groupByClause == null && !selectList.isDistinct()
                && !TreeNode.contains(resultExprs, Expr.isAggregatePredicate())
                && (havingClauseAfterAnaylzed == null || !havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate()))
                && (sortInfo == null || !TreeNode.contains(sortInfo.getOrderingExprs(),
                Expr.isAggregatePredicate()))) {
            // We're not computing aggregates but we still need to register the HAVING
            // clause which could, e.g., contain a constant expression evaluating to false.
            if (havingClauseAfterAnaylzed != null) {
                if (havingClauseAfterAnaylzed.contains(Subquery.class)) {
                    throw new AnalysisException("Only constant expr could be supported in having clause "
                            + "when no aggregation in stmt");
                }
                analyzer.registerConjuncts(havingClauseAfterAnaylzed, true);
            }
            return;
        }
        // If we're computing an aggregate, we must have a FROM clause.
        if (fromClause_.size() == 0) {
            throw new AnalysisException("Aggregation without a FROM clause is not allowed");
        }
        // DISTINCT over a select list consisting only of aggregates is a no-op;
        // drop it so the aggregation path handles the statement uniformly.
        if (selectList.isDistinct() && groupByClause == null) {
            List<Expr> aggregateExpr = Lists.newArrayList();
            TreeNode.collect(resultExprs, Expr.isAggregatePredicate(), aggregateExpr);
            if (aggregateExpr.size() == resultExprs.size()) {
                selectList.setIsDistinct(false);
            }
        }
        if (selectList.isDistinct()
                && (groupByClause != null
                || TreeNode.contains(resultExprs, Expr.isAggregatePredicate())
                || (havingClauseAfterAnaylzed != null && havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate())))) {
            throw new AnalysisException("cannot combine SELECT DISTINCT with aggregate functions or GROUP BY");
        }
        // disallow '*' and explicit GROUP BY (we can't group by '*', and if you need to
        // name all star-expanded cols in the group by clause you might as well do it
        // in the select list)
        if (groupByClause != null ||
                TreeNode.contains(resultExprs, Expr.isAggregatePredicate())) {
            for (SelectListItem item : selectList.getItems()) {
                if (item.isStar()) {
                    throw new AnalysisException(
                            "cannot combine '*' in select list with GROUP BY: " + item.toSql());
                }
            }
        }
        // Collect the aggregate expressions from the SELECT, HAVING and ORDER BY clauses
        // of this statement.
        ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
        TreeNode.collect(resultExprs, Expr.isAggregatePredicate(), aggExprs);
        if (havingClauseAfterAnaylzed != null) {
            havingClauseAfterAnaylzed.collect(Expr.isAggregatePredicate(), aggExprs);
        }
        if (sortInfo != null) {
            // TODO: Avoid evaluating aggs in ignored order-bys
            TreeNode.collect(sortInfo.getOrderingExprs(), Expr.isAggregatePredicate(), aggExprs);
        }
        // When DISTINCT aggregates are present, non-distinct (i.e. ALL) aggregates are
        // evaluated in two phases (see AggregateInfo for more details). In particular,
        // COUNT(c) in "SELECT COUNT(c), AGG(DISTINCT d) from R" is transformed to
        // "SELECT SUM(cnt) FROM (SELECT COUNT(c) as cnt from R group by d ) S".
        // Since a group-by expression is added to the inner query it returns no rows if
        // R is empty, in which case the SUM of COUNTs will return NULL.
        // However the original COUNT(c) should have returned 0 instead of NULL in this case.
        // Therefore, COUNT([ALL]) is transformed into zeroifnull(COUNT([ALL]) if
        // i) There is no GROUP-BY clause, and
        // ii) Other DISTINCT aggregates are present.
        ExprSubstitutionMap countAllMap = createCountAllMap(aggExprs, analyzer);
        final ExprSubstitutionMap multiCountOrSumDistinctMap =
                createSumOrCountMultiDistinctSMap(aggExprs, analyzer);
        countAllMap = ExprSubstitutionMap.compose(multiCountOrSumDistinctMap, countAllMap, analyzer);
        List<Expr> substitutedAggs =
                Expr.substituteList(aggExprs, countAllMap, analyzer, false);
        aggExprs.clear();
        TreeNode.collect(substitutedAggs, Expr.isAggregatePredicate(), aggExprs);
        List<TupleId> groupingByTupleIds = new ArrayList<>();
        if (groupByClause != null) {
            // must do it before copying for createAggInfo()
            if (groupingInfo != null) {
                groupingByTupleIds.add(groupingInfo.getVirtualTuple().getId());
            }
            groupByClause.genGroupingExprs();
            if (groupingInfo != null) {
                GroupByClause.GroupingType groupingType = groupByClause.getGroupingType();
                if ((groupingType == GroupByClause.GroupingType.GROUPING_SETS && CollectionUtils
                        .isNotEmpty(groupByClause.getGroupingSetList()))
                        || groupingType == GroupByClause.GroupingType.CUBE
                        || groupingType == GroupByClause.GroupingType.ROLLUP) {
                    // NOTE(review): empty branch — this condition currently has no
                    // effect; confirm whether logic was removed or was never added.
                }
                groupingInfo.buildRepeat(groupByClause.getGroupingExprs(), groupByClause.getGroupingSetList());
            }
            substituteOrdinalsAliases(groupByClause.getGroupingExprs(), "GROUP BY", analyzer);
            groupByClause.analyze(analyzer);
            createAggInfo(groupByClause.getGroupingExprs(), aggExprs, analyzer);
        } else {
            createAggInfo( new ArrayList<>(), aggExprs, analyzer);
        }
        // combine avg smap with the one that produces the final agg output
        AggregateInfo finalAggInfo =
                aggInfo.getSecondPhaseDistinctAggInfo() != null
                        ? aggInfo.getSecondPhaseDistinctAggInfo()
                        : aggInfo;
        groupingByTupleIds.add(finalAggInfo.getOutputTupleId());
        ExprSubstitutionMap combinedSmap = ExprSubstitutionMap.compose(countAllMap, finalAggInfo.getOutputSmap(), analyzer);
        // change select list, having and ordering exprs to point to agg output. We need
        // to reanalyze the exprs at this point.
        if (LOG.isDebugEnabled()) {
            LOG.debug("combined smap: " + combinedSmap.debugString());
            LOG.debug("desctbl: " + analyzer.getDescTbl().debugString());
            LOG.debug("resultexprs: " + Expr.debugString(resultExprs));
        }
        if (havingClauseAfterAnaylzed != null) {
            // forbidden correlated subquery in having clause
            List<Subquery> subqueryInHaving = Lists.newArrayList();
            havingClauseAfterAnaylzed.collect(Subquery.class, subqueryInHaving);
            for (Subquery subquery : subqueryInHaving) {
                if (subquery.isCorrelatedPredicate(getTableRefIds())) {
                    throw new AnalysisException("The correlated having clause is not supported");
                }
            }
        }
        /*
         * All of columns of result and having clause are replaced by new slot ref which is bound by top tuple of agg info.
         * For example:
         * ResultExprs: SlotRef(k1), FunctionCall(sum(SlotRef(k2)))
         * Having predicate: FunctionCall(sum(SlotRef(k2))) > subquery
         * CombinedSMap: <SlotRef(k1) tuple 0, SlotRef(k1) of tuple 3>,
         *               <FunctionCall(SlotRef(k2)) tuple 0, SlotRef(sum(k2)) of tuple 3>
         *
         * After rewritten:
         * ResultExprs: SlotRef(k1) of tuple 3, SlotRef(sum(k2)) of tuple 3
         * Having predicate: SlotRef(sum(k2)) of tuple 3 > subquery
         */
        resultExprs = Expr.substituteList(resultExprs, combinedSmap, analyzer, false);
        if (LOG.isDebugEnabled()) {
            LOG.debug("post-agg selectListExprs: " + Expr.debugString(resultExprs));
        }
        if (havingClauseAfterAnaylzed != null) {
            havingPred = havingClauseAfterAnaylzed.substitute(combinedSmap, analyzer, false);
            analyzer.registerConjuncts(havingPred, true, finalAggInfo.getOutputTupleId().asList());
            if (LOG.isDebugEnabled()) {
                LOG.debug("post-agg havingPred: " + havingPred.debugString());
            }
        }
        if (sortInfo != null) {
            sortInfo.substituteOrderingExprs(combinedSmap, analyzer);
            if (LOG.isDebugEnabled()) {
                LOG.debug("post-agg orderingExprs: " +
                        Expr.debugString(sortInfo.getOrderingExprs()));
            }
        }
        // check that all post-agg exprs point to agg output
        for (int i = 0; i < selectList.getItems().size(); ++i) {
            if (!resultExprs.get(i).isBoundByTupleIds(groupingByTupleIds)) {
                throw new AnalysisException(
                        "select list expression not produced by aggregation output " + "(missing from " +
                                "GROUP BY clause?): " + selectList.getItems().get(i).getExpr().toSql());
            }
        }
        if (orderByElements != null) {
            for (int i = 0; i < orderByElements.size(); ++i) {
                if (!sortInfo.getOrderingExprs().get(i).isBoundByTupleIds(groupingByTupleIds)) {
                    throw new AnalysisException(
                            "ORDER BY expression not produced by aggregation output " + "(missing from " +
                                    "GROUP BY clause?): " + orderByElements.get(i).getExpr().toSql());
                }
                if (sortInfo.getOrderingExprs().get(i).type.isHllType()) {
                    throw new AnalysisException("ORDER BY expression could not contain hll column.");
                }
                if (sortInfo.getOrderingExprs().get(i).type.isBitmapType()) {
                    throw new AnalysisException("ORDER BY expression could not contain bitmap column.");
                }
            }
        }
        if (havingPred != null) {
            if (!havingPred.isBoundByTupleIds(groupingByTupleIds)) {
                throw new AnalysisException(
                        "HAVING clause not produced by aggregation output " + "(missing from GROUP BY " +
                                "clause?): " + havingClause.toSql());
            }
        }
    }
/**
* Build smap count_distinct->multi_count_distinct sum_distinct->multi_count_distinct
* assumes that select list and having clause have been analyzed.
*/
private ExprSubstitutionMap createSumOrCountMultiDistinctSMap(
ArrayList<FunctionCallExpr> aggExprs, Analyzer analyzer) throws AnalysisException {
final List<FunctionCallExpr> distinctExprs = Lists.newArrayList();
for (FunctionCallExpr aggExpr : aggExprs) {
if (aggExpr.isDistinct()) {
distinctExprs.add(aggExpr);
}
}
final ExprSubstitutionMap result = new ExprSubstitutionMap();
final boolean hasMultiDistinct = AggregateInfo.estimateIfContainsMultiDistinct(distinctExprs);
if (!hasMultiDistinct) {
return result;
}
for (FunctionCallExpr inputExpr : distinctExprs) {
Expr replaceExpr = null;
final String functionName = inputExpr.getFnName().getFunction();
if (functionName.equalsIgnoreCase("COUNT")) {
final List<Expr> countInputExpr = Lists.newArrayList(inputExpr.getChild(0).clone(null));
replaceExpr = new FunctionCallExpr("MULTI_DISTINCT_COUNT",
new FunctionParams(inputExpr.isDistinct(), countInputExpr));
} else if (functionName.equalsIgnoreCase("SUM")) {
final List<Expr> sumInputExprs = Lists.newArrayList(inputExpr.getChild(0).clone(null));
replaceExpr = new FunctionCallExpr("MULTI_DISTINCT_SUM",
new FunctionParams(inputExpr.isDistinct(), sumInputExprs));
} else if (functionName.equalsIgnoreCase("AVG")) {
final List<Expr> sumInputExprs = Lists.newArrayList(inputExpr.getChild(0).clone(null));
final List<Expr> countInputExpr = Lists.newArrayList(inputExpr.getChild(0).clone(null));
final FunctionCallExpr sumExpr = new FunctionCallExpr("MULTI_DISTINCT_SUM",
new FunctionParams(inputExpr.isDistinct(), sumInputExprs));
final FunctionCallExpr countExpr = new FunctionCallExpr("MULTI_DISTINCT_COUNT",
new FunctionParams(inputExpr.isDistinct(), countInputExpr));
replaceExpr = new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE, sumExpr, countExpr);
} else {
throw new AnalysisException(inputExpr.getFnName() + " can't support multi distinct.");
}
replaceExpr.analyze(analyzer);
result.put(inputExpr, replaceExpr);
}
if (LOG.isDebugEnabled()) {
LOG.debug("multi distinct smap: {}", result.debugString());
}
return result;
}
/**
* Create a map from COUNT([ALL]) -> zeroifnull(COUNT([ALL])) if
* i) There is no GROUP-BY, and
* ii) There are other distinct aggregates to be evaluated.
* This transformation is necessary for COUNT to correctly return 0 for empty
* input relations.
*/
private ExprSubstitutionMap createCountAllMap(
List<FunctionCallExpr> aggExprs, Analyzer analyzer)
throws AnalysisException {
ExprSubstitutionMap scalarCountAllMap = new ExprSubstitutionMap();
if (groupByClause != null && !groupByClause.isEmpty()) {
// There are grouping expressions, so no substitution needs to be done.
return scalarCountAllMap;
}
com.google.common.base.Predicate<FunctionCallExpr> isNotDistinctPred =
new com.google.common.base.Predicate<FunctionCallExpr>() {
public boolean apply(FunctionCallExpr expr) {
return !expr.isDistinct();
}
};
if (Iterables.all(aggExprs, isNotDistinctPred)) {
// Only [ALL] aggs, so no substitution needs to be done.
return scalarCountAllMap;
}
com.google.common.base.Predicate<FunctionCallExpr> isCountPred =
new com.google.common.base.Predicate<FunctionCallExpr>() {
public boolean apply(FunctionCallExpr expr) {
return expr.getFnName().getFunction().equals("count");
}
};
Iterable<FunctionCallExpr> countAllAggs =
Iterables.filter(aggExprs, Predicates.and(isCountPred, isNotDistinctPred));
for (FunctionCallExpr countAllAgg : countAllAggs) {
// TODO(zc)
// Replace COUNT(ALL) with zeroifnull(COUNT(ALL))
ArrayList<Expr> zeroIfNullParam = Lists.newArrayList(countAllAgg.clone(), new IntLiteral(0, Type.BIGINT));
FunctionCallExpr zeroIfNull =
new FunctionCallExpr("ifnull", zeroIfNullParam);
zeroIfNull.analyze(analyzer);
scalarCountAllMap.put(countAllAgg, zeroIfNull);
}
return scalarCountAllMap;
}
/**
* Create aggInfo for the given grouping and agg exprs.
*/
private void createAggInfo(
ArrayList<Expr> groupingExprs,
ArrayList<FunctionCallExpr> aggExprs,
Analyzer analyzer)
throws AnalysisException {
if (selectList.isDistinct()) {
// Create aggInfo for SELECT DISTINCT ... stmt:
// - all select list items turn into grouping exprs
// - there are no aggregate exprs
Preconditions.checkState(groupingExprs.isEmpty());
Preconditions.checkState(aggExprs.isEmpty());
aggInfo = AggregateInfo.create(Expr.cloneList(resultExprs), null, null, analyzer);
} else {
aggInfo = AggregateInfo.create(groupingExprs, aggExprs, null, analyzer);
}
}
    /**
     * If the select list or ORDER BY contains AnalyticExprs, creates AnalyticInfo
     * and substitutes the AnalyticExprs in resultExprs/sortInfo using the
     * AnalyticInfo's smap so they point at the analytic output.
     */
    private void createAnalyticInfo(Analyzer analyzer) throws AnalysisException {
        // collect AnalyticExprs from the SELECT and ORDER BY clauses
        ArrayList<Expr> analyticExprs = Lists.newArrayList();
        TreeNode.collect(resultExprs, AnalyticExpr.class, analyticExprs);
        if (sortInfo != null) {
            TreeNode.collect(sortInfo.getOrderingExprs(), AnalyticExpr.class, analyticExprs);
        }
        if (analyticExprs.isEmpty()) {
            return;
        }
        // Rewrite analytic exprs that have simpler equivalents; AnalyticExpr.rewrite
        // returns null when no rewrite applies. Remember each rewrite in rewriteSmap.
        ExprSubstitutionMap rewriteSmap = new ExprSubstitutionMap();
        for (Expr expr : analyticExprs) {
            AnalyticExpr toRewrite = (AnalyticExpr) expr;
            Expr newExpr = AnalyticExpr.rewrite(toRewrite);
            if (newExpr != null) {
                newExpr.analyze(analyzer);
                if (!rewriteSmap.containsMappingFor(toRewrite)) {
                    rewriteSmap.put(toRewrite, newExpr);
                }
            }
        }
        if (rewriteSmap.size() > 0) {
            // Substitute the exprs with their rewritten versions.
            ArrayList<Expr> updatedAnalyticExprs =
                    Expr.substituteList(analyticExprs, rewriteSmap, analyzer, false);
            // This is to get rid of the original exprs which have been rewritten.
            analyticExprs.clear();
            // Collect the new exprs introduced through the rewrite and the non-rewrite exprs.
            TreeNode.collect(updatedAnalyticExprs, AnalyticExpr.class, analyticExprs);
        }
        analyticInfo = AnalyticInfo.create(analyticExprs, analyzer);
        ExprSubstitutionMap smap = analyticInfo.getSmap();
        // If any exprs were rewritten above, compose the rewrite smap with the
        // analytic-output smap so originals map all the way to the output slots.
        if (rewriteSmap.size() > 0) {
            smap = ExprSubstitutionMap.compose(
                    rewriteSmap, analyticInfo.getSmap(), analyzer);
        }
        // change select list and ordering exprs to point to analytic output. We need
        // to reanalyze the exprs at this point.
        resultExprs = Expr.substituteList(resultExprs, smap, analyzer, false);
        if (LOG.isDebugEnabled()) {
            LOG.debug("post-analytic selectListExprs: " + Expr.debugString(resultExprs));
        }
        if (sortInfo != null) {
            sortInfo.substituteOrderingExprs(smap, analyzer);
            if (LOG.isDebugEnabled()) {
                LOG.debug("post-analytic orderingExprs: " +
                        Expr.debugString(sortInfo.getOrderingExprs()));
            }
        }
    }
    /**
     * Applies the given rewriter to all exprs of this statement (select list,
     * table refs, WHERE, HAVING, GROUP BY, ORDER BY) and recursively to the
     * statements of any subqueries found in WHERE/HAVING.
     */
    @Override
    public void rewriteExprs(ExprRewriter rewriter) throws AnalysisException {
        Preconditions.checkState(isAnalyzed());
        rewriteSelectList(rewriter);
        for (TableRef ref : fromClause_) {
            ref.rewriteExprs(rewriter, analyzer);
        }
        // Also rewrite exprs in the statements of subqueries.
        List<Subquery> subqueryExprs = Lists.newArrayList();
        if (whereClause != null) {
            whereClause = rewriter.rewrite(whereClause, analyzer);
            whereClause.collect(Subquery.class, subqueryExprs);
        }
        if (havingClause != null) {
            havingClause = rewriter.rewrite(havingClause, analyzer);
            // NOTE(review): subqueries are collected from the analyzed clause
            // (havingClauseAfterAnaylzed), not from the freshly rewritten
            // havingClause — confirm this asymmetry with WHERE is intended.
            havingClauseAfterAnaylzed.collect(Subquery.class, subqueryExprs);
        }
        for (Subquery subquery : subqueryExprs) {
            subquery.getStatement().rewriteExprs(rewriter);
        }
        if (groupByClause != null) {
            ArrayList<Expr> groupingExprs = groupByClause.getGroupingExprs();
            if (groupingExprs != null) {
                rewriter.rewriteList(groupingExprs, analyzer);
            }
        }
        if (orderByElements != null) {
            for (OrderByElement orderByElem : orderByElements) {
                orderByElem.setExpr(rewriter.rewrite(orderByElem.getExpr(), analyzer));
            }
        }
    }
private void rewriteSelectList(ExprRewriter rewriter) throws AnalysisException {
for (SelectListItem item : selectList.getItems()) {
if (item.getExpr() instanceof CaseExpr && item.getExpr().contains(Predicates.instanceOf(Subquery.class))) {
rewriteSubquery(item.getExpr(), analyzer);
}
}
selectList.rewriteExprs(rewriter, analyzer);
}
    /** Rewrites a subquery in a CASE WHEN to an inline view.
     * A subquery in a CASE WHEN statement like
     *
     * SELECT CASE
     *         WHEN (
     *             SELECT COUNT(*) / 2
     *             FROM t
     *         ) > k4 THEN (
     *             SELECT AVG(k4)
     *             FROM t
     *         )
     *         ELSE (
     *             SELECT SUM(k4)
     *             FROM t
     *         )
     *     END AS kk4
     * FROM t;
     * will be rewritten to
     *
     * SELECT CASE
     *         WHEN t1.a > k4 THEN t2.a
     *         ELSE t3.a
     *     END AS kk4
     * FROM t, (
     *         SELECT COUNT(*) / 2 AS a
     *         FROM t
     *     ) t1, (
     *         SELECT AVG(k4) AS a
     *         FROM t
     *     ) t2, (
     *         SELECT SUM(k4) AS a
     *         FROM t
     *     ) t3;
     *
     * Each qualifying subquery must be an uncorrelated, single-row, single-column
     * SELECT; it is appended to the FROM clause as an inline view and replaced
     * in the expr tree by a SlotRef onto that view.
     */
    private Expr rewriteSubquery(Expr expr, Analyzer analyzer)
            throws AnalysisException {
        if (expr instanceof Subquery) {
            if (!(((Subquery) expr).getStatement() instanceof SelectStmt)) {
                throw new AnalysisException("Only support select subquery in case-when clause.");
            }
            if (expr.isCorrelatedPredicate(getTableRefIds())) {
                throw new AnalysisException("The correlated subquery in case-when clause is not supported");
            }
            SelectStmt subquery = (SelectStmt) ((Subquery) expr).getStatement();
            if (subquery.resultExprs.size() != 1 || !subquery.returnsSingleRow()) {
                throw new AnalysisException("Subquery in case-when must return scala type");
            }
            // Re-analyze the subquery as an inline view with a runtime single-row
            // assertion, then substitute a slot ref onto the view's only column.
            subquery.reset();
            subquery.setAssertNumRowsElement(1, AssertNumRowsElement.Assertion.EQ);
            String alias = getTableAliasGenerator().getNextAlias();
            String colAlias = getColumnAliasGenerator().getNextAlias();
            InlineViewRef inlineViewRef = new InlineViewRef(alias, subquery, Arrays.asList(colAlias));
            try {
                inlineViewRef.analyze(analyzer);
            } catch (UserException e) {
                throw new AnalysisException(e.getMessage());
            }
            fromClause_.add(inlineViewRef);
            expr = new SlotRef(inlineViewRef.getAliasAsName(), colAlias);
        } else if (CollectionUtils.isNotEmpty(expr.getChildren())) {
            // Recurse into children; each child may itself contain a subquery.
            for (int i = 0; i < expr.getChildren().size(); ++i) {
                expr.setChild(i, rewriteSubquery(expr.getChild(i), analyzer));
            }
        }
        return expr;
    }
    /**
     * Renders this select statement back to SQL text. Uses the cached SQL string
     * when available, otherwise reconstructs the statement clause by clause.
     */
    @Override
    public String toSql() {
        if (sqlString_ != null) {
            return sqlString_;
        }
        StringBuilder strBuilder = new StringBuilder();
        if (withClause_ != null) {
            strBuilder.append(withClause_.toSql());
            strBuilder.append(" ");
        }
        // Select list
        strBuilder.append("SELECT ");
        if (selectList.isDistinct()) {
            strBuilder.append("DISTINCT ");
        }
        for (int i = 0; i < resultExprs.size(); ++i) {
            // strBuilder.append(selectList.getItems().get(i).toSql());
            // strBuilder.append((i + 1 != selectList.getItems().size()) ? ", " : "");
            if (i != 0) {
                strBuilder.append(", ");
            }
            // needToSql: print originalExpr instead of the analyzed result exprs —
            // presumably the pre-substitution form; confirm against where
            // originalExpr is populated.
            if (needToSql) {
                strBuilder.append(originalExpr.get(i).toSql());
            } else {
                strBuilder.append(resultExprs.get(i).toSql());
            }
            strBuilder.append(" AS ").append(SqlUtils.getIdentSql(colLabels.get(i)));
        }
        // From clause
        if (!fromClause_.isEmpty()) {
            strBuilder.append(fromClause_.toSql());
        }
        // Where clause
        if (whereClause != null) {
            strBuilder.append(" WHERE ");
            strBuilder.append(whereClause.toSql());
        }
        // Group By clause
        if (groupByClause != null) {
            strBuilder.append(" GROUP BY ");
            strBuilder.append(groupByClause.toSql());
        }
        // Having clause
        if (havingClause != null) {
            strBuilder.append(" HAVING ");
            strBuilder.append(havingClause.toSql());
        }
        // Order By clause
        if (orderByElements != null) {
            strBuilder.append(" ORDER BY ");
            for (int i = 0; i < orderByElements.size(); ++i) {
                strBuilder.append(orderByElements.get(i).getExpr().toSql());
                if (sortInfo != null) {
                    strBuilder.append((sortInfo.getIsAscOrder().get(i)) ? " ASC" : " DESC");
                }
                strBuilder.append((i + 1 != orderByElements.size()) ? ", " : "");
            }
        }
        // Limit clause.
        if (hasLimitClause()) {
            strBuilder.append(limitElement.toSql());
        }
        return strBuilder.toString();
    }
@Override
public void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList) {
// If select statement has an aggregate, then the aggregate tuple id is materialized.
// Otherwise, all referenced tables are materialized.
if (evaluateOrderBy) {
tupleIdList.add(sortInfo.getSortTupleDescriptor().getId());
} else if (aggInfo != null) {
// Return the tuple id produced in the final aggregation step.
if (aggInfo.isDistinctAgg()) {
tupleIdList.add(aggInfo.getSecondPhaseDistinctAggInfo().getOutputTupleId());
} else {
tupleIdList.add(aggInfo.getOutputTupleId());
}
} else {
for (TableRef tblRef : fromClause_) {
tupleIdList.addAll(tblRef.getMaterializedTupleIds());
}
}
// We materialize the agg tuple or the table refs together with the analytic tuple.
if (hasAnalyticInfo() && isEvaluateOrderBy()) {
tupleIdList.add(analyticInfo.getOutputTupleId());
}
}
/**
 * Re-analyzes this select statement with the given analyzer and substitutes its
 * select-list aliases with the supplied new column labels. Rewrites GROUP BY,
 * HAVING and ORDER BY so references to old aliases point at the new labels.
 *
 * @param analyzer     analyzer to resolve and analyze table refs / exprs against
 * @param newColLabels replacement output column labels, positionally aligned
 *                     with this statement's result exprs
 * @throws AnalysisException if analysis of any clause fails
 * @throws UserException     if table-ref resolution fails
 */
@Override
public void substituteSelectList(Analyzer analyzer, List<String> newColLabels)
        throws AnalysisException, UserException {
    // Analyze the WITH clause first so CTE aliases are registered before
    // the table refs below are resolved.
    if (hasWithClause()) {
        withClause_.analyze(analyzer);
    }
    // Resolve and analyze table refs left-to-right to establish aliases; each
    // ref needs its left neighbor for join analysis.
    TableRef leftTblRef = null; // the one to the left of tblRef
    for (int i = 0; i < fromClause_.size(); ++i) {
        // Resolve and replace non-InlineViewRef table refs with a BaseTableRef or ViewRef.
        TableRef tblRef = fromClause_.get(i);
        tblRef = analyzer.resolveTableRef(tblRef);
        Preconditions.checkNotNull(tblRef);
        fromClause_.set(i, tblRef);
        tblRef.setLeftTblRef(leftTblRef);
        tblRef.analyze(analyzer);
        leftTblRef = tblRef;
    }
    // Populate resultExprs and build aliasSMap (old alias -> new label).
    for (SelectListItem item : selectList.getItems()) {
        if (item.isStar()) {
            TableName tblName = item.getTblName();
            if (tblName == null) {
                expandStar(analyzer);
            } else {
                expandStar(analyzer, tblName);
            }
        } else {
            // Analyze analytic exprs here so the sortInfo's AnalyticExpr and the
            // resultExprs' AnalyticExpr are analyzed exactly once.
            if (item.getExpr() instanceof AnalyticExpr) {
                item.getExpr().analyze(analyzer);
            }
            if (item.getAlias() != null) {
                SlotRef aliasRef = new SlotRef(null, item.getAlias());
                // The new label is positionally aligned with the expr being added.
                SlotRef newAliasRef = new SlotRef(null, newColLabels.get(resultExprs.size()));
                newAliasRef.analysisDone();
                aliasSMap.put(aliasRef, newAliasRef);
            }
            resultExprs.add(item.getExpr());
        }
    }
    // Substitute aliases in GROUP BY.
    if (groupByClause != null) {
        substituteOrdinalsAliases(groupByClause.getGroupingExprs(), "GROUP BY", analyzer);
    }
    // Substitute aliases in HAVING.
    if (havingClause != null) {
        havingClause = havingClause.clone(aliasSMap);
    }
    // Substitute aliases in ORDER BY. Fix: substitute() rewrites the whole
    // list in one pass; the previous code re-applied it once per element,
    // doing O(n^2) redundant substitution work for the same final result.
    if (orderByElements != null) {
        orderByElements = OrderByElement.substitute(orderByElements, aliasSMap, analyzer);
    }
    colLabels.clear();
    colLabels.addAll(newColLabels);
}
/**
 * @return true if this statement carries a WHERE clause, false otherwise
 */
public boolean hasWhereClause() {
    return null != whereClause;
}
/**
 * @return true if aggregation info has been created for this statement
 */
public boolean hasAggInfo() {
    return null != aggInfo;
}
/**
 * @return true if this statement carries a GROUP BY clause, false otherwise
 */
public boolean hasGroupByClause() {
    return null != groupByClause;
}
/**
 * Check if the stmt returns a single row. This can happen
 * in the following cases:
 * 1. select stmt with a 'limit 1' clause
 * 2. select stmt with an aggregate function and no group by.
 * 3. select stmt with no from clause.
 * <p>
 * This function may produce false negatives because the cardinality of the
 * result set also depends on the data a stmt is processing.
 */
public boolean returnsSingleRow() {
    // Short-circuits in the same order as the cases above: LIMIT 1, then
    // no FROM clause (base tables or inline views), then non-grouped,
    // non-DISTINCT aggregation. Anything else may return multiple rows.
    boolean limitOne = hasLimitClause() && getLimit() == 1;
    boolean scalarAgg = hasAggInfo() && !hasGroupByClause() && !selectList.isDistinct();
    return limitOne || fromClause_.isEmpty() || scalarAgg;
}
/**
 * Collects all base table refs reachable from this statement into tblRefs,
 * recursing through inline views instead of adding the view ref itself.
 *
 * @param tblRefs output list that receives the collected table refs
 */
@Override
public void collectTableRefs(List<TableRef> tblRefs) {
    for (TableRef ref : fromClause_) {
        if (!(ref instanceof InlineViewRef)) {
            tblRefs.add(ref);
            continue;
        }
        // Descend into the inline view's own statement.
        ((InlineViewRef) ref).getViewStmt().collectTableRefs(tblRefs);
    }
}
/**
 * Recursively checks whether the given expression tree contains a
 * GROUPING()/GROUPING_ID() function call anywhere within it.
 *
 * @param expr root of the expression tree to inspect
 * @return true if expr or any descendant is a GroupingFunctionCallExpr
 */
private boolean checkGroupingFn(Expr expr) {
    if (expr instanceof GroupingFunctionCallExpr) {
        return true;
    }
    // Iterating an empty child list is a no-op, so only a null guard is needed.
    if (expr.getChildren() == null) {
        return false;
    }
    for (Expr child : expr.getChildren()) {
        if (checkGroupingFn(child)) {
            return true;
        }
    }
    return false;
}
/**
 * Hash code derived from this statement's id, consistent with
 * {@link #equals(Object)} which compares by the same id.
 */
@Override
public int hashCode() {
    return this.id.hashCode();
}
/**
 * Two SelectStmt instances are equal iff their ids are equal.
 * Consistent with {@link #hashCode()}.
 */
@Override
public boolean equals(Object obj) {
    // instanceof is false for null, so no separate null check is required.
    if (!(obj instanceof SelectStmt)) {
        return false;
    }
    return this.id.equals(((SelectStmt) obj).id);
}
}
/*
 * NOTE(review): the following lines are non-Java residue (dataset/web-page text
 * accidentally appended after the class body) and have been commented out so the
 * file remains compilable:
 * |
 * Subsets and Splits
 * No community queries yet
 * The top public SQL queries from the community will appear here once available.
 */