repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15
values |
|---|---|---|---|---|
WilliamNouet/nifi | nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/PutDistributedMapCache.java | 11671 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.standard;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.EventDriven;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.behavior.InputRequirement.Requirement;
import org.apache.nifi.annotation.behavior.SupportsBatching;
import org.apache.nifi.annotation.behavior.WritesAttribute;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.SeeAlso;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.components.AllowableValue;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.distributed.cache.client.Deserializer;
import org.apache.nifi.distributed.cache.client.DistributedMapCacheClient;
import org.apache.nifi.distributed.cache.client.Serializer;
import org.apache.nifi.distributed.cache.client.exception.DeserializationException;
import org.apache.nifi.distributed.cache.client.exception.SerializationException;
import org.apache.nifi.expression.AttributeExpression.ResultType;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.DataUnit;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
@EventDriven
@SupportsBatching
@Tags({"map", "cache", "put", "distributed"})
@InputRequirement(Requirement.INPUT_REQUIRED)
@CapabilityDescription("Gets the content of a FlowFile and puts it to a distributed map cache, using a cache key " +
"computed from FlowFile attributes. If the cache already contains the entry and the cache update strategy is " +
"'keep original' the entry is not replaced.'")
@WritesAttribute(attribute = "cached", description = "All FlowFiles will have an attribute 'cached'. The value of this " +
"attribute is true, is the FlowFile is cached, otherwise false.")
@SeeAlso(classNames = {"org.apache.nifi.distributed.cache.client.DistributedMapCacheClientService", "org.apache.nifi.distributed.cache.server.map.DistributedMapCacheServer",
"org.apache.nifi.processors.standard.FetchDistributedMapCache"})
public class PutDistributedMapCache extends AbstractProcessor {
public static final String CACHED_ATTRIBUTE_NAME = "cached";
// Identifies the distributed map cache client
public static final PropertyDescriptor DISTRIBUTED_CACHE_SERVICE = new PropertyDescriptor.Builder()
.name("Distributed Cache Service")
.description("The Controller Service that is used to cache flow files")
.required(true)
.identifiesControllerService(DistributedMapCacheClient.class)
.build();
// Selects the FlowFile attribute, whose value is used as cache key
public static final PropertyDescriptor CACHE_ENTRY_IDENTIFIER = new PropertyDescriptor.Builder()
.name("Cache Entry Identifier")
.description("A FlowFile attribute, or the results of an Attribute Expression Language statement, which will " +
"be evaluated against a FlowFile in order to determine the cache key")
.required(true)
.addValidator(StandardValidators.createAttributeExpressionLanguageValidator(ResultType.STRING, true))
.expressionLanguageSupported(true)
.build();
public static final AllowableValue CACHE_UPDATE_REPLACE = new AllowableValue("replace", "Replace if present",
"Adds the specified entry to the cache, replacing any value that is currently set.");
public static final AllowableValue CACHE_UPDATE_KEEP_ORIGINAL = new AllowableValue("keeporiginal", "Keep original",
"Adds the specified entry to the cache, if the key does not exist.");
public static final PropertyDescriptor CACHE_UPDATE_STRATEGY = new PropertyDescriptor.Builder()
.name("Cache update strategy")
.description("Determines how the cache is updated if the cache already contains the entry")
.required(true)
.allowableValues(CACHE_UPDATE_REPLACE, CACHE_UPDATE_KEEP_ORIGINAL)
.defaultValue(CACHE_UPDATE_REPLACE.getValue())
.build();
public static final PropertyDescriptor CACHE_ENTRY_MAX_BYTES = new PropertyDescriptor.Builder()
.name("Max cache entry size")
.description("The maximum amount of data to put into cache")
.required(false)
.addValidator(StandardValidators.DATA_SIZE_VALIDATOR)
.defaultValue("1 MB")
.expressionLanguageSupported(false)
.build();
public static final Relationship REL_SUCCESS = new Relationship.Builder()
.name("success")
.description("Any FlowFile that is successfully inserted into cache will be routed to this relationship")
.build();
public static final Relationship REL_FAILURE = new Relationship.Builder()
.name("failure")
.description("Any FlowFile that cannot be inserted into the cache will be routed to this relationship")
.build();
private final Set<Relationship> relationships;
private final Serializer<String> keySerializer = new StringSerializer();
private final Serializer<byte[]> valueSerializer = new CacheValueSerializer();
private final Deserializer<byte[]> valueDeserializer = new CacheValueDeserializer();
public PutDistributedMapCache() {
final Set<Relationship> rels = new HashSet<>();
rels.add(REL_SUCCESS);
rels.add(REL_FAILURE);
relationships = Collections.unmodifiableSet(rels);
}
@Override
protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
final List<PropertyDescriptor> descriptors = new ArrayList<>();
descriptors.add(CACHE_ENTRY_IDENTIFIER);
descriptors.add(DISTRIBUTED_CACHE_SERVICE);
descriptors.add(CACHE_UPDATE_STRATEGY);
descriptors.add(CACHE_ENTRY_MAX_BYTES);
return descriptors;
}
@Override
public Set<Relationship> getRelationships() {
return relationships;
}
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final ComponentLog logger = getLogger();
// cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
// if the computed value is null, or empty, we transfer the flow file to failure relationship
if (StringUtils.isBlank(cacheKey)) {
logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile});
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
return;
}
// the cache client used to interact with the distributed cache
final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
try {
final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
long flowFileSize = flowFile.getSize();
// too big flow file
if (flowFileSize > maxCacheEntrySize) {
logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize});
session.transfer(flowFile, REL_FAILURE);
return;
}
if (flowFileSize == 0) {
logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile});
session.transfer(flowFile, REL_FAILURE);
return;
}
// get flow file content
final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
session.exportTo(flowFile, byteStream);
byte[] cacheValue = byteStream.toByteArray();
final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
boolean cached = false;
if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
cached = true;
} else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
if (oldValue == null) {
cached = true;
}
}
// set 'cached' attribute
flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));
if (cached) {
session.transfer(flowFile, REL_SUCCESS);
} else {
session.transfer(flowFile, REL_FAILURE);
}
} catch (final IOException e) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e});
}
}
public static class CacheValueSerializer implements Serializer<byte[]> {
@Override
public void serialize(final byte[] bytes, final OutputStream out) throws SerializationException, IOException {
out.write(bytes);
}
}
public static class CacheValueDeserializer implements Deserializer<byte[]> {
@Override
public byte[] deserialize(final byte[] input) throws DeserializationException, IOException {
if (input == null || input.length == 0) {
return null;
}
return input;
}
}
/**
* Simple string serializer, used for serializing the cache key
*/
public static class StringSerializer implements Serializer<String> {
@Override
public void serialize(final String value, final OutputStream out) throws SerializationException, IOException {
out.write(value.getBytes(StandardCharsets.UTF_8));
}
}
}
| apache-2.0 |
gstevey/gradle | subprojects/core/src/main/java/org/gradle/configuration/project/ProjectConfigureAction.java | 1190 | /*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.configuration.project;
import org.gradle.api.Action;
import org.gradle.api.internal.project.ProjectInternal;
/**
 * Can be implemented by plugins to auto-configure each project.
 *
 * <p>Implementations are discovered using the JAR service locator mechanism (see {@link org.gradle.internal.service.ServiceLocator}).
 * Each action is invoked once per project, before that project has been configured. Actions are executed
 * in an arbitrary order, so implementations must not depend on other actions having run first.
 */
public interface ProjectConfigureAction extends Action<ProjectInternal> {
}
| apache-2.0 |
kyroskoh/jmeter | src/core/org/apache/jmeter/save/ScriptWrapperConverter.java | 4773 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.jmeter.save;
import org.apache.jmeter.save.converters.ConversionHelp;
import org.apache.jmeter.util.JMeterUtils;
import org.apache.jorphan.collections.HashTree;
import com.thoughtworks.xstream.mapper.Mapper;
import com.thoughtworks.xstream.converters.ConversionException;
import com.thoughtworks.xstream.converters.Converter;
import com.thoughtworks.xstream.converters.MarshallingContext;
import com.thoughtworks.xstream.converters.UnmarshallingContext;
import com.thoughtworks.xstream.io.HierarchicalStreamReader;
import com.thoughtworks.xstream.io.HierarchicalStreamWriter;
/**
* Handles XStream conversion of Test Scripts
*
*/
public class ScriptWrapperConverter implements Converter {
    private static final String ATT_PROPERTIES = "properties"; // $NON-NLS-1$

    private static final String ATT_VERSION = "version"; // $NON-NLS-1$

    private static final String ATT_JMETER = "jmeter"; // $NON-NLS-1$

    /**
     * Returns the converter version; used to check for possible
     * incompatibilities
     *
     * @return the version of the converter
     */
    public static String getVersion() {
        return "$Revision$"; // $NON-NLS-1$
    }

    /** Maps between element/attribute names and classes; supplied by XStream. */
    private final Mapper classMapper;

    public ScriptWrapperConverter(Mapper classMapper) {
        this.classMapper = classMapper;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean canConvert(@SuppressWarnings("rawtypes") Class arg0) { // superclass is not typed
        // Null-safe: XStream may probe with arbitrary classes
        return ScriptWrapper.class.equals(arg0);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void marshal(Object arg0, HierarchicalStreamWriter writer, MarshallingContext context) {
        ScriptWrapper wrap = (ScriptWrapper) arg0;
        String version = SaveService.getVERSION();
        ConversionHelp.setOutVersion(version);// Ensure output follows version
        writer.addAttribute(ATT_VERSION, version);
        writer.addAttribute(ATT_PROPERTIES, SaveService.getPropertiesVersion());
        writer.addAttribute(ATT_JMETER, JMeterUtils.getJMeterVersion());
        writer.startNode(classMapper.serializedClass(wrap.testPlan.getClass()));
        context.convertAnother(wrap.testPlan);
        writer.endNode();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext context) {
        ScriptWrapper wrap = new ScriptWrapper();
        wrap.version = reader.getAttribute(ATT_VERSION);
        ConversionHelp.setInVersion(wrap.version);// Make sure decoding
        // follows input file
        reader.moveDown();
        // Catch errors and rethrow as ConversionException so we get location details
        try {
            wrap.testPlan = (HashTree) context.convertAnother(wrap, getNextType(reader));
        } catch (NoClassDefFoundError | Exception e) {
            throw createConversionException(e);
        }
        return wrap;
    }

    /**
     * Wraps a failure in a {@link ConversionException}, recording the first
     * JMeter frame from the stack trace to help pinpoint the failing component.
     *
     * @param e the original failure; kept as the cause
     * @return the enriched conversion exception (never null)
     */
    private ConversionException createConversionException(Throwable e) {
        final ConversionException conversionException = new ConversionException(e);
        StackTraceElement[] ste = e.getStackTrace();
        if (ste != null) {
            for (StackTraceElement top : ste) {
                String className = top.getClassName();
                if (className.startsWith("org.apache.jmeter.")) {
                    conversionException.add("first-jmeter-class", top.toString());
                    break;
                }
            }
        }
        return conversionException;
    }

    /**
     * Determines the class of the next element: an explicit "class" attribute
     * wins; otherwise the node name is mapped back to a class.
     *
     * @param reader positioned at the element to inspect
     * @return the class the element should be unmarshalled as
     */
    protected Class<?> getNextType(HierarchicalStreamReader reader) {
        String classAttribute = reader.getAttribute(ConversionHelp.ATT_CLASS);
        Class<?> type;
        if (classAttribute == null) {
            type = classMapper.realClass(reader.getNodeName());
        } else {
            type = classMapper.realClass(classAttribute);
        }
        return type;
    }
}
| apache-2.0 |
apache/flink | flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserSqlFunctionConverter.java | 22141 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.delegation.hive.copy;
import org.apache.flink.table.planner.delegation.hive.HiveParserIN;
import org.apache.flink.table.planner.delegation.hive.parse.HiveASTParser;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlMonotonicBinaryOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.sql.type.InferTypes;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.calcite.sql.type.SqlOperandTypeChecker;
import org.apache.calcite.sql.type.SqlOperandTypeInference;
import org.apache.calcite.sql.type.SqlReturnTypeInference;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.util.Util;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.FunctionInfo;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNegative;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPositive;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Counterpart of hive's
 * org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.
 */
public class HiveParserSqlFunctionConverter {
    private static final Logger LOG = LoggerFactory.getLogger(HiveParserSqlFunctionConverter.class);

    // Lookup tables between Hive function names, Calcite operators and Hive AST tokens.
    static final Map<String, SqlOperator> HIVE_TO_CALCITE;
    static final Map<SqlOperator, HiveToken> CALCITE_TO_HIVE_TOKEN;
    static final Map<SqlOperator, String> REVERSE_OPERATOR_MAP;

    static {
        StaticBlockBuilder builder = new StaticBlockBuilder();
        HIVE_TO_CALCITE = Collections.unmodifiableMap(builder.hiveToCalcite);
        CALCITE_TO_HIVE_TOKEN = Collections.unmodifiableMap(builder.calciteToHiveToken);
        REVERSE_OPERATOR_MAP = Collections.unmodifiableMap(builder.reverseOperatorMap);
    }

    /**
     * Resolves the Calcite operator corresponding to a Hive UDF invocation.
     *
     * @param funcTextName the function name as written in the query (may be empty)
     * @param hiveUDF the resolved Hive UDF instance
     * @param calciteArgTypes argument types, used when synthesizing a new operator
     * @param retType return type, used when synthesizing a new operator
     * @return the matching or newly created Calcite operator
     * @throws SemanticException if function name normalization fails
     */
    public static SqlOperator getCalciteOperator(
            String funcTextName,
            GenericUDF hiveUDF,
            List<RelDataType> calciteArgTypes,
            RelDataType retType)
            throws SemanticException {
        // handle overloaded methods first: unary +/- share text with the binary forms
        if (hiveUDF instanceof GenericUDFOPNegative) {
            return SqlStdOperatorTable.UNARY_MINUS;
        } else if (hiveUDF instanceof GenericUDFOPPositive) {
            return SqlStdOperatorTable.UNARY_PLUS;
        } // do generic lookup
        String name;
        if (StringUtils.isEmpty(funcTextName)) {
            name = getName(hiveUDF); // this should probably never happen, see getName comment
            LOG.warn("The function text was empty, name from annotation is {}", name);
        } else {
            // We could just do toLowerCase here and let SA qualify it, but
            // let's be proper...
            name = FunctionRegistry.getNormalizedFunctionName(funcTextName);
        }
        return getCalciteFn(
                name, calciteArgTypes, retType, FunctionRegistry.isDeterministic(hiveUDF));
    }

    // TODO: this is not valid. Function names for built-in UDFs are specified in
    // FunctionRegistry, and only happen to match annotations. For user UDFs, the
    // name is what user specifies at creation time (annotation can be absent,
    // different, or duplicate some other function).
    private static String getName(GenericUDF hiveUDF) {
        String udfName = null;
        if (hiveUDF instanceof GenericUDFBridge) {
            udfName = hiveUDF.getUdfName();
        } else {
            Class<? extends GenericUDF> udfClass = hiveUDF.getClass();
            Description udfAnnotation = udfClass.getAnnotation(Description.class);
            if (udfAnnotation != null && udfAnnotation.name() != null) {
                // name() may contain comma-separated aliases; the first one is canonical
                udfName = udfAnnotation.name().split(",")[0];
            }
            if (udfName == null || udfName.isEmpty()) {
                // fall back to the simple class name
                udfName = hiveUDF.getClass().getName();
                int indx = udfName.lastIndexOf('.');
                if (indx >= 0) {
                    udfName = udfName.substring(indx + 1);
                }
            }
        }
        return udfName;
    }

    /** This class is used to build immutable hashmaps in the static block above. */
    private static class StaticBlockBuilder {
        final Map<String, SqlOperator> hiveToCalcite = new HashMap<>();
        final Map<SqlOperator, HiveToken> calciteToHiveToken = new HashMap<>();
        final Map<SqlOperator, String> reverseOperatorMap = new HashMap<>();

        StaticBlockBuilder() {
            registerFunction("+", SqlStdOperatorTable.PLUS, hToken(HiveASTParser.PLUS, "+"));
            registerFunction("-", SqlStdOperatorTable.MINUS, hToken(HiveASTParser.MINUS, "-"));
            registerFunction("*", SqlStdOperatorTable.MULTIPLY, hToken(HiveASTParser.STAR, "*"));
            registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveASTParser.DIVIDE, "/"));
            registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveASTParser.Identifier, "%"));
            registerFunction("and", SqlStdOperatorTable.AND, hToken(HiveASTParser.KW_AND, "and"));
            registerFunction("or", SqlStdOperatorTable.OR, hToken(HiveASTParser.KW_OR, "or"));
            registerFunction("=", SqlStdOperatorTable.EQUALS, hToken(HiveASTParser.EQUAL, "="));
            registerDuplicateFunction(
                    "==", SqlStdOperatorTable.EQUALS, hToken(HiveASTParser.EQUAL, "="));
            registerFunction(
                    "<", SqlStdOperatorTable.LESS_THAN, hToken(HiveASTParser.LESSTHAN, "<"));
            registerFunction(
                    "<=",
                    SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
                    hToken(HiveASTParser.LESSTHANOREQUALTO, "<="));
            registerFunction(
                    ">", SqlStdOperatorTable.GREATER_THAN, hToken(HiveASTParser.GREATERTHAN, ">"));
            registerFunction(
                    ">=",
                    SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
                    hToken(HiveASTParser.GREATERTHANOREQUALTO, ">="));
            registerFunction("not", SqlStdOperatorTable.NOT, hToken(HiveASTParser.KW_NOT, "not"));
            registerDuplicateFunction(
                    "!", SqlStdOperatorTable.NOT, hToken(HiveASTParser.KW_NOT, "not"));
            registerFunction(
                    "<>", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveASTParser.NOTEQUAL, "<>"));
            registerDuplicateFunction(
                    "!=", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveASTParser.NOTEQUAL, "<>"));
            registerFunction("in", HiveParserIN.INSTANCE, hToken(HiveASTParser.Identifier, "in"));
            registerFunction(
                    "between",
                    HiveParserBetween.INSTANCE,
                    hToken(HiveASTParser.Identifier, "between"));
            registerFunction(
                    "struct", SqlStdOperatorTable.ROW, hToken(HiveASTParser.Identifier, "struct"));
            registerFunction(
                    "isnotnull",
                    SqlStdOperatorTable.IS_NOT_NULL,
                    hToken(HiveASTParser.TOK_ISNOTNULL, "TOK_ISNOTNULL"));
            registerFunction(
                    "isnull",
                    SqlStdOperatorTable.IS_NULL,
                    hToken(HiveASTParser.TOK_ISNULL, "TOK_ISNULL"));
            // let's try removing 'when' for better compatibility
            // registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveASTParser.Identifier,
            // "when"));
            // let's try removing 'case' for better compatibility
            // registerDuplicateFunction("case", SqlStdOperatorTable.CASE,
            // hToken(HiveASTParser.Identifier, "when"));
            // timebased
            registerFunction(
                    "year", HiveParserExtractDate.YEAR, hToken(HiveASTParser.Identifier, "year"));
            registerFunction(
                    "quarter",
                    HiveParserExtractDate.QUARTER,
                    hToken(HiveASTParser.Identifier, "quarter"));
            registerFunction(
                    "month",
                    HiveParserExtractDate.MONTH,
                    hToken(HiveASTParser.Identifier, "month"));
            registerFunction(
                    "weekofyear",
                    HiveParserExtractDate.WEEK,
                    hToken(HiveASTParser.Identifier, "weekofyear"));
            registerFunction(
                    "day", HiveParserExtractDate.DAY, hToken(HiveASTParser.Identifier, "day"));
            registerFunction(
                    "hour", HiveParserExtractDate.HOUR, hToken(HiveASTParser.Identifier, "hour"));
            registerFunction(
                    "minute",
                    HiveParserExtractDate.MINUTE,
                    hToken(HiveASTParser.Identifier, "minute"));
            registerFunction(
                    "second",
                    HiveParserExtractDate.SECOND,
                    hToken(HiveASTParser.Identifier, "second"));
            registerFunction(
                    "floor_year",
                    HiveParserFloorDate.YEAR,
                    hToken(HiveASTParser.Identifier, "floor_year"));
            registerFunction(
                    "floor_quarter",
                    HiveParserFloorDate.QUARTER,
                    hToken(HiveASTParser.Identifier, "floor_quarter"));
            registerFunction(
                    "floor_month",
                    HiveParserFloorDate.MONTH,
                    hToken(HiveASTParser.Identifier, "floor_month"));
            registerFunction(
                    "floor_week",
                    HiveParserFloorDate.WEEK,
                    hToken(HiveASTParser.Identifier, "floor_week"));
            registerFunction(
                    "floor_day",
                    HiveParserFloorDate.DAY,
                    hToken(HiveASTParser.Identifier, "floor_day"));
            registerFunction(
                    "floor_hour",
                    HiveParserFloorDate.HOUR,
                    hToken(HiveASTParser.Identifier, "floor_hour"));
            registerFunction(
                    "floor_minute",
                    HiveParserFloorDate.MINUTE,
                    hToken(HiveASTParser.Identifier, "floor_minute"));
            registerFunction(
                    "floor_second",
                    HiveParserFloorDate.SECOND,
                    hToken(HiveASTParser.Identifier, "floor_second"));
            // support <=>
            registerFunction(
                    "<=>",
                    SqlStdOperatorTable.IS_NOT_DISTINCT_FROM,
                    hToken(HiveASTParser.EQUAL_NS, "<=>"));
        }

        // Registers a function under the name Hive itself reports for it (via FunctionRegistry),
        // keeping all three lookup maps in sync.
        private void registerFunction(String name, SqlOperator calciteFn, HiveToken hiveToken) {
            reverseOperatorMap.put(calciteFn, name);
            FunctionInfo hFn;
            try {
                hFn = FunctionRegistry.getFunctionInfo(name);
            } catch (SemanticException e) {
                LOG.warn("Failed to load udf {}", name, e);
                hFn = null;
            }
            if (hFn != null) {
                String hFnName = getName(hFn.getGenericUDF());
                hiveToCalcite.put(hFnName, calciteFn);
                if (hiveToken != null) {
                    calciteToHiveToken.put(calciteFn, hiveToken);
                }
            }
        }

        // Registers an alias (e.g. "==", "!=") directly under the given name, bypassing
        // FunctionRegistry; the reverse map keeps the canonical name registered first.
        private void registerDuplicateFunction(
                String name, SqlOperator calciteFn, HiveToken hiveToken) {
            hiveToCalcite.put(name, calciteFn);
            if (hiveToken != null) {
                calciteToHiveToken.put(calciteFn, hiveToken);
            }
        }
    }

    private static HiveToken hToken(int type, String text) {
        return new HiveToken(type, text);
    }

    /** UDAF is assumed to be deterministic. */
    private static class CalciteUDAF extends SqlAggFunction implements CanAggregateDistinct {
        private final boolean isDistinct;

        public CalciteUDAF(
                boolean isDistinct,
                String opName,
                SqlIdentifier identifier,
                SqlReturnTypeInference returnTypeInference,
                SqlOperandTypeInference operandTypeInference,
                SqlOperandTypeChecker operandTypeChecker) {
            super(
                    opName,
                    identifier,
                    SqlKind.OTHER_FUNCTION,
                    returnTypeInference,
                    operandTypeInference,
                    operandTypeChecker,
                    SqlFunctionCategory.USER_DEFINED_FUNCTION);
            this.isDistinct = isDistinct;
        }

        @Override
        public boolean isDistinct() {
            return isDistinct;
        }
    }

    /** CalciteSqlFn. */
    public static class CalciteSqlFn extends SqlFunction {
        private final boolean deterministic;

        public CalciteSqlFn(
                String name,
                SqlIdentifier identifier,
                SqlKind kind,
                SqlReturnTypeInference returnTypeInference,
                SqlOperandTypeInference operandTypeInference,
                SqlOperandTypeChecker operandTypeChecker,
                SqlFunctionCategory category,
                boolean deterministic) {
            super(
                    name,
                    identifier,
                    kind,
                    returnTypeInference,
                    operandTypeInference,
                    operandTypeChecker,
                    category);
            this.deterministic = deterministic;
        }

        @Override
        public boolean isDeterministic() {
            return deterministic;
        }
    }

    /** Bundle of the pieces needed to construct a Calcite operator for a Hive UDF. */
    private static class CalciteUDFInfo {
        private String udfName;
        // need an identifier if we have a composite name
        private SqlIdentifier identifier;
        private SqlReturnTypeInference returnTypeInference;
        private SqlOperandTypeInference operandTypeInference;
        private SqlOperandTypeChecker operandTypeChecker;
    }

    private static CalciteUDFInfo getUDFInfo(
            String hiveUdfName, List<RelDataType> calciteArgTypes, RelDataType calciteRetType) {
        CalciteUDFInfo udfInfo = new CalciteUDFInfo();
        udfInfo.udfName = hiveUdfName;
        String[] nameParts = hiveUdfName.split("\\.");
        if (nameParts.length > 1) {
            // composite name like "db.func" needs an explicit identifier
            udfInfo.identifier = new SqlIdentifier(Arrays.asList(nameParts), new SqlParserPos(0, 0));
        }
        udfInfo.returnTypeInference = ReturnTypes.explicit(calciteRetType);
        udfInfo.operandTypeInference = InferTypes.explicit(calciteArgTypes);
        List<SqlTypeFamily> typeFamily = new ArrayList<>();
        for (RelDataType argType : calciteArgTypes) {
            typeFamily.add(Util.first(argType.getSqlTypeName().getFamily(), SqlTypeFamily.ANY));
        }
        udfInfo.operandTypeChecker = OperandTypes.family(Collections.unmodifiableList(typeFamily));
        return udfInfo;
    }

    /**
     * Returns the Calcite scalar operator for the given Hive UDF name, creating a new
     * {@link CalciteSqlFn} when no built-in mapping exists.
     */
    public static SqlOperator getCalciteFn(
            String hiveUdfName,
            List<RelDataType> calciteArgTypes,
            RelDataType calciteRetType,
            boolean deterministic) {
        SqlOperator calciteOp;
        CalciteUDFInfo uInf = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
        switch (hiveUdfName) {
                // Follow hive's rules for type inference as oppose to Calcite's
                // for return type.
                // TODO: Perhaps we should do this for all functions, not just +,-
            case "-":
                calciteOp =
                        new SqlMonotonicBinaryOperator(
                                "-",
                                SqlKind.MINUS,
                                40,
                                true,
                                uInf.returnTypeInference,
                                uInf.operandTypeInference,
                                OperandTypes.MINUS_OPERATOR);
                break;
            case "+":
                calciteOp =
                        new SqlMonotonicBinaryOperator(
                                "+",
                                SqlKind.PLUS,
                                40,
                                true,
                                uInf.returnTypeInference,
                                uInf.operandTypeInference,
                                OperandTypes.PLUS_OPERATOR);
                break;
            default:
                calciteOp = HIVE_TO_CALCITE.get(hiveUdfName);
                if (null == calciteOp) {
                    calciteOp =
                            new CalciteSqlFn(
                                    uInf.udfName,
                                    uInf.identifier,
                                    SqlKind.OTHER_FUNCTION,
                                    uInf.returnTypeInference,
                                    uInf.operandTypeInference,
                                    uInf.operandTypeChecker,
                                    SqlFunctionCategory.USER_DEFINED_FUNCTION,
                                    deterministic);
                }
                break;
        }
        return calciteOp;
    }

    /**
     * Returns the Calcite aggregate function for the given Hive UDAF name, using specialized
     * implementations for sum/count/min/max and a generic {@link CalciteUDAF} otherwise.
     */
    public static SqlAggFunction getCalciteAggFn(
            String hiveUdfName,
            boolean isDistinct,
            List<RelDataType> calciteArgTypes,
            RelDataType calciteRetType) {
        SqlAggFunction calciteAggFn = (SqlAggFunction) HIVE_TO_CALCITE.get(hiveUdfName);
        if (calciteAggFn == null) {
            CalciteUDFInfo udfInfo = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
            switch (hiveUdfName.toLowerCase()) {
                case "sum":
                    calciteAggFn =
                            new HiveParserSqlSumAggFunction(
                                    isDistinct,
                                    udfInfo.returnTypeInference,
                                    udfInfo.operandTypeInference,
                                    udfInfo.operandTypeChecker);
                    break;
                case "count":
                    calciteAggFn =
                            new HiveParserSqlCountAggFunction(
                                    isDistinct,
                                    udfInfo.returnTypeInference,
                                    udfInfo.operandTypeInference,
                                    udfInfo.operandTypeChecker);
                    break;
                case "min":
                    calciteAggFn =
                            new HiveParserSqlMinMaxAggFunction(
                                    udfInfo.returnTypeInference,
                                    udfInfo.operandTypeInference,
                                    udfInfo.operandTypeChecker,
                                    true);
                    break;
                case "max":
                    calciteAggFn =
                            new HiveParserSqlMinMaxAggFunction(
                                    udfInfo.returnTypeInference,
                                    udfInfo.operandTypeInference,
                                    udfInfo.operandTypeChecker,
                                    false);
                    break;
                default:
                    calciteAggFn =
                            new CalciteUDAF(
                                    isDistinct,
                                    udfInfo.udfName,
                                    udfInfo.identifier,
                                    udfInfo.returnTypeInference,
                                    udfInfo.operandTypeInference,
                                    udfInfo.operandTypeChecker);
                    break;
            }
        }
        return calciteAggFn;
    }

    /** A Hive AST token: parser token type plus its textual form. */
    static class HiveToken {
        int type;
        String text;
        String[] args;

        HiveToken(int type, String text, String... args) {
            this.type = type;
            this.text = text;
            this.args = args;
        }
    }

    /** CanAggregateDistinct. */
    public interface CanAggregateDistinct {
        boolean isDistinct();
    }
}
| apache-2.0 |
CesarPantoja/jena | jena-sdb/src/test/java/org/apache/jena/sdb/test/graph/TestDB2Graph.java | 2564 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.sdb.test.graph;
import junit.framework.TestSuite;
import org.apache.jena.graph.Graph ;
import org.apache.jena.sdb.SDBFactory ;
import org.apache.jena.sdb.Store ;
import org.apache.jena.sdb.test.StoreCreator ;
/**
 * Aggregates the DB2-backed graph test suites: index and hash store layouts,
 * each exercised through both the default graph and a named graph.
 */
public class TestDB2Graph {

    public static junit.framework.Test suite() {
        TestSuite suite = new TestSuite();
        suite.addTestSuite(TestDB2IndexGraph.class);
        suite.addTestSuite(TestDB2IndexQuadGraph.class);
        suite.addTestSuite(TestDB2HashGraph.class);
        suite.addTestSuite(TestDB2HashQuadGraph.class);
        return suite;
    }

    /** Index store layout, default graph. */
    public static class TestDB2IndexGraph extends AbstractTestGraphSDB {
        public TestDB2IndexGraph(String name) {
            super(name);
        }

        @Override
        public Graph getGraph() {
            return SDBFactory.connectDefaultGraph(StoreCreator.getIndexDB2());
        }
    }

    /** Index store layout, named graph. */
    public static class TestDB2IndexQuadGraph extends AbstractTestGraphSDB {
        public TestDB2IndexQuadGraph(String name) {
            super(name);
        }

        @Override
        public Graph getGraph() {
            return SDBFactory.connectNamedGraph(StoreCreator.getIndexDB2(), "http://example.com/graph");
        }
    }

    /** Hash store layout, default graph. */
    public static class TestDB2HashGraph extends AbstractTestGraphSDB {
        public TestDB2HashGraph(String name) {
            super(name);
        }

        @Override
        public Graph getGraph() {
            return SDBFactory.connectDefaultGraph(StoreCreator.getHashDB2());
        }
    }

    /** Hash store layout, named graph. */
    public static class TestDB2HashQuadGraph extends AbstractTestGraphSDB {
        public TestDB2HashQuadGraph(String name) {
            super(name);
        }

        @Override
        public Graph getGraph() {
            return SDBFactory.connectNamedGraph(StoreCreator.getHashDB2(), "http://example.com/graph");
        }
    }
}
| apache-2.0 |
fluxcapacitor/pipeline | predict/dashboard/hystrix-core/src/main/java/com/netflix/hystrix/metric/consumer/RollingCollapserEventCounterStream.java | 4642 | /**
* Copyright 2015 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.hystrix.metric.consumer;
import com.netflix.hystrix.HystrixCollapserKey;
import com.netflix.hystrix.HystrixCollapserMetrics;
import com.netflix.hystrix.HystrixCollapserProperties;
import com.netflix.hystrix.HystrixEventType;
import com.netflix.hystrix.metric.HystrixCollapserEvent;
import com.netflix.hystrix.metric.HystrixCollapserEventStream;
import rx.functions.Func2;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Maintains a stream of event counters for a given collapser, with a rolling
 * window abstraction over the stream.
 *
 * <p>The counters are computed over a window of t1 milliseconds split into b
 * buckets, so a fresh set of counters is emitted every t1/b milliseconds:
 * t1 = {@link com.netflix.hystrix.HystrixCollapserProperties#metricsRollingStatisticalWindowInMilliseconds()},
 * b = {@link com.netflix.hystrix.HystrixCollapserProperties#metricsRollingStatisticalWindowBuckets()}.
 *
 * <p>Bucket values are stable: a bucket is never observed until it is emitted.
 * The most recently emitted value is cached and may be queried via
 * {@code getLatest(HystrixEventType.Collapser)}.
 */
public class RollingCollapserEventCounterStream extends BucketedRollingCounterStream<HystrixCollapserEvent, long[], long[]> {

    // One cached stream per collapser name; populated lazily by getInstance.
    private static final ConcurrentMap<String, RollingCollapserEventCounterStream> streams = new ConcurrentHashMap<String, RollingCollapserEventCounterStream>();

    private static final int NUM_EVENT_TYPES = HystrixEventType.Collapser.values().length;

    /** Looks up (or creates) the stream for the collapser, sizing the window from its properties. */
    public static RollingCollapserEventCounterStream getInstance(HystrixCollapserKey collapserKey, HystrixCollapserProperties properties) {
        final int windowInMs = properties.metricsRollingStatisticalWindowInMilliseconds().get();
        final int bucketCount = properties.metricsRollingStatisticalWindowBuckets().get();
        return getInstance(collapserKey, bucketCount, windowInMs / bucketCount);
    }

    /** Looks up (or creates, under a class-wide lock) the cached stream for the collapser name. */
    public static RollingCollapserEventCounterStream getInstance(HystrixCollapserKey collapserKey, int numBuckets, int bucketSizeInMs) {
        // Fast path: no locking when the stream already exists.
        RollingCollapserEventCounterStream stream = streams.get(collapserKey.name());
        if (stream != null) {
            return stream;
        }
        synchronized (RollingCollapserEventCounterStream.class) {
            // Re-check under the lock in case another thread created it first.
            stream = streams.get(collapserKey.name());
            if (stream != null) {
                return stream;
            }
            RollingCollapserEventCounterStream created = new RollingCollapserEventCounterStream(collapserKey, numBuckets, bucketSizeInMs, HystrixCollapserMetrics.appendEventToBucket, HystrixCollapserMetrics.bucketAggregator);
            streams.putIfAbsent(collapserKey.name(), created);
            return created;
        }
    }

    /** Drops all cached streams (intended for tests / resets). */
    public static void reset() {
        streams.clear();
    }

    private RollingCollapserEventCounterStream(HystrixCollapserKey collapserKey, int numCounterBuckets, int counterBucketSizeInMs,
                                               Func2<long[], HystrixCollapserEvent, long[]> appendEventToBucket,
                                               Func2<long[], long[], long[]> reduceBucket) {
        super(HystrixCollapserEventStream.getInstance(collapserKey), numCounterBuckets, counterBucketSizeInMs, appendEventToBucket, reduceBucket);
    }

    @Override
    long[] getEmptyBucketSummary() {
        // One counter slot per collapser event type.
        return new long[NUM_EVENT_TYPES];
    }

    @Override
    long[] getEmptyOutputValue() {
        return new long[NUM_EVENT_TYPES];
    }

    /** Returns the most recently emitted count for the given collapser event type. */
    public long getLatest(HystrixEventType.Collapser eventType) {
        return getLatest()[eventType.ordinal()];
    }
}
| apache-2.0 |
sischnei/jbehave-core | examples/i18n/src/main/java/org/jbehave/examples/trader/i18n/FrStories.java | 450 | package org.jbehave.examples.trader.i18n;
import java.util.Locale;
import org.jbehave.examples.trader.i18n.steps.FrSteps;
/** French localization of the trader example stories. */
public class FrStories extends LocalizedStories {

    @Override
    protected Locale locale() {
        // Use the canonical JDK constant rather than allocating a new
        // Locale("fr"); the two compare equal, and the Locale constructors
        // are deprecated in modern JDKs.
        return Locale.FRENCH;
    }

    @Override
    protected String storyPattern() {
        // Story files use the French ".histoire" extension.
        return "**/*.histoire";
    }

    @Override
    protected Object localizedSteps() {
        return new FrSteps();
    }
}
| bsd-3-clause |
boneyao/actor-platform | actor-apps/core/src/main/java/im/actor/model/api/rpc/RequestClearChat.java | 1493 | package im.actor.model.api.rpc;
/*
* Generated by the Actor API Scheme generator. DO NOT EDIT!
*/
import im.actor.model.droidkit.bser.Bser;
import im.actor.model.droidkit.bser.BserValues;
import im.actor.model.droidkit.bser.BserWriter;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.NotNull;
import java.io.IOException;
import im.actor.model.network.parser.*;
import im.actor.model.api.*;
/**
 * RPC request that asks the server to clear the chat history with a peer.
 * NOTE: this file is produced by the Actor API scheme generator (see the file
 * header) — do not hand-edit the generated logic.
 */
public class RequestClearChat extends Request<ResponseSeq> {

    // Wire header identifying this request type.
    public static final int HEADER = 0x63;

    /** Deserializes a RequestClearChat from its Bser byte representation. */
    public static RequestClearChat fromBytes(byte[] data) throws IOException {
        return Bser.parse(new RequestClearChat(), data);
    }

    // Peer whose chat is to be cleared; mandatory on the wire (field 1).
    private OutPeer peer;

    public RequestClearChat(@NotNull OutPeer peer) {
        this.peer = peer;
    }

    /** No-arg constructor used by the Bser parser before {@link #parse}. */
    public RequestClearChat() {
    }

    @NotNull
    public OutPeer getPeer() {
        return this.peer;
    }

    @Override
    public void parse(BserValues values) throws IOException {
        this.peer = values.getObj(1, new OutPeer());
    }

    @Override
    public void serialize(BserWriter writer) throws IOException {
        // The peer is mandatory; refuse to encode an incomplete request.
        if (this.peer == null) {
            throw new IOException();
        }
        writer.writeObject(1, this.peer);
    }

    @Override
    public String toString() {
        String res = "rpc ClearChat{";
        res += "peer=" + this.peer;
        res += "}";
        return res;
    }

    @Override
    public int getHeaderKey() {
        return HEADER;
    }
}
| mit |
joval/jna | test/com/sun/jna/IntegerTypeTest.java | 4009 | package com.sun.jna;
import junit.framework.TestCase;
/** Tests for JNA's IntegerType: structure integration, size/bounds checking, and unsigned handling. */
public class IntegerTypeTest extends TestCase {

    /** Minimal concrete IntegerType fixture; the no-arg form is a 4-byte zero. */
    public static class Sized extends IntegerType {
        public Sized() { this(4, 0); }
        public Sized(int size, long value) { super(size, value); }
    }

    // An IntegerType field should be auto-instantiated when the structure is created.
    public void testWriteNull() {
        class NTStruct extends Structure {
            public Sized field;
        }
        NTStruct s = new NTStruct();
        assertNotNull("Field not initialized", s.field);
    }

    // Reading a structure should populate (not null out) an IntegerType field.
    public void testReadNull() {
        class NTStruct extends Structure {
            public Sized field;
        }
        NTStruct s = new NTStruct();
        s.read();
        assertNotNull("Integer type field should be initialized on read", s.field);
    }

    // For each size 1,2,4,8: values that fit (signed extremes, -1, 0, 1, and the
    // all-bits-set unsigned max) must be accepted; values needing more bits must throw.
    public void testCheckArgumentSize() {
        for (int i=1;i <= 8;i*=2) {
            long value = -1L << (i*8-1);   // most negative value for this size
            new Sized(i, value);
            new Sized(i, -1);
            new Sized(i, 0);
            new Sized(i, 1);
            value = 1L << (i*8-1);          // smallest value exceeding signed range (unsigned MSB set)
            new Sized(i, value);
            value = -1L & ~(-1L << (i*8));  // unsigned max for this size (all bits set)
            new Sized(i, value);
            if (i < 8) {
                try {
                    value = 1L << (i*8);    // one bit past the size's capacity
                    new Sized(i, value);
                    fail("Value exceeding size (" + i + ") should fail");
                }
                catch(IllegalArgumentException e) {
                }
            }
            if (i < 8) {
                try {
                    value = -1L << (i*8);   // negative value too wide for the size
                    new Sized(i, value);
                    fail("Negative value (" + value + ") exceeding size (" + i + ") should fail");
                }
                catch(IllegalArgumentException e) {
                }
            }
        }
    }

    public void testInitialValue() {
        long VALUE = 20;
        NativeLong nl = new NativeLong(VALUE);
        assertEquals("Wrong initial value", VALUE, nl.longValue());
    }

    // Each size must reject the first value that no longer fits in that many bytes.
    public void testValueBoundaries() {
        class TestType extends IntegerType {
            public TestType(int size, long value) {
                super(size, value);
            }
        }
        try {
            new TestType(1, 0x100L);
            fail("Exception should be thrown if byte value out of bounds");
        }
        catch(IllegalArgumentException e) {
        }
        try {
            new TestType(2, 0x10000L);
            fail("Exception should be thrown if short value out of bounds");
        }
        catch(IllegalArgumentException e) {
        }
        try {
            new TestType(4, 0x100000000L);
            fail("Exception should be thrown if int value out of bounds");
        }
        catch(IllegalArgumentException e) {
        }
    }

    public void testUnsignedValues() {
        class TestType extends IntegerType {
            public TestType(int size, long value) {
                super(size, value);
            }
        }
        long VALUE = 0xFF;
        assertEquals("Wrong unsigned byte value", VALUE, new TestType(1, VALUE).longValue());
        VALUE = 0xFFFF;
        assertEquals("Wrong unsigned short value", VALUE, new TestType(2, VALUE).longValue());
        // NOTE(review): 0xFFFFFFFF is an *int* literal equal to -1, so VALUE
        // becomes -1L here rather than 4294967295. The assertion passes, but
        // possibly not for the reason the message suggests — 0xFFFFFFFFL would
        // express the unsigned value. Confirm intent before changing.
        VALUE = 0xFFFFFFFF;
        assertEquals("Wrong unsigned int value", VALUE, new TestType(4, VALUE).longValue());
        // With the unsigned flag set, negative inputs must read back as positive.
        class UnsignedTestType extends IntegerType {
            public UnsignedTestType(int size, long value) {
                super(size, value, true);
            }
        }
        UnsignedTestType tt = new UnsignedTestType(4, -1);
        assertTrue("Expected an unsigned value (ctor): " + tt.longValue(), tt.longValue() > 0);
        tt.setValue(-2);
        assertTrue("Expected an unsigned value: " + tt.longValue(), tt.longValue() > 0);
    }

    public static void main(String[] args) {
        junit.textui.TestRunner.run(IntegerTypeTest.class);
    }
}
| lgpl-2.1 |
syntelos/cddb | src/org/jaudiotagger/tag/mp4/atom/Mp4MeanBox.java | 1458 | package org.jaudiotagger.tag.mp4.atom;
import org.jaudiotagger.audio.generic.Utils;
import org.jaudiotagger.audio.mp4.atom.AbstractMp4Box;
import org.jaudiotagger.audio.mp4.atom.Mp4BoxHeader;
import java.nio.ByteBuffer;
/**
 * The {@code mean} box found inside {@code ----} boxes; it carries the issuer
 * string that namespaces the custom metadata atom.
 */
public class Mp4MeanBox extends AbstractMp4Box
{
    public static final String IDENTIFIER = "mean";

    private String issuer;

    // Leading bytes before the payload: a version byte followed by three flag
    // bytes (TODO from the original author: these may simply be null bytes).
    public static final int VERSION_LENGTH = 1;
    public static final int FLAGS_LENGTH = 3;
    public static final int PRE_DATA_LENGTH = VERSION_LENGTH + FLAGS_LENGTH;

    /**
     * @param header     parent header info
     * @param dataBuffer box payload (header bytes excluded)
     */
    public Mp4MeanBox(Mp4BoxHeader header, ByteBuffer dataBuffer)
    {
        this.header = header;
        // Guard against being handed the wrong box type.
        if (!header.getId().equals(IDENTIFIER))
        {
            throw new RuntimeException("Unable to process data box because identifier is:" + header.getId());
        }
        // Slice so reads here never move the position of the caller's buffer.
        this.dataBuffer = dataBuffer.slice();
        // The issuer string is everything after the version/flags prefix.
        int issuerLength = header.getDataLength() - PRE_DATA_LENGTH;
        this.issuer = Utils.getString(this.dataBuffer, PRE_DATA_LENGTH, issuerLength, header.getEncoding());
    }

    public String getIssuer()
    {
        return issuer;
    }
}
| lgpl-3.0 |
lwriemen/bridgepoint | src/org.xtuml.bp.ui.graphics/src/org/xtuml/bp/ui/graphics/utilities/GraphicsUtil.java | 5431 | //========================================================================
//
//File: $RCSfile: GraphicsUtil.java,v $
//Version: $Revision: 1.6 $
//Modified: $Date: 2013/01/10 23:06:24 $
//
//(c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//========================================================================
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
//========================================================================
//
package org.xtuml.bp.ui.graphics.utilities;
import org.eclipse.draw2d.geometry.Point;
import org.eclipse.draw2d.geometry.PointList;
import org.eclipse.draw2d.geometry.PrecisionPoint;
import org.eclipse.draw2d.geometry.Rectangle;
import org.eclipse.gef.EditPart;
import org.xtuml.bp.ui.canvas.Connector_c;
import org.xtuml.bp.ui.canvas.FloatingText_c;
import org.xtuml.bp.ui.canvas.GraphicalElement_c;
import org.xtuml.bp.ui.canvas.LineSegment_c;
import org.xtuml.bp.ui.canvas.Model_c;
import org.xtuml.bp.ui.canvas.Shape_c;
import org.xtuml.bp.ui.canvas.Waypoint_c;
import org.xtuml.bp.ui.graphics.parts.ConnectorEditPart;
import org.xtuml.bp.ui.graphics.parts.DiagramEditPart;
/** Static helpers shared by the graphical editor parts. */
public class GraphicsUtil {

    /**
     * Computes freeform-layer bounds large enough to hold {@code extent} plus a
     * 100-pixel growth margin, never smaller than the default viewport size.
     * Note: mutates the width/height of the rectangle passed in.
     */
    public static Rectangle calculateFreeformBoundsWithBuffer(Rectangle extent) {
        // Grow past the defaults only when the content actually exceeds them.
        extent.width = (extent.width > DiagramEditPart.DEFAULT_VIEWPORT_WIDTH)
                ? extent.width + 100
                : DiagramEditPart.DEFAULT_VIEWPORT_WIDTH;
        extent.height = (extent.height > DiagramEditPart.DEFAULT_VIEWPORT_HEIGHT)
                ? extent.height + 100
                : DiagramEditPart.DEFAULT_VIEWPORT_HEIGHT;
        // Origin stays at (0,0) unless the extent reaches into negative space,
        // in which case back off by the same 100-pixel margin.
        int originX = (extent.x < 0) ? extent.x - 100 : 0;
        int originY = (extent.y < 0) ? extent.y - 100 : 0;
        return new Rectangle(originX, originY, extent.width, extent.height);
    }

    /**
     * Snaps {@code point} onto the reference's x and/or y coordinate when it is
     * within {@code tolerance} pixels, keeping any precision coordinates in sync.
     */
    public static void synchronizePoints(Point point, Point reference,
            int tolerance) {
        boolean snapX = Math.abs(point.x - reference.x) <= tolerance;
        boolean snapY = Math.abs(point.y - reference.y) <= tolerance;
        if (snapX) {
            point.setLocation(reference.x, point.y);
            if (point instanceof PrecisionPoint) {
                ((PrecisionPoint) point).setPreciseX(point.x);
            }
        }
        if (snapY) {
            point.setLocation(point.x, reference.y);
            if (point instanceof PrecisionPoint) {
                ((PrecisionPoint) point).setPreciseY(point.y);
            }
        }
    }

    /** Returns true when both lists contain the same points in the same order. */
    public static boolean pointsAreEqual(PointList points, PointList otherPoints) {
        int count = points.size();
        if (count != otherPoints.size()) {
            return false;
        }
        for (int i = 0; i < count; i++) {
            if (!points.getPoint(i).equals(otherPoints.getPoint(i))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Builds the connector's polyline from its ordered model segments: each
     * segment contributes its start waypoint, and the final segment also
     * contributes its end waypoint.
     */
    public static PointList getPointsFromModelConnector(
            ConnectorEditPart editPart) {
        LineSegment_c[] orderedSegments = editPart.getOrderedSegments();
        PointList points = new PointList();
        int lastIndex = orderedSegments.length - 1;
        for (int i = 0; i < orderedSegments.length; i++) {
            Waypoint_c start = Waypoint_c.getOneDIM_WAYOnR21(orderedSegments[i]);
            points.addPoint((int) start.getPositionx(), (int) start.getPositiony());
            if (i == lastIndex) {
                Waypoint_c end = Waypoint_c.getOneDIM_WAYOnR22(orderedSegments[i]);
                points.addPoint((int) end.getPositionx(), (int) end.getPositiony());
            }
        }
        return points;
    }

    /**
     * Resolves the model element an edit part represents, looking through the
     * graphical-element layer for shapes, connectors, and floating text.
     * Returns null when no graphical element is associated with the part.
     */
    public static Object getRepresentsFromEditPart(EditPart part) {
        Object model = part.getModel();
        GraphicalElement_c element = null;
        if (model instanceof Shape_c) {
            element = GraphicalElement_c.getOneGD_GEOnR2((Shape_c) model);
        } else if (model instanceof Connector_c) {
            element = GraphicalElement_c.getOneGD_GEOnR2((Connector_c) model);
        } else if (model instanceof FloatingText_c) {
            // A floating text belongs to either a shape (R27) or a connector (R8).
            FloatingText_c text = (FloatingText_c) model;
            Shape_c shape = Shape_c.getOneGD_SHPOnR27(text);
            Connector_c connector = Connector_c.getOneGD_CONOnR8(text);
            if (shape != null) {
                element = GraphicalElement_c.getOneGD_GEOnR2(shape);
            }
            if (connector != null) {
                element = GraphicalElement_c.getOneGD_GEOnR2(connector);
            }
        } else if (model instanceof Model_c) {
            // The diagram itself carries its represents directly.
            return ((Model_c) model).getRepresents();
        }
        return (element == null) ? null : element.getRepresents();
    }
}
| apache-2.0 |
bruthe/hadoop-2.6.0r | src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java | 6864 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
/**
 * BlockMetadataHeader manages metadata for data blocks on Datanodes.
 * This is not related to the Block related functionality in Namenode.
 * The biggest part of data block metadata is CRC for the block.
 *
 * On disk the layout is: a 2-byte version, then a 5-byte DataChecksum
 * descriptor, then the checksum bytes themselves (not covered here).
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockMetadataHeader {

    private static final Log LOG = LogFactory.getLog(BlockMetadataHeader.class);

    // Current on-disk header format version.
    public static final short VERSION = 1;

    /**
     * Header includes everything except the checksum(s) themselves.
     * Version is two bytes. Following it is the DataChecksum
     * that occupies 5 bytes.
     */
    private final short version;
    private DataChecksum checksum = null;

    @VisibleForTesting
    public BlockMetadataHeader(short version, DataChecksum checksum) {
        this.checksum = checksum;
        this.version = version;
    }

    /** Get the version */
    public short getVersion() {
        return version;
    }

    /** Get the checksum */
    public DataChecksum getChecksum() {
        return checksum;
    }

    /**
     * Read the checksum header from the meta file.
     * @return the data checksum obtained from the header.
     */
    public static DataChecksum readDataChecksum(File metaFile) throws IOException {
        DataInputStream in = null;
        try {
            in = new DataInputStream(new BufferedInputStream(
                new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE));
            return readDataChecksum(in, metaFile);
        } finally {
            // Always close the stream, even when header parsing throws.
            IOUtils.closeStream(in);
        }
    }

    /**
     * Read the checksum header from the meta input stream.
     * A version mismatch is only logged, not treated as an error.
     * @return the data checksum obtained from the header.
     */
    public static DataChecksum readDataChecksum(final DataInputStream metaIn,
        final Object name) throws IOException {
        // read and handle the common header here. For now just a version
        final BlockMetadataHeader header = readHeader(metaIn);
        if (header.getVersion() != VERSION) {
            LOG.warn("Unexpected meta-file version for " + name
                + ": version in file is " + header.getVersion()
                + " but expected version is " + VERSION);
        }
        return header.getChecksum();
    }

    /**
     * Read the header without changing the position of the FileChannel.
     *
     * @param fc The FileChannel to read.
     * @return the Metadata Header.
     * @throws IOException on error.
     */
    public static BlockMetadataHeader preadHeader(FileChannel fc)
        throws IOException {
        final byte arr[] = new byte[getHeaderSize()];
        ByteBuffer buf = ByteBuffer.wrap(arr);
        // Positional reads (offset 0) leave the channel's position untouched.
        while (buf.hasRemaining()) {
            if (fc.read(buf, 0) <= 0) {
                throw new EOFException("unexpected EOF while reading " +
                    "metadata file header");
            }
        }
        // First two bytes are the big-endian version; the checksum descriptor follows.
        short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
        DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
        return new BlockMetadataHeader(version, dataChecksum);
    }

    /**
     * This reads all the fields till the beginning of checksum.
     * @return Metadata Header
     * @throws IOException
     */
    public static BlockMetadataHeader readHeader(DataInputStream in) throws IOException {
        return readHeader(in.readShort(), in);
    }

    /**
     * Reads header at the top of metadata file and returns the header.
     *
     * @return metadata header for the block
     * @throws IOException
     */
    public static BlockMetadataHeader readHeader(File file) throws IOException {
        DataInputStream in = null;
        try {
            in = new DataInputStream(new BufferedInputStream(
                new FileInputStream(file)));
            return readHeader(in);
        } finally {
            IOUtils.closeStream(in);
        }
    }

    /**
     * Read the header at the beginning of the given block meta file.
     * The current file position will be altered by this method.
     * If an error occurs, the file is <em>not</em> closed.
     */
    static BlockMetadataHeader readHeader(RandomAccessFile raf) throws IOException {
        byte[] buf = new byte[getHeaderSize()];
        raf.seek(0);
        raf.readFully(buf, 0, buf.length);
        return readHeader(new DataInputStream(new ByteArrayInputStream(buf)));
    }

    // Version is already read.
    // Continues from the stream position right after the 2-byte version field.
    private static BlockMetadataHeader readHeader(short version, DataInputStream in)
        throws IOException {
        DataChecksum checksum = DataChecksum.newDataChecksum(in);
        return new BlockMetadataHeader(version, checksum);
    }

    /**
     * This writes all the fields till the beginning of checksum.
     * @param out DataOutputStream
     * @throws IOException
     */
    @VisibleForTesting
    public static void writeHeader(DataOutputStream out,
                                   BlockMetadataHeader header)
        throws IOException {
        out.writeShort(header.getVersion());
        header.getChecksum().writeHeader(out);
    }

    /**
     * Writes all the fields till the beginning of checksum,
     * stamping the current VERSION.
     * @throws IOException on error
     */
    public static void writeHeader(DataOutputStream out, DataChecksum checksum)
        throws IOException {
        writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
    }

    /**
     * Returns the size of the header: 2 bytes of version plus the
     * DataChecksum descriptor.
     */
    public static int getHeaderSize() {
        return Short.SIZE/Byte.SIZE + DataChecksum.getChecksumHeaderSize();
    }
}
| apache-2.0 |
farble1670/clover-boinc | lib/src/main/java/edu/berkeley/boinc/rpc/AppVersionsParser.java | 4001 | /*******************************************************************************
* This file is part of BOINC.
* http://boinc.berkeley.edu
* Copyright (C) 2012 University of California
*
* BOINC is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation,
* either version 3 of the License, or (at your option) any later version.
*
* BOINC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with BOINC. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package edu.berkeley.boinc.rpc;
import java.util.ArrayList;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import android.util.Xml;
/**
 * SAX handler that extracts {@code <app_version>} elements (app_name and
 * version_num) from a BOINC core-client RPC reply.
 */
public class AppVersionsParser extends DefaultHandler {
    // Accumulated results, one entry per completed <app_version> element.
    private ArrayList<AppVersion> mAppVersions = new ArrayList<AppVersion>();
    // The <app_version> currently being populated; null when outside one.
    private AppVersion mAppVersion = null;
    // Character data of the element currently being read.
    private StringBuilder mCurrentElement = new StringBuilder();

    public final ArrayList<AppVersion> getAppVersions() {
        return mAppVersions;
    }

    /**
     * Parse the RPC result (app_version) and generate corresponding vector
     * @param rpcResult String returned by RPC call of core client
     * @return vector of application version, or null if the XML is malformed
     */
    public static ArrayList<AppVersion> parse(String rpcResult) {
        try {
            AppVersionsParser parser = new AppVersionsParser();
            Xml.parse(rpcResult, parser);
            return parser.getAppVersions();
        }
        catch (SAXException e) {
            return null;
        }
    }

    @Override
    public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
        super.startElement(uri, localName, qName, attributes);
        if (localName.equalsIgnoreCase("app_version")) {
            mAppVersion = new AppVersion();
        }
    }

    @Override
    public void characters(char[] ch, int start, int length) throws SAXException {
        super.characters(ch, start, length);
        // put it into StringBuilder
        int myStart = start;
        int myLength = length;
        if (mCurrentElement.length() == 0) {
            // still empty - trim leading white-spaces
            for ( ; myStart < length; ++myStart, --myLength) {
                if (!Character.isWhitespace(ch[myStart])) {
                    // First non-white-space character
                    break;
                }
            }
        }
        mCurrentElement.append(ch, myStart, myLength);
    }

    @Override
    public void endElement(String uri, String localName, String qName) throws SAXException {
        super.endElement(uri, localName, qName);
        try {
            // Drop trailing whitespace the characters() callback accumulated.
            trimEnd();
            if (mAppVersion != null) {
                // We are inside <app_version>
                if (localName.equalsIgnoreCase("app_version")) {
                    // Closing tag of <app_version> - add to vector and be ready for next one
                    if (!mAppVersion.app_name.equals("")) {
                        // app_name is a must
                        mAppVersions.add(mAppVersion);
                    }
                    mAppVersion = null;
                }
                else {
                    // Not the closing tag - we decode possible inner tags
                    if (localName.equalsIgnoreCase("app_name")) {
                        mAppVersion.app_name = mCurrentElement.toString();
                    }
                    else if (localName.equalsIgnoreCase("version_num")) {
                        mAppVersion.version_num = Integer.parseInt(mCurrentElement.toString());
                    }
                }
            }
        }
        catch (NumberFormatException e) {
            // Malformed version_num is skipped; the element keeps its default.
        }
        mCurrentElement.setLength(0); // to be clean for next one
    }

    // Trims trailing whitespace from mCurrentElement in place.
    private void trimEnd() {
        int length = mCurrentElement.length();
        int i;
        // Trim trailing spaces
        for (i = length - 1; i >= 0; --i) {
            if (!Character.isWhitespace(mCurrentElement.charAt(i))) {
                // All trailing white-spaces are skipped, i is position of last character
                break;
            }
        }
        // i is position of last character
        mCurrentElement.setLength(i+1);
    }
}
| mit |
sincere520/testGitRepo | hudson-core/src/main/java/hudson/tasks/junit/ClassResult.java | 6427 | /*
* The MIT License
*
* Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi, Daniel Dyer, id:cactusman, Tom Huybrechts, Yahoo!, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.tasks.junit;
import hudson.model.AbstractBuild;
import hudson.tasks.test.TabulatedResult;
import hudson.tasks.test.TestResult;
import hudson.tasks.test.TestObject;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.StaplerResponse;
import org.kohsuke.stapler.export.Exported;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Cumulative test result of a test class.
 *
 * @author Kohsuke Kawaguchi
 */
public final class ClassResult extends TabulatedResult implements Comparable<ClassResult> {

    /** Simple (package-less) name of the test class. */
    private final String className;

    private final List<CaseResult> cases = new ArrayList<CaseResult>();

    // Counters and total duration; recomputed by tally()/freeze().
    private int passCount, failCount, skipCount;

    private float duration;

    private final PackageResult parent;

    ClassResult(PackageResult parent, String className) {
        this.parent = parent;
        this.className = className;
    }

    @Override
    public AbstractBuild<?, ?> getOwner() {
        return (parent == null ? null : parent.getOwner());
    }

    public PackageResult getParent() {
        return parent;
    }

    /** Finds this class's result in the previous build's package result, if any. */
    @Override
    public ClassResult getPreviousResult() {
        if (parent == null) return null;
        TestResult pr = parent.getPreviousResult();
        if (pr == null) return null;
        if (pr instanceof PackageResult) {
            return ((PackageResult) pr).getClassResult(getName());
        }
        return null;
    }

    /**
     * Resolves a test-object id to the matching child case result, stripping
     * this class's safe-name prefix when present.
     */
    @Override
    public hudson.tasks.test.TestResult findCorrespondingResult(String id) {
        String myID = safe(getName());
        int base = id.indexOf(myID);
        String caseName;
        if (base > 0) {
            // Skip past "<myID>." to the case name portion of the id.
            int caseNameStart = base + myID.length() + 1;
            caseName = id.substring(caseNameStart);
        } else {
            caseName = id;
        }
        CaseResult child = getCaseResult(caseName);
        if (child != null) {
            return child;
        }
        return null;
    }

    public String getTitle() {
        return Messages.ClassResult_getTitle(getName());
    }

    @Override
    public String getChildTitle() {
        // Fixed typo: was "Class Reults".
        return "Class Results";
    }

    /** Returns the simple class name (package prefix stripped). */
    @Exported(visibility = 999)
    public String getName() {
        int idx = className.lastIndexOf('.');
        if (idx < 0) return className;
        else return className.substring(idx + 1);
    }

    public @Override String getSafeName() {
        return uniquifyName(parent.getChildren(), safe(getName()));
    }

    /** Looks up a child case by its URL-safe name; null if absent. */
    public CaseResult getCaseResult(String name) {
        for (CaseResult c : cases) {
            if (c.getSafeName().equals(name))
                return c;
        }
        return null;
    }

    @Override
    public Object getDynamic(String name, StaplerRequest req, StaplerResponse rsp) {
        CaseResult c = getCaseResult(name);
        if (c != null) {
            return c;
        } else {
            return super.getDynamic(name, req, rsp);
        }
    }

    @Exported(name = "child")
    public List<CaseResult> getChildren() {
        return cases;
    }

    public boolean hasChildren() {
        // cases is final and always initialized, so only emptiness matters.
        return !cases.isEmpty();
    }

    // TODO: wait for stapler 1.60 @Exported
    public float getDuration() {
        return duration;
    }

    @Exported
    public int getPassCount() {
        return passCount;
    }

    @Exported
    public int getFailCount() {
        return failCount;
    }

    @Exported
    public int getSkipCount() {
        return skipCount;
    }

    public void add(CaseResult r) {
        cases.add(r);
    }

    /**
     * Recount my children.
     */
    @Override
    public void tally() {
        recount();
    }

    /** Recounts and additionally freezes the presentation order of the cases. */
    void freeze() {
        recount();
        Collections.sort(cases);
    }

    /** Recomputes pass/fail/skip counters and total duration from the current cases. */
    private void recount() {
        passCount = failCount = skipCount = 0;
        duration = 0;
        for (CaseResult r : cases) {
            r.setClass(this);
            if (r.isSkipped()) {
                skipCount++;
            } else if (r.isPassed()) {
                passCount++;
            } else {
                failCount++;
            }
            duration += r.getDuration();
        }
    }

    public String getClassName() {
        return className;
    }

    public int compareTo(ClassResult that) {
        return this.className.compareTo(that.className);
    }

    public String getDisplayName() {
        return getName();
    }

    public String getFullName() {
        return getParent().getDisplayName() + "." + className;
    }

    /**
     * Gets the relative path to this test case from the given object.
     */
    @Override
    public String getRelativePathFrom(TestObject it) {
        if (it instanceof CaseResult) {
            return "..";
        } else {
            return super.getRelativePathFrom(it);
        }
    }
}
| mit |
cdut007/PMS_TASK | third_party/okhttp-master/okhttp-tests/src/test/java/com/squareup/okhttp/internal/spdy/Http2Test.java | 27487 | /*
* Copyright (C) 2013 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okhttp.internal.spdy;
import com.squareup.okhttp.internal.Util;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import okio.Buffer;
import okio.BufferedSink;
import okio.BufferedSource;
import okio.ByteString;
import okio.GzipSink;
import okio.Okio;
import org.junit.Test;
import static com.squareup.okhttp.TestUtil.headerEntries;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_COMPRESSED;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_END_HEADERS;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_END_STREAM;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_NONE;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_PADDED;
import static com.squareup.okhttp.internal.spdy.Http2.FLAG_PRIORITY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Round-trip and error-handling tests for the HTTP/2 framer
 * ({@link Http2.Reader} / {@link Http2.Writer}). Each test hand-assembles the
 * expected wire bytes into {@link #frame}, then checks that the writer produces
 * the same bytes and/or that the reader decodes them into the expected handler
 * callbacks.
 */
public class Http2Test {
  // Scratch buffer holding the hand-built wire bytes for the current test.
  final Buffer frame = new Buffer();
  // Reader under test, fed from 'frame'. 4096 is presumably the HPACK header
  // table size and the boolean a client/server flag — confirm against Http2.Reader.
  final FrameReader fr = new Http2.Reader(frame, 4096, false);
  // Arbitrary non-zero stream id used throughout the tests.
  final int expectedStreamId = 15;
  @Test public void unknownFrameTypeSkipped() throws IOException {
    writeMedium(frame, 4); // has a 4-byte field
    frame.writeByte(99); // type 99
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId);
    frame.writeInt(111111111); // custom data
    fr.nextFrame(new BaseTestHandler()); // Should not callback.
  }
  @Test public void onlyOneLiteralHeadersFrame() throws IOException {
    final List<Header> sentHeaders = headerEntries("name", "value");
    Buffer headerBytes = literalHeaders(sentHeaders);
    writeMedium(frame, (int) headerBytes.size());
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(FLAG_END_HEADERS | FLAG_END_STREAM);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeAll(headerBytes);
    assertEquals(frame, sendHeaderFrames(true, sentHeaders)); // Check writer sends the same bytes.
    fr.nextFrame(new BaseTestHandler() {
      @Override
      public void headers(boolean outFinished, boolean inFinished, int streamId,
          int associatedStreamId, List<Header> headerBlock, HeadersMode headersMode) {
        assertFalse(outFinished);
        assertTrue(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(-1, associatedStreamId);
        assertEquals(sentHeaders, headerBlock);
        assertEquals(HeadersMode.HTTP_20_HEADERS, headersMode);
      }
    });
  }
  @Test public void headersWithPriority() throws IOException {
    final List<Header> sentHeaders = headerEntries("name", "value");
    Buffer headerBytes = literalHeaders(sentHeaders);
    // FLAG_PRIORITY adds a 5-byte priority section (4-byte dependency + 1-byte weight).
    writeMedium(frame, (int) (headerBytes.size() + 5));
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(FLAG_END_HEADERS | FLAG_PRIORITY);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeInt(0); // Independent stream.
    frame.writeByte(255); // Heaviest weight, zero-indexed.
    frame.writeAll(headerBytes);
    fr.nextFrame(new BaseTestHandler() {
      @Override public void priority(int streamId, int streamDependency, int weight,
          boolean exclusive) {
        assertEquals(0, streamDependency);
        assertEquals(256, weight);
        assertFalse(exclusive);
      }
      @Override public void headers(boolean outFinished, boolean inFinished, int streamId,
          int associatedStreamId, List<Header> nameValueBlock,
          HeadersMode headersMode) {
        assertFalse(outFinished);
        assertFalse(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(-1, associatedStreamId);
        assertEquals(sentHeaders, nameValueBlock);
        assertEquals(HeadersMode.HTTP_20_HEADERS, headersMode);
      }
    });
  }
  /** Headers are compressed, then framed. */
  @Test public void headersFrameThenContinuation() throws IOException {
    final List<Header> sentHeaders = largeHeaders();
    Buffer headerBlock = literalHeaders(sentHeaders);
    // Write the first headers frame.
    writeMedium(frame, Http2.INITIAL_MAX_FRAME_SIZE);
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.write(headerBlock, Http2.INITIAL_MAX_FRAME_SIZE);
    // Write the continuation frame, specifying no more frames are expected.
    writeMedium(frame, (int) headerBlock.size());
    frame.writeByte(Http2.TYPE_CONTINUATION);
    frame.writeByte(FLAG_END_HEADERS);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeAll(headerBlock);
    assertEquals(frame, sendHeaderFrames(false, sentHeaders)); // Check writer sends the same bytes.
    // Reading the above frames should result in a concatenated headerBlock.
    fr.nextFrame(new BaseTestHandler() {
      @Override public void headers(boolean outFinished, boolean inFinished, int streamId,
          int associatedStreamId, List<Header> headerBlock, HeadersMode headersMode) {
        assertFalse(outFinished);
        assertFalse(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(-1, associatedStreamId);
        assertEquals(sentHeaders, headerBlock);
        assertEquals(HeadersMode.HTTP_20_HEADERS, headersMode);
      }
    });
  }
  @Test public void pushPromise() throws IOException {
    final int expectedPromisedStreamId = 11;
    final List<Header> pushPromise = Arrays.asList(
        new Header(Header.TARGET_METHOD, "GET"),
        new Header(Header.TARGET_SCHEME, "https"),
        new Header(Header.TARGET_AUTHORITY, "squareup.com"),
        new Header(Header.TARGET_PATH, "/")
    );
    // Write the push promise frame, specifying the associated stream ID.
    Buffer headerBytes = literalHeaders(pushPromise);
    writeMedium(frame, (int) (headerBytes.size() + 4)); // +4 for the promised stream id.
    frame.writeByte(Http2.TYPE_PUSH_PROMISE);
    frame.writeByte(Http2.FLAG_END_PUSH_PROMISE);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeInt(expectedPromisedStreamId & 0x7fffffff);
    frame.writeAll(headerBytes);
    assertEquals(frame, sendPushPromiseFrames(expectedPromisedStreamId, pushPromise));
    fr.nextFrame(new BaseTestHandler() {
      @Override
      public void pushPromise(int streamId, int promisedStreamId, List<Header> headerBlock) {
        assertEquals(expectedStreamId, streamId);
        assertEquals(expectedPromisedStreamId, promisedStreamId);
        assertEquals(pushPromise, headerBlock);
      }
    });
  }
  /** Headers are compressed, then framed. */
  @Test public void pushPromiseThenContinuation() throws IOException {
    final int expectedPromisedStreamId = 11;
    final List<Header> pushPromise = largeHeaders();
    // Decoding the first header will cross frame boundaries.
    Buffer headerBlock = literalHeaders(pushPromise);
    // Write the first headers frame.
    writeMedium(frame, Http2.INITIAL_MAX_FRAME_SIZE);
    frame.writeByte(Http2.TYPE_PUSH_PROMISE);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeInt(expectedPromisedStreamId & 0x7fffffff);
    frame.write(headerBlock, Http2.INITIAL_MAX_FRAME_SIZE - 4); // -4: promised id counts toward length.
    // Write the continuation frame, specifying no more frames are expected.
    writeMedium(frame, (int) headerBlock.size());
    frame.writeByte(Http2.TYPE_CONTINUATION);
    frame.writeByte(FLAG_END_HEADERS);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeAll(headerBlock);
    assertEquals(frame, sendPushPromiseFrames(expectedPromisedStreamId, pushPromise));
    // Reading the above frames should result in a concatenated headerBlock.
    fr.nextFrame(new BaseTestHandler() {
      @Override
      public void pushPromise(int streamId, int promisedStreamId, List<Header> headerBlock) {
        assertEquals(expectedStreamId, streamId);
        assertEquals(expectedPromisedStreamId, promisedStreamId);
        assertEquals(pushPromise, headerBlock);
      }
    });
  }
  @Test public void readRstStreamFrame() throws IOException {
    writeMedium(frame, 4);
    frame.writeByte(Http2.TYPE_RST_STREAM);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeInt(ErrorCode.COMPRESSION_ERROR.httpCode);
    fr.nextFrame(new BaseTestHandler() {
      @Override public void rstStream(int streamId, ErrorCode errorCode) {
        assertEquals(expectedStreamId, streamId);
        assertEquals(ErrorCode.COMPRESSION_ERROR, errorCode);
      }
    });
  }
  @Test public void readSettingsFrame() throws IOException {
    final int reducedTableSizeBytes = 16;
    writeMedium(frame, 12); // 2 settings * 6 bytes (2 for the code and 4 for the value).
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(1); // SETTINGS_HEADER_TABLE_SIZE
    frame.writeInt(reducedTableSizeBytes);
    frame.writeShort(2); // SETTINGS_ENABLE_PUSH
    frame.writeInt(0);
    fr.nextFrame(new BaseTestHandler() {
      @Override public void settings(boolean clearPrevious, Settings settings) {
        assertFalse(clearPrevious); // No clearPrevious in HTTP/2.
        assertEquals(reducedTableSizeBytes, settings.getHeaderTableSize());
        assertEquals(false, settings.getEnablePush(true));
      }
    });
  }
  @Test public void readSettingsFrameInvalidPushValue() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(2);
    frame.writeInt(2);
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR SETTINGS_ENABLE_PUSH != 0 or 1", e.getMessage());
    }
  }
  @Test public void readSettingsFrameInvalidSettingId() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(7); // old number for SETTINGS_INITIAL_WINDOW_SIZE
    frame.writeInt(1);
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR invalid settings id: 7", e.getMessage());
    }
  }
  @Test public void readSettingsFrameNegativeWindowSize() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(4); // SETTINGS_INITIAL_WINDOW_SIZE
    frame.writeInt(Integer.MIN_VALUE);
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR SETTINGS_INITIAL_WINDOW_SIZE > 2^31 - 1", e.getMessage());
    }
  }
  @Test public void readSettingsFrameNegativeFrameLength() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(5); // SETTINGS_MAX_FRAME_SIZE
    frame.writeInt(Integer.MIN_VALUE);
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR SETTINGS_MAX_FRAME_SIZE: -2147483648", e.getMessage());
    }
  }
  @Test public void readSettingsFrameTooShortFrameLength() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(5); // SETTINGS_MAX_FRAME_SIZE
    frame.writeInt((int) Math.pow(2, 14) - 1); // One below the protocol minimum of 2^14.
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR SETTINGS_MAX_FRAME_SIZE: 16383", e.getMessage());
    }
  }
  @Test public void readSettingsFrameTooLongFrameLength() throws IOException {
    writeMedium(frame, 6); // 2 for the code and 4 for the value
    frame.writeByte(Http2.TYPE_SETTINGS);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // Settings are always on the connection stream 0.
    frame.writeShort(5); // SETTINGS_MAX_FRAME_SIZE
    frame.writeInt((int) Math.pow(2, 24)); // One above the protocol maximum of 2^24 - 1.
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR SETTINGS_MAX_FRAME_SIZE: 16777216", e.getMessage());
    }
  }
  @Test public void pingRoundTrip() throws IOException {
    final int expectedPayload1 = 7;
    final int expectedPayload2 = 8;
    writeMedium(frame, 8); // length
    frame.writeByte(Http2.TYPE_PING);
    frame.writeByte(Http2.FLAG_ACK);
    frame.writeInt(0); // connection-level
    frame.writeInt(expectedPayload1);
    frame.writeInt(expectedPayload2);
    // Check writer sends the same bytes.
    assertEquals(frame, sendPingFrame(true, expectedPayload1, expectedPayload2));
    fr.nextFrame(new BaseTestHandler() {
      @Override public void ping(boolean ack, int payload1, int payload2) {
        assertTrue(ack);
        assertEquals(expectedPayload1, payload1);
        assertEquals(expectedPayload2, payload2);
      }
    });
  }
  @Test public void maxLengthDataFrame() throws IOException {
    final byte[] expectedData = new byte[Http2.INITIAL_MAX_FRAME_SIZE];
    Arrays.fill(expectedData, (byte) 2);
    writeMedium(frame, expectedData.length);
    frame.writeByte(Http2.TYPE_DATA);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.write(expectedData);
    // Check writer sends the same bytes.
    assertEquals(frame, sendDataFrame(new Buffer().write(expectedData)));
    fr.nextFrame(new BaseTestHandler() {
      @Override public void data(boolean inFinished, int streamId, BufferedSource source,
          int length) throws IOException {
        assertFalse(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(Http2.INITIAL_MAX_FRAME_SIZE, length);
        ByteString data = source.readByteString(length);
        for (byte b : data.toByteArray()) {
          assertEquals(2, b);
        }
      }
    });
  }
  /** We do not send SETTINGS_COMPRESS_DATA = 1, nor want to. Let's make sure we error. */
  @Test public void compressedDataFrameWhenSettingDisabled() throws IOException {
    byte[] expectedData = new byte[Http2.INITIAL_MAX_FRAME_SIZE];
    Arrays.fill(expectedData, (byte) 2);
    Buffer zipped = gzip(expectedData);
    int zippedSize = (int) zipped.size();
    writeMedium(frame, zippedSize);
    frame.writeByte(Http2.TYPE_DATA);
    frame.writeByte(FLAG_COMPRESSED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    zipped.readAll(frame);
    try {
      fr.nextFrame(new BaseTestHandler());
      fail();
    } catch (IOException e) {
      assertEquals("PROTOCOL_ERROR: FLAG_COMPRESSED without SETTINGS_COMPRESS_DATA",
          e.getMessage());
    }
  }
  @Test public void readPaddedDataFrame() throws IOException {
    int dataLength = 1123;
    byte[] expectedData = new byte[dataLength];
    Arrays.fill(expectedData, (byte) 2);
    int paddingLength = 254;
    byte[] padding = new byte[paddingLength];
    Arrays.fill(padding, (byte) 0);
    writeMedium(frame, dataLength + paddingLength + 1); // +1 for the pad-length byte.
    frame.writeByte(Http2.TYPE_DATA);
    frame.writeByte(FLAG_PADDED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeByte(paddingLength);
    frame.write(expectedData);
    frame.write(padding);
    fr.nextFrame(assertData());
    assertTrue(frame.exhausted()); // Padding was skipped.
  }
  @Test public void readPaddedDataFrameZeroPadding() throws IOException {
    int dataLength = 1123;
    byte[] expectedData = new byte[dataLength];
    Arrays.fill(expectedData, (byte) 2);
    writeMedium(frame, dataLength + 1);
    frame.writeByte(Http2.TYPE_DATA);
    frame.writeByte(FLAG_PADDED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeByte(0);
    frame.write(expectedData);
    fr.nextFrame(assertData());
  }
  @Test public void readPaddedHeadersFrame() throws IOException {
    int paddingLength = 254;
    byte[] padding = new byte[paddingLength];
    Arrays.fill(padding, (byte) 0);
    Buffer headerBlock = literalHeaders(headerEntries("foo", "barrr", "baz", "qux"));
    writeMedium(frame, (int) headerBlock.size() + paddingLength + 1);
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(FLAG_END_HEADERS | FLAG_PADDED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeByte(paddingLength);
    frame.writeAll(headerBlock);
    frame.write(padding);
    fr.nextFrame(assertHeaderBlock());
    assertTrue(frame.exhausted()); // Padding was skipped.
  }
  @Test public void readPaddedHeadersFrameZeroPadding() throws IOException {
    Buffer headerBlock = literalHeaders(headerEntries("foo", "barrr", "baz", "qux"));
    writeMedium(frame, (int) headerBlock.size() + 1);
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(FLAG_END_HEADERS | FLAG_PADDED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeByte(0);
    frame.writeAll(headerBlock);
    fr.nextFrame(assertHeaderBlock());
  }
  /** Headers are compressed, then framed. */
  @Test public void readPaddedHeadersFrameThenContinuation() throws IOException {
    int paddingLength = 254;
    byte[] padding = new byte[paddingLength];
    Arrays.fill(padding, (byte) 0);
    // Decoding the first header will cross frame boundaries.
    Buffer headerBlock = literalHeaders(headerEntries("foo", "barrr", "baz", "qux"));
    // Write the first headers frame.
    writeMedium(frame, (int) (headerBlock.size() / 2) + paddingLength + 1);
    frame.writeByte(Http2.TYPE_HEADERS);
    frame.writeByte(FLAG_PADDED);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeByte(paddingLength);
    frame.write(headerBlock, headerBlock.size() / 2);
    frame.write(padding);
    // Write the continuation frame, specifying no more frames are expected.
    writeMedium(frame, (int) headerBlock.size());
    frame.writeByte(Http2.TYPE_CONTINUATION);
    frame.writeByte(FLAG_END_HEADERS);
    frame.writeInt(expectedStreamId & 0x7fffffff);
    frame.writeAll(headerBlock);
    fr.nextFrame(assertHeaderBlock());
    assertTrue(frame.exhausted());
  }
  @Test public void tooLargeDataFrame() throws IOException {
    try {
      sendDataFrame(new Buffer().write(new byte[0x1000000]));
      fail();
    } catch (IllegalArgumentException e) {
      assertEquals("FRAME_SIZE_ERROR length > 16384: 16777216", e.getMessage());
    }
  }
  @Test public void windowUpdateRoundTrip() throws IOException {
    final long expectedWindowSizeIncrement = 0x7fffffff;
    writeMedium(frame, 4); // length
    frame.writeByte(Http2.TYPE_WINDOW_UPDATE);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(expectedStreamId);
    frame.writeInt((int) expectedWindowSizeIncrement);
    // Check writer sends the same bytes.
    assertEquals(frame, windowUpdate(expectedWindowSizeIncrement));
    fr.nextFrame(new BaseTestHandler() {
      @Override public void windowUpdate(int streamId, long windowSizeIncrement) {
        assertEquals(expectedStreamId, streamId);
        assertEquals(expectedWindowSizeIncrement, windowSizeIncrement);
      }
    });
  }
  @Test public void badWindowSizeIncrement() throws IOException {
    try {
      windowUpdate(0);
      fail();
    } catch (IllegalArgumentException e) {
      assertEquals("windowSizeIncrement == 0 || windowSizeIncrement > 0x7fffffffL: 0",
          e.getMessage());
    }
    try {
      windowUpdate(0x80000000L);
      fail();
    } catch (IllegalArgumentException e) {
      assertEquals("windowSizeIncrement == 0 || windowSizeIncrement > 0x7fffffffL: 2147483648",
          e.getMessage());
    }
  }
  @Test public void goAwayWithoutDebugDataRoundTrip() throws IOException {
    final ErrorCode expectedError = ErrorCode.PROTOCOL_ERROR;
    writeMedium(frame, 8); // Without debug data there's only 2 32-bit fields.
    frame.writeByte(Http2.TYPE_GOAWAY);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // connection-scope
    frame.writeInt(expectedStreamId); // last good stream.
    frame.writeInt(expectedError.httpCode);
    // Check writer sends the same bytes.
    assertEquals(frame, sendGoAway(expectedStreamId, expectedError, Util.EMPTY_BYTE_ARRAY));
    fr.nextFrame(new BaseTestHandler() {
      @Override public void goAway(
          int lastGoodStreamId, ErrorCode errorCode, ByteString debugData) {
        assertEquals(expectedStreamId, lastGoodStreamId);
        assertEquals(expectedError, errorCode);
        assertEquals(0, debugData.size());
      }
    });
  }
  @Test public void goAwayWithDebugDataRoundTrip() throws IOException {
    final ErrorCode expectedError = ErrorCode.PROTOCOL_ERROR;
    final ByteString expectedData = ByteString.encodeUtf8("abcdefgh");
    // Compose the expected GOAWAY frame without debug data.
    writeMedium(frame, 8 + expectedData.size());
    frame.writeByte(Http2.TYPE_GOAWAY);
    frame.writeByte(Http2.FLAG_NONE);
    frame.writeInt(0); // connection-scope
    frame.writeInt(0); // never read any stream!
    frame.writeInt(expectedError.httpCode);
    frame.write(expectedData.toByteArray());
    // Check writer sends the same bytes.
    assertEquals(frame, sendGoAway(0, expectedError, expectedData.toByteArray()));
    fr.nextFrame(new BaseTestHandler() {
      @Override public void goAway(
          int lastGoodStreamId, ErrorCode errorCode, ByteString debugData) {
        assertEquals(0, lastGoodStreamId);
        assertEquals(expectedError, errorCode);
        assertEquals(expectedData, debugData);
      }
    });
  }
  @Test public void frameSizeError() throws IOException {
    Http2.Writer writer = new Http2.Writer(new Buffer(), true);
    try {
      writer.frameHeader(0, 16777216, Http2.TYPE_DATA, FLAG_NONE);
      fail();
    } catch (IllegalArgumentException e) {
      // TODO: real max is based on settings between 16384 and 16777215
      assertEquals("FRAME_SIZE_ERROR length > 16384: 16777216", e.getMessage());
    }
  }
  @Test public void ackSettingsAppliesMaxFrameSize() throws IOException {
    int newMaxFrameSize = 16777215;
    Http2.Writer writer = new Http2.Writer(new Buffer(), true);
    writer.ackSettings(new Settings().set(Settings.MAX_FRAME_SIZE, 0, newMaxFrameSize));
    assertEquals(newMaxFrameSize, writer.maxDataLength());
    writer.frameHeader(0, newMaxFrameSize, Http2.TYPE_DATA, FLAG_NONE);
  }
  @Test public void streamIdHasReservedBit() throws IOException {
    Http2.Writer writer = new Http2.Writer(new Buffer(), true);
    try {
      int streamId = 3;
      streamId |= 1L << 31; // set reserved bit
      writer.frameHeader(streamId, Http2.INITIAL_MAX_FRAME_SIZE, Http2.TYPE_DATA, FLAG_NONE);
      fail();
    } catch (IllegalArgumentException e) {
      assertEquals("reserved bit set: -2147483645", e.getMessage());
    }
  }
  /** HPACK-encodes {@code sentHeaders} (header block only, no framing). */
  private Buffer literalHeaders(List<Header> sentHeaders) throws IOException {
    Buffer out = new Buffer();
    new Hpack.Writer(out).writeHeaders(sentHeaders);
    return out;
  }
  /** Frames {@code headers} through the writer under test; returns the emitted bytes. */
  private Buffer sendHeaderFrames(boolean outFinished, List<Header> headers) throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).headers(outFinished, expectedStreamId, headers);
    return out;
  }
  /** Frames a PUSH_PROMISE through the writer under test; returns the emitted bytes. */
  private Buffer sendPushPromiseFrames(int streamId, List<Header> headers) throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).pushPromise(expectedStreamId, streamId, headers);
    return out;
  }
  /** Frames a PING through the writer under test; returns the emitted bytes. */
  private Buffer sendPingFrame(boolean ack, int payload1, int payload2) throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).ping(ack, payload1, payload2);
    return out;
  }
  /** Frames a GOAWAY through the writer under test; returns the emitted bytes. */
  private Buffer sendGoAway(int lastGoodStreamId, ErrorCode errorCode, byte[] debugData)
      throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).goAway(lastGoodStreamId, errorCode, debugData);
    return out;
  }
  /** Frames a DATA frame through the writer under test; returns the emitted bytes. */
  private Buffer sendDataFrame(Buffer data) throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).dataFrame(expectedStreamId, FLAG_NONE, data,
        (int) data.size());
    return out;
  }
  /** Frames a WINDOW_UPDATE through the writer under test; returns the emitted bytes. */
  private Buffer windowUpdate(long windowSizeIncrement) throws IOException {
    Buffer out = new Buffer();
    new Http2.Writer(out, true).windowUpdate(expectedStreamId, windowSizeIncrement);
    return out;
  }
  /** Handler asserting the standard "foo/barrr/baz/qux" header block was decoded. */
  private FrameReader.Handler assertHeaderBlock() {
    return new BaseTestHandler() {
      @Override public void headers(boolean outFinished, boolean inFinished, int streamId,
          int associatedStreamId, List<Header> headerBlock, HeadersMode headersMode) {
        assertFalse(outFinished);
        assertFalse(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(-1, associatedStreamId);
        assertEquals(headerEntries("foo", "barrr", "baz", "qux"), headerBlock);
        assertEquals(HeadersMode.HTTP_20_HEADERS, headersMode);
      }
    };
  }
  /** Handler asserting a 1123-byte data payload of all twos was decoded. */
  private FrameReader.Handler assertData() {
    return new BaseTestHandler() {
      @Override public void data(boolean inFinished, int streamId, BufferedSource source,
          int length) throws IOException {
        assertFalse(inFinished);
        assertEquals(expectedStreamId, streamId);
        assertEquals(1123, length);
        ByteString data = source.readByteString(length);
        for (byte b : data.toByteArray()) {
          assertEquals(2, b);
        }
      }
    };
  }
  /** Gzips {@code data} into a buffer (for FLAG_COMPRESSED tests). */
  private static Buffer gzip(byte[] data) throws IOException {
    Buffer buffer = new Buffer();
    Okio.buffer(new GzipSink(buffer)).write(data).close();
    return buffer;
  }
  /** Create a sufficiently large header set to overflow Http2.INITIAL_MAX_FRAME_SIZE bytes. */
  private static List<Header> largeHeaders() {
    String[] nameValues = new String[32];
    char[] chars = new char[512];
    for (int i = 0; i < nameValues.length;) {
      Arrays.fill(chars, (char) i);
      // Assigns the same 512-char string to two consecutive slots (a name and its
      // value). The left-hand indices evaluate left to right, so i advances by 2.
      nameValues[i++] = nameValues[i++] = String.valueOf(chars);
    }
    return headerEntries(nameValues);
  }
  /** Writes {@code i} as a 24-bit big-endian "medium" int — the HTTP/2 frame length field. */
  private static void writeMedium(BufferedSink sink, int i) throws IOException {
    sink.writeByte((i >>> 16) & 0xff);
    sink.writeByte((i >>> 8) & 0xff);
    sink.writeByte( i & 0xff);
  }
}
| mit |
FauxFaux/jdk9-jaxws | src/java.xml.ws/share/classes/com/sun/xml/internal/ws/policy/sourcemodel/PolicyReferenceData.java | 5391 | /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.xml.internal.ws.policy.sourcemodel;
import com.sun.xml.internal.ws.policy.privateutil.LocalizationMessages;
import com.sun.xml.internal.ws.policy.privateutil.PolicyLogger;
import com.sun.xml.internal.ws.policy.privateutil.PolicyUtils;
import java.net.URI;
import java.net.URISyntaxException;
/**
*
* @author Marek Potociar
*/
final class PolicyReferenceData {
    private static final PolicyLogger LOGGER = PolicyLogger.getLogger(PolicyReferenceData.class);

    // Default digest algorithm used when a digest is supplied without an algorithm.
    private static final URI DEFAULT_DIGEST_ALGORITHM_URI;
    // Non-null only if the default-algorithm URI failed to parse at class load time;
    // surfaced lazily from the digest-aware constructor rather than failing class init.
    private static final URISyntaxException CLASS_INITIALIZATION_EXCEPTION;
    static {
        URISyntaxException tempEx = null;
        URI tempUri = null;
        try {
            tempUri = new URI("http://schemas.xmlsoap.org/ws/2004/09/policy/Sha1Exc");
        } catch (URISyntaxException e) {
            tempEx = e;
        } finally {
            DEFAULT_DIGEST_ALGORITHM_URI = tempUri;
            CLASS_INITIALIZATION_EXCEPTION = tempEx;
        }
    }

    private final URI referencedModelUri;
    private final String digest;
    private final URI digestAlgorithmUri;

    /** Creates a new instance of PolicyReferenceData with no digest information. */
    public PolicyReferenceData(URI referencedModelUri) {
        this.referencedModelUri = referencedModelUri;
        this.digest = null;
        this.digestAlgorithmUri = null;
    }

    /**
     * Creates a new instance of PolicyReferenceData with an optional digest.
     *
     * @param referencedModelUri URI of the referenced policy model
     * @param expectedDigest expected digest of the referenced model; may be null only
     *        if {@code usedDigestAlgorithm} is also null
     * @param usedDigestAlgorithm digest algorithm URI; when null and a digest is
     *        given, the default (Sha1Exc) algorithm is used
     * @throws IllegalStateException if the default digest algorithm URI could not be
     *         initialized at class load time
     * @throws IllegalArgumentException if an algorithm is supplied without a digest
     */
    public PolicyReferenceData(URI referencedModelUri, String expectedDigest, URI usedDigestAlgorithm) {
        if (CLASS_INITIALIZATION_EXCEPTION != null) {
            throw LOGGER.logSevereException(new IllegalStateException(LocalizationMessages.WSP_0015_UNABLE_TO_INSTANTIATE_DIGEST_ALG_URI_FIELD(), CLASS_INITIALIZATION_EXCEPTION));
        }
        if (usedDigestAlgorithm != null && expectedDigest == null) {
            throw LOGGER.logSevereException(new IllegalArgumentException(LocalizationMessages.WSP_0072_DIGEST_MUST_NOT_BE_NULL_WHEN_ALG_DEFINED()));
        }
        this.referencedModelUri = referencedModelUri;
        if (expectedDigest == null) {
            this.digest = null;
            this.digestAlgorithmUri = null;
        } else {
            this.digest = expectedDigest;
            if (usedDigestAlgorithm == null) {
                this.digestAlgorithmUri = DEFAULT_DIGEST_ALGORITHM_URI;
            } else {
                this.digestAlgorithmUri = usedDigestAlgorithm;
            }
        }
    }

    public URI getReferencedModelUri() {
        return referencedModelUri;
    }

    /** @return the expected digest, or null if no digest was supplied. */
    public String getDigest() {
        return digest;
    }

    /** @return the digest algorithm URI, or null if no digest was supplied. */
    public URI getDigestAlgorithmUri() {
        return digestAlgorithmUri;
    }

    /**
     * An {@code Object.toString()} method override.
     */
    @Override
    public String toString() {
        return toString(0, new StringBuffer()).toString();
    }

    /**
     * A helper method that appends indented string representation of this instance to the input string buffer.
     *
     * @param indentLevel indentation level to be used.
     * @param buffer buffer to be used for appending string representation of this instance
     * @return modified buffer containing new string representation of the instance
     */
    public StringBuffer toString(final int indentLevel, final StringBuffer buffer) {
        final String indent = PolicyUtils.Text.createIndent(indentLevel);
        final String innerIndent = PolicyUtils.Text.createIndent(indentLevel + 1);
        buffer.append(indent).append("reference data {").append(PolicyUtils.Text.NEW_LINE);
        buffer.append(innerIndent).append("referenced policy model URI = '").append(referencedModelUri).append('\'').append(PolicyUtils.Text.NEW_LINE);
        if (digest == null) {
            buffer.append(innerIndent).append("no digest specified").append(PolicyUtils.Text.NEW_LINE);
        } else {
            // Fixed typo in debug output: was "digest algorith URI".
            buffer.append(innerIndent).append("digest algorithm URI = '").append(digestAlgorithmUri).append('\'').append(PolicyUtils.Text.NEW_LINE);
            buffer.append(innerIndent).append("digest = '").append(digest).append('\'').append(PolicyUtils.Text.NEW_LINE);
        }
        buffer.append(indent).append('}');
        return buffer;
    }
}
| gpl-2.0 |
oscarmartins/oscarmartins-gameforce | src/jake2/server/SV_CCMDS.java | 26351 | /*
Copyright (C) 1997-2001 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* Modifications
Copyright 2003-2004 Bytonic Software
Copyright 2010 Google Inc.
*/
package jake2.server;
import jake2.game.Cmd;
import jake2.game.EndianHandler;
import jake2.game.GameSVCmds;
import jake2.game.GameSave;
import jake2.game.Info;
import jake2.game.cvar_t;
import jake2.qcommon.CM;
import jake2.qcommon.Com;
import jake2.qcommon.Compatibility;
import jake2.qcommon.Cvar;
import jake2.qcommon.Defines;
import jake2.qcommon.FS;
import jake2.qcommon.Globals;
import jake2.qcommon.MSG;
import jake2.qcommon.Netchan;
import jake2.qcommon.SZ;
import jake2.qcommon.netadr_t;
import jake2.qcommon.sizebuf_t;
import jake2.qcommon.xcommand_t;
import jake2.sys.NET;
import jake2.sys.Sys;
import jake2.util.Lib;
import jake2.util.QuakeFile;
import jake2.util.Vargs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import java.util.Calendar;
import com.google.gwt.user.client.Command;
public class SV_CCMDS {
/*
===============================================================================
OPERATOR CONSOLE ONLY COMMANDS
These commands can only be entered from stdin or by a remote operator datagram
===============================================================================
*/
/** No-op continuation passed to SV_Map when nothing needs to run after the map change. */
private static final Command EMPTY_COMMAND = new Command() {
    public void execute() {
    }
};
/*
====================
SV_SetMaster_f
Specify a list of master servers
====================
*/
/**
 * Console command: registers the master servers named on the command line
 * (dedicated servers only). Slot 0 is reserved for the id master; parsed
 * addresses fill slots 1..MAX_MASTERS-1 and each gets a "ping" packet.
 * Forces an immediate heartbeat by backdating last_heartbeat.
 */
public static void SV_SetMaster_f() {
    int i, slot;

    // only dedicated servers send heartbeats
    if (Globals.dedicated.value == 0) {
        Com.Printf("Only dedicated servers use masters.\n");
        return;
    }

    // make sure the server is listed public
    Cvar.Set("public", "1");

    for (i = 1; i < Defines.MAX_MASTERS; i++)
        SV_MAIN.master_adr[i] = new netadr_t();

    slot = 1; // slot 0 will always contain the id master
    for (i = 1; i < Cmd.Argc(); i++) {
        if (slot == Defines.MAX_MASTERS)
            break;

        // FIX: parse into master_adr[slot], not master_adr[i] — the two
        // indices diverge once a bad address is skipped, and the code below
        // reads/uses master_adr[slot] (matches the original C source).
        if (!NET.StringToAdr(Cmd.Argv(i), SV_MAIN.master_adr[slot])) {
            Com.Printf("Bad address: " + Cmd.Argv(i) + "\n");
            continue;
        }
        if (SV_MAIN.master_adr[slot].port == 0)
            SV_MAIN.master_adr[slot].port = Defines.PORT_MASTER;

        Com.Printf("Master server at " + NET.AdrToString(SV_MAIN.master_adr[slot]) + "\n");
        Com.Printf("Sending a ping.\n");
        Netchan.OutOfBandPrint(Defines.NS_SERVER, SV_MAIN.master_adr[slot], "ping");

        slot++;
    }

    SV_INIT.svs.last_heartbeat = -9999999;
}
/*
==================
SV_SetPlayer
Sets sv_client and sv_player to the player with idnum Cmd.Argv(1)
==================
*/
/**
 * Resolves Cmd.Argv(1) to a client: a leading digit is treated as a slot
 * number, anything else as an exact player-name match. On success sets the
 * globals SV_MAIN.sv_client and SV_USER.sv_player and returns true;
 * otherwise prints a diagnostic and returns false.
 */
public static boolean SV_SetPlayer() {
    client_t cl;
    int i;
    int idnum;
    String s;

    if (Cmd.Argc() < 2)
        return false;

    s = Cmd.Argv(1);

    // numeric values are just slot numbers
    if (s.charAt(0) >= '0' && s.charAt(0) <= '9') {
        idnum = Lib.atoi(Cmd.Argv(1));
        if (idnum < 0 || idnum >= SV_MAIN.maxclients.value) {
            Com.Printf("Bad client slot: " + idnum + "\n");
            return false;
        }

        SV_MAIN.sv_client = SV_INIT.svs.clients[idnum];
        SV_USER.sv_player = SV_MAIN.sv_client.edict;
        // NOTE: the globals above are assigned even when the slot turns out
        // to be inactive and false is returned.
        if (0 == SV_MAIN.sv_client.state) {
            Com.Printf("Client " + idnum + " is not active\n");
            return false;
        }
        return true;
    }

    // check for a name match
    for (i = 0; i < SV_MAIN.maxclients.value; i++) {
        cl = SV_INIT.svs.clients[i];
        if (0 == cl.state)
            continue;
        if (0 == Lib.strcmp(cl.name, s)) {
            SV_MAIN.sv_client = cl;
            SV_USER.sv_player = SV_MAIN.sv_client.edict;
            return true;
        }
    }

    Com.Printf("Userid " + s + " is not on the server\n");
    return false;
}
/*
===============================================================================
SAVEGAME FILES
===============================================================================
*/
/** Best-effort deletion of the file at the given path; any failure is logged and ignored. */
public static void remove(String name) {
    try {
        final File doomed = new File(name);
        doomed.delete();
    }
    catch (Exception e) {
        Compatibility.printStackTrace(e);
    }
}
/** Delete save files save/(number)/. */
/**
 * Deletes everything belonging to the named savegame directory:
 * server.ssv, game.ssv, and all *.sav / *.sv2 level files.
 */
public static void SV_WipeSavegame(String savename) {
    Com.DPrintf("SV_WipeSaveGame(" + savename + ")\n");

    final String saveDir = FS.Gamedir() + "/save/" + savename + "/";
    remove(saveDir + "server.ssv");
    remove(saveDir + "game.ssv");

    // sweep both per-level file extensions with the same find loop
    final String[] patterns = { "*.sav", "*.sv2" };
    for (int p = 0; p < patterns.length; p++) {
        File found = Sys.FindFirst(saveDir + patterns[p], 0, 0);
        while (found != null) {
            found.delete();
            found = Sys.FindNext();
        }
        Sys.FindClose();
    }
}
/*
================
CopyFile
================
*/
/**
 * Copies the file at {@code src} to {@code dst} in 64 KiB chunks.
 * Failure to open either file aborts the copy; I/O errors are logged.
 *
 * FIX: in the original, an IOException from read() left {@code l} at its
 * previous value, so the loop kept spinning and rewriting stale buffer
 * contents forever. A read error now aborts the loop, and both files are
 * closed in a finally block.
 */
public static void CopyFile(String src, String dst) {
    byte buffer[] = new byte[65536];
    RandomAccessFile f1, f2;

    try {
        f1 = new RandomAccessFile(src, "r");
    }
    catch (Exception e) {
        Compatibility.printStackTrace(e);
        return;
    }
    try {
        f2 = new RandomAccessFile(dst, "rw");
    }
    catch (Exception e) {
        // could not open destination: release the source and bail out
        try {
            f1.close();
        }
        catch (IOException e1) {
            Compatibility.printStackTrace(e1);
        }
        return;
    }

    try {
        while (true) {
            int l;
            try {
                l = f1.read(buffer, 0, 65536);
            }
            catch (IOException e1) {
                Compatibility.printStackTrace(e1);
                break; // abort instead of looping on stale data
            }
            if (l == -1)
                break; // end of file
            try {
                f2.write(buffer, 0, l);
            }
            catch (IOException e2) {
                Compatibility.printStackTrace(e2);
            }
        }
    }
    finally {
        try {
            f1.close();
        }
        catch (IOException e1) {
            Compatibility.printStackTrace(e1);
        }
        try {
            f2.close();
        }
        catch (IOException e2) {
            Compatibility.printStackTrace(e2);
        }
    }
}
/*
================
SV_CopySaveGame
================
*/
/**
 * Copies an entire savegame directory: wipes {@code dst}, then copies
 * server.ssv, game.ssv and every per-level .sav file (plus its matching
 * .sv2 sibling) from {@code src}.
 */
public static void SV_CopySaveGame(String src, String dst) {
    //char name[MAX_OSPATH], name2[MAX_OSPATH];
    int l, len;
    File found;
    String name, name2;

    Com.DPrintf("SV_CopySaveGame(" + src + "," + dst + ")\n");

    SV_WipeSavegame(dst);

    // copy the savegame over
    name = FS.Gamedir() + "/save/" + src + "/server.ssv";
    name2 = FS.Gamedir() + "/save/" + dst + "/server.ssv";
    FS.CreatePath(name2);
    CopyFile(name, name2);

    name = FS.Gamedir() + "/save/" + src + "/game.ssv";
    name2 = FS.Gamedir() + "/save/" + dst + "/game.ssv";
    CopyFile(name, name2);

    String name1 = FS.Gamedir() + "/save/" + src + "/";
    len = name1.length();
    name = FS.Gamedir() + "/save/" + src + "/*.sav";

    found = Sys.FindFirst(name, 0, 0);
    while (found != null) {
        name = name1 + found.getName();
        name2 = FS.Gamedir() + "/save/" + dst + "/" + found.getName();
        CopyFile(name, name2);

        // change sav to sv2 and copy the companion file
        name = name.substring(0, name.length() - 3) + "sv2";
        name2 = name2.substring(0, name2.length() - 3) + "sv2";
        CopyFile(name, name2);

        found = Sys.FindNext();
    }
    Sys.FindClose();
}
/*
==============
SV_WriteLevelFile
==============
*/
/**
 * Archives the current level: writes all configstrings and the portal
 * state to save/current/&lt;map&gt;.sv2, then delegates entity state to
 * GameSave.WriteLevel (save/current/&lt;map&gt;.sav).
 */
public static void SV_WriteLevelFile() {
    String name;
    QuakeFile f;

    Com.DPrintf("SV_WriteLevelFile()\n");

    name = FS.Gamedir() + "/save/current/" + SV_INIT.sv.name + ".sv2";
    try {
        f = new QuakeFile(name, "rw");

        for (int i = 0; i < Defines.MAX_CONFIGSTRINGS; i++)
            f.writeString(SV_INIT.sv.configstrings[i]);
        CM.CM_WritePortalState(f);
        f.close();
    }
    catch (Exception e) {
        Com.Printf("Failed to open " + name + "\n");
        Compatibility.printStackTrace(e);
    }
    // the .sav file is written even if the .sv2 write failed above
    name = FS.Gamedir() + "/save/current/" + SV_INIT.sv.name + ".sav";
    GameSave.WriteLevel(name);
}
/*
==============
SV_ReadLevelFile
==============
*/
/**
 * Restores a previously archived level: reads configstrings and portal
 * state from save/current/&lt;map&gt;.sv2, then entity state via
 * GameSave.ReadLevel. Mirror of SV_WriteLevelFile.
 */
public static void SV_ReadLevelFile() {
    //char name[MAX_OSPATH];
    String name;
    QuakeFile f;

    Com.DPrintf("SV_ReadLevelFile()\n");

    name = FS.Gamedir() + "/save/current/" + SV_INIT.sv.name + ".sv2";
    try {
        f = new QuakeFile(name, "r");

        for (int n = 0; n < Defines.MAX_CONFIGSTRINGS; n++)
            SV_INIT.sv.configstrings[n] = f.readString();

        CM.CM_ReadPortalState(f);
        f.close();
    }
    catch (IOException e1) {
        Com.Printf("Failed to open " + name + "\n");
        Compatibility.printStackTrace(e1);
    }
    // level entities are read even if the .sv2 read failed above
    name = FS.Gamedir() + "/save/current/" + SV_INIT.sv.name + ".sav";
    GameSave.ReadLevel(name);
}
/*
==============
SV_WriteServerFile
==============
*/
/**
 * Writes the savegame header save/current/server.ssv — a comment line
 * (timestamp + level name, or "ENTERING ..." for autosaves), the map
 * command, and every CVAR_LATCH cvar as name/value string pairs terminated
 * by a null string — then delegates game state to GameSave.WriteGame.
 *
 * @param autosave true when writing the automatic level-transition save
 */
public static void SV_WriteServerFile(boolean autosave) {
    QuakeFile f;
    cvar_t var;
    String filename, name, string, comment;

    Com.DPrintf("SV_WriteServerFile(" + (autosave ? "true" : "false") + ")\n");

    filename = FS.Gamedir() + "/save/current/server.ssv";
    try {
        f = new QuakeFile(filename, "rw");

        if (!autosave) {
            Calendar c = Calendar.getInstance();
            comment =
                Com.sprintf(
                    "%2i:%2i %2i/%2i ",
                    new Vargs().add(c.get(Calendar.HOUR_OF_DAY)).add(c.get(Calendar.MINUTE)).add(
                        c.get(Calendar.MONTH) + 1).add(
                        c.get(Calendar.DAY_OF_MONTH)));
            comment += SV_INIT.sv.configstrings[Defines.CS_NAME];
        }
        else {
            // autosaved
            comment = "ENTERING " + SV_INIT.sv.configstrings[Defines.CS_NAME];
        }

        f.writeString(comment);

        // write the mapcmd
        f.writeString(SV_INIT.svs.mapcmd);

        // write all CVAR_LATCH cvars
        // these will be things like coop, skill, deathmatch, etc
        for (var = Globals.cvar_vars; var != null; var = var.next) {
            if (0 == (var.flags & Defines.CVAR_LATCH))
                continue;
            if (var.name.length() >= Defines.MAX_OSPATH - 1 || var.string.length() >= 128 - 1) {
                Com.Printf("Cvar too long: " + var.name + " = " + var.string + "\n");
                continue;
            }

            name = var.name;
            string = var.string;
            try {
                f.writeString(name);
                f.writeString(string);
            }
            catch (IOException e2) {
                // FIX: this failure was silently swallowed; report it like
                // every other I/O error in this file.
                Compatibility.printStackTrace(e2);
            }
        }
        // rst: for termination.
        f.writeString(null);
        f.close();
    }
    catch (Exception e) {
        Compatibility.printStackTrace(e);
        Com.Printf("Couldn't write " + filename + "\n");
    }

    // write game state
    filename = FS.Gamedir() + "/save/current/game.ssv";
    GameSave.WriteGame(filename, autosave);
}
/*
==============
SV_ReadServerFile
==============
*/
/**
 * Reads the savegame header save/current/server.ssv (mirror of
 * SV_WriteServerFile): skips the comment, restores the map command, then
 * force-sets every stored latched cvar until the null terminator string.
 * Afterwards re-initializes the game and reads game state from game.ssv.
 */
public static void SV_ReadServerFile() {
    String filename="", name = "", string, comment, mapcmd;
    try {
        QuakeFile f;

        mapcmd = "";

        Com.DPrintf("SV_ReadServerFile()\n");

        filename = FS.Gamedir() + "/save/current/server.ssv";
        f = new QuakeFile(filename, "r");

        // read the comment field
        comment = f.readString();

        // read the mapcmd
        mapcmd = f.readString();

        // read all CVAR_LATCH cvars
        // these will be things like coop, skill, deathmatch, etc
        while (true) {
            name = f.readString();
            if (name == null) // null string terminates the cvar list
                break;
            string = f.readString();

            Com.DPrintf("Set " + name + " = " + string + "\n");
            Cvar.ForceSet(name, string);
        }

        f.close();

        // start a new game fresh with new cvars
        SV_INIT.SV_InitGame();

        SV_INIT.svs.mapcmd = mapcmd;

        // read game state
        filename = FS.Gamedir() + "/save/current/game.ssv";
        GameSave.ReadGame(filename);
    }
    catch (Exception e) {
        Com.Printf("Couldn't read file " + filename + "\n");
        Compatibility.printStackTrace(e);
    }
}
//=========================================================
/*
==================
SV_DemoMap_f
Puts the server in demo mode on a specific map/cinematic
==================
*/
/** Console command: puts the server in demo mode on the map/cinematic named by Cmd.Argv(1). */
public static void SV_DemoMap_f() {
    SV_INIT.SV_Map(true, Cmd.Argv(1), false, EMPTY_COMMAND);
}
/*
==================
SV_GameMap_f
Saves the state of the map just being exited and goes to a new map.
If the initial character of the map string is '*', the next map is
in a new unit, so the current savegame directory is cleared of
map files.
Example:
*inter.cin+jail
Clears the archived maps, plays the inter.cin cinematic, then
goes to map jail.bsp.
==================
*/
/**
 * Console command "gamemap &lt;map&gt;": saves the state of the map being
 * exited and switches to the new map. A leading '*' clears the archived
 * maps of the current savegame instead of saving the level.
 */
public static void SV_GameMap_f() {
    String map;
    int i;
    client_t cl;
    boolean savedInuse[];

    if (Cmd.Argc() != 2) {
        Com.Printf("USAGE: gamemap <map>\n");
        return;
    }

    Com.DPrintf("SV_GameMap(" + Cmd.Argv(1) + ")\n");

    FS.CreatePath(FS.Gamedir() + "/save/current/");

    // check for clearing the current savegame
    map = Cmd.Argv(1);
    if (map.charAt(0) == '*') {
        // wipe all the *.sav files
        SV_WipeSavegame("current");
    }
    else { // save the map just exited
        if (SV_INIT.sv.state == Defines.ss_game) {
            // clear all the client inuse flags before saving so that
            // when the level is re-entered, the clients will spawn
            // at spawn points instead of occupying body shells
            savedInuse = new boolean[(int) SV_MAIN.maxclients.value];
            for (i = 0; i < SV_MAIN.maxclients.value; i++) {
                cl = SV_INIT.svs.clients[i];
                savedInuse[i] = cl.edict.inuse;
                cl.edict.inuse = false;
            }

            SV_WriteLevelFile();

            // we must restore these for clients to transfer over correctly
            for (i = 0; i < SV_MAIN.maxclients.value; i++) {
                cl = SV_INIT.svs.clients[i];
                cl.edict.inuse = savedInuse[i];
            }
            savedInuse = null;
        }
    }

    // runs after the map change completes; NOTE(review): it re-reads
    // Cmd.Argv(1) at execute time — assumes the command arguments are still
    // those of this gamemap invocation when it fires. TODO confirm.
    Command continueCmd = new Command() {
        public void execute() {
            // archive server state
            SV_INIT.svs.mapcmd = Cmd.Argv(1);

            // copy off the level to the autosave slot
            if (0 == Globals.dedicated.value) {
                SV_WriteServerFile(true);
                SV_CopySaveGame("current", "save0");
            }
        }
    };

    // start up the next map
    SV_INIT.SV_Map(false, Cmd.Argv(1), false, continueCmd);
}
/*
==================
SV_Map_f
Goes directly to a given map without any savegame archiving.
For development work
==================
*/
/**
 * Console command "map": goes directly to the given map without savegame
 * archiving (wipes the "current" save first). For development work.
 */
public static void SV_Map_f() {
    String map;

    //char expanded[MAX_QPATH];
    String expanded;

    // if not a pcx, demo, or cinematic, check to make sure the level exists
    // NOTE: the existence check below is disabled, which leaves the locals
    // 'map' and 'expanded' effectively unused.
    map = Cmd.Argv(1);
    // if (map.indexOf(".") < 0) {
    // expanded = "maps/" + map + ".bsp";
    // if (FS.LoadFile(expanded) == null) {
    //
    // Com.Printf("Can't find " + expanded + "\n");
    // return;
    // }
    // }
    SV_INIT.sv.state = Defines.ss_dead; // don't save current level when changing
    SV_WipeSavegame("current");
    SV_GameMap_f();
}
/*
=====================================================================
SAVEGAMES
=====================================================================
*/
/*
==============
SV_Loadgame_f
==============
*/
/**
 * Console command "load &lt;directory&gt;": validates the savegame directory
 * name, copies it over "current", restores server state, and switches to
 * the saved map.
 */
public static void SV_Loadgame_f() {
    String name;
    RandomAccessFile f;
    String dir;

    if (Cmd.Argc() != 2) {
        Com.Printf("USAGE: loadgame <directory>\n");
        return;
    }

    Com.Printf("Loading game...\n");

    dir = Cmd.Argv(1);
    if ( (dir.indexOf("..") > -1) || (dir.indexOf("/") > -1) || (dir.indexOf("\\") > -1)) {
        Com.Printf("Bad savedir.\n");
        // FIX (security): previously only printed — a directory containing
        // ".." or path separators was still used, allowing path traversal.
        return;
    }

    // make sure the server.ssv file exists
    name = FS.Gamedir() + "/save/" + Cmd.Argv(1) + "/server.ssv";
    try {
        f = new RandomAccessFile(name, "r");
    }
    catch (FileNotFoundException e) {
        Com.Printf("No such savegame: " + name + "\n");
        Compatibility.printStackTrace(e);
        return;
    }

    try {
        f.close();
    }
    catch (IOException e1) {
        Compatibility.printStackTrace(e1);
    }

    SV_CopySaveGame(Cmd.Argv(1), "current");
    SV_ReadServerFile();

    // go to the map
    SV_INIT.sv.state = Defines.ss_dead; // don't save current level when changing
    SV_INIT.SV_Map(false, SV_INIT.svs.mapcmd, true, EMPTY_COMMAND);
}
/*
==============
SV_Savegame_f
==============
*/
/**
 * Console command "save &lt;directory&gt;": validates state (in game, not
 * deathmatch, not dead, not "current"), archives the current level and
 * server state, then copies "current" into the named save directory.
 */
public static void SV_Savegame_f() {
    String dir;

    if (SV_INIT.sv.state != Defines.ss_game) {
        Com.Printf("You must be in a game to save.\n");
        return;
    }

    if (Cmd.Argc() != 2) {
        Com.Printf("USAGE: savegame <directory>\n");
        return;
    }

    if (Cvar.VariableValue("deathmatch") != 0) {
        Com.Printf("Can't savegame in a deathmatch\n");
        return;
    }

    if (0 == Lib.strcmp(Cmd.Argv(1), "current")) {
        Com.Printf("Can't save to 'current'\n");
        return;
    }

    if (SV_MAIN.maxclients.value == 1 && SV_INIT.svs.clients[0].edict.client.ps.stats[Defines.STAT_HEALTH] <= 0) {
        Com.Printf("\nCan't savegame while dead!\n");
        return;
    }

    dir = Cmd.Argv(1);
    if ( (dir.indexOf("..") > -1) || (dir.indexOf("/") > -1) || (dir.indexOf("\\") > -1)) {
        Com.Printf("Bad savedir.\n");
        // FIX (security): previously only printed — saving then proceeded
        // with the traversal-capable directory name. Abort instead.
        return;
    }

    Com.Printf("Saving game...\n");

    // archive current level, including all client edicts.
    // when the level is reloaded, they will be shells awaiting
    // a connecting client
    SV_WriteLevelFile();

    // save server state
    try {
        SV_WriteServerFile(false);
    }
    catch (Exception e) {
        Com.Printf("IOError in SV_WriteServerFile: " + e);
        Compatibility.printStackTrace(e);
    }

    // copy it off
    SV_CopySaveGame("current", dir);

    Com.Printf("Done.\n");
}
//===============================================================
/*
==================
SV_Kick_f
Kick a user off of the server
==================
*/
/**
 * Console command "kick &lt;userid&gt;": drops the client selected by
 * SV_SetPlayer, broadcasting the kick and telling the victim directly.
 */
public static void SV_Kick_f() {
    if (!SV_INIT.svs.initialized) {
        Com.Printf("No server running.\n");
        return;
    }

    if (Cmd.Argc() != 2) {
        Com.Printf("Usage: kick <userid>\n");
        return;
    }

    if (!SV_SetPlayer())
        return;

    SV_SEND.SV_BroadcastPrintf(Defines.PRINT_HIGH, SV_MAIN.sv_client.name + " was kicked\n");
    // print directly, because the dropped client won't get the
    // SV_BroadcastPrintf message
    SV_SEND.SV_ClientPrintf(SV_MAIN.sv_client, Defines.PRINT_HIGH, "You were kicked from the game\n");
    SV_MAIN.SV_DropClient(SV_MAIN.sv_client);
    SV_MAIN.sv_client.lastmessage = SV_INIT.svs.realtime; // in case there is a funny zombie
}
/*
================
SV_Status_f
================
*/
/**
 * Console command "status": prints the current map and a fixed-width table
 * of connected clients (slot, score, ping/state, name, lastmsg, address,
 * qport). Column padding is done manually with space loops.
 */
public static void SV_Status_f() {
    int i, j, l;
    client_t cl;
    String s;
    int ping;
    if (SV_INIT.svs.clients == null) {
        Com.Printf("No server running.\n");
        return;
    }
    Com.Printf("map : " + SV_INIT.sv.name + "\n");
    Com.Printf("num score ping name lastmsg address qport \n");
    Com.Printf("--- ----- ---- --------------- ------- --------------------- ------\n");
    for (i = 0; i < SV_MAIN.maxclients.value; i++) {
        cl = SV_INIT.svs.clients[i];
        if (0 == cl.state)
            continue;

        Com.Printf("%3i ", new Vargs().add(i));
        Com.Printf("%5i ", new Vargs().add(cl.edict.client.ps.stats[Defines.STAT_FRAGS]));

        // ping column doubles as a connection-state indicator
        if (cl.state == Defines.cs_connected)
            Com.Printf("CNCT ");
        else if (cl.state == Defines.cs_zombie)
            Com.Printf("ZMBI ");
        else {
            ping = cl.ping < 9999 ? cl.ping : 9999; // clamp for column width
            Com.Printf("%4i ", new Vargs().add(ping));
        }

        Com.Printf("%s", new Vargs().add(cl.name));
        l = 16 - cl.name.length();
        for (j = 0; j < l; j++)
            Com.Printf(" ");

        Com.Printf("%7i ", new Vargs().add(SV_INIT.svs.realtime - cl.lastmessage));

        s = NET.AdrToString(cl.netchan.remote_address);
        Com.Printf(s);
        l = 22 - s.length();
        for (j = 0; j < l; j++)
            Com.Printf(" ");

        Com.Printf("%5i", new Vargs().add(cl.netchan.qport));
        Com.Printf("\n");
    }
    Com.Printf("\n");
}
/*
==================
SV_ConSay_f
==================
*/
/**
 * Console command "say": sends "console: &lt;args&gt;" as a chat message to
 * every spawned client. Surrounding quotes on the argument are stripped.
 */
public static void SV_ConSay_f() {
    if (Cmd.Argc() < 2)
        return;

    String args = Cmd.Args();
    // strip a surrounding quote pair, if present
    if (args.charAt(0) == '"')
        args = args.substring(1, args.length() - 1);

    final String text = "console: " + args;

    for (int idx = 0; idx < SV_MAIN.maxclients.value; idx++) {
        final client_t receiver = SV_INIT.svs.clients[idx];
        if (receiver.state != Defines.cs_spawned)
            continue;
        SV_SEND.SV_ClientPrintf(receiver, Defines.PRINT_CHAT, text + "\n");
    }
}
/*
==================
SV_Heartbeat_f
==================
*/
/** Console command "heartbeat": forces an immediate master-server heartbeat by backdating the timestamp. */
public static void SV_Heartbeat_f() {
    SV_INIT.svs.last_heartbeat = -9999999;
}
/*
===========
SV_Serverinfo_f
Examine or change the serverinfo string
===========
*/
/** Console command "serverinfo": dumps the current serverinfo cvar settings. */
public static void SV_Serverinfo_f() {
    Com.Printf("Server info settings:\n");
    Info.Print(Cvar.Serverinfo());
}
/*
===========
SV_DumpUser_f
Examine all a users info strings
===========
*/
/** Console command "dumpuser &lt;userid&gt;": prints the userinfo string of the selected player. */
public static void SV_DumpUser_f() {
    if (Cmd.Argc() != 2) {
        Com.Printf("Usage: info <userid>\n");
        return;
    }

    if (!SV_SetPlayer())
        return;

    Com.Printf("userinfo\n");
    Com.Printf("--------\n");
    Info.Print(SV_MAIN.sv_client.userinfo);
}
/*
==============
SV_ServerRecord_f
Begins server demo recording. Every entity and every message will be
recorded, but no playerinfo will be stored. Primarily for demo merging.
==============
*/
/**
 * Console command "serverrecord &lt;demoname&gt;": begins server demo
 * recording into demos/&lt;demoname&gt;.dm2. Writes one giant startup
 * message (serverdata + all non-empty configstrings); subsequent frames
 * are appended elsewhere via svs.demo_multicast.
 */
public static void SV_ServerRecord_f() {
    //char name[MAX_OSPATH];
    String name;
    byte buf_data[] = new byte[32768];
    sizebuf_t buf = new sizebuf_t();
    int len;
    int i;

    if (Cmd.Argc() != 2) {
        Com.Printf("serverrecord <demoname>\n");
        return;
    }

    if (SV_INIT.svs.demofile != null) {
        Com.Printf("Already recording.\n");
        return;
    }

    if (SV_INIT.sv.state != Defines.ss_game) {
        Com.Printf("You must be in a level to record.\n");
        return;
    }

    //
    // open the demo file
    //
    name = FS.Gamedir() + "/demos/" + Cmd.Argv(1) + ".dm2";

    Com.Printf("recording to " + name + ".\n");
    FS.CreatePath(name);
    try {
        SV_INIT.svs.demofile = new RandomAccessFile(name, "rw");
    }
    catch (Exception e) {
        Com.Printf("ERROR: couldn't open.\n");
        Compatibility.printStackTrace(e);
        return;
    }

    // setup a buffer to catch all multicasts
    SZ.Init(SV_INIT.svs.demo_multicast, SV_INIT.svs.demo_multicast_buf, SV_INIT.svs.demo_multicast_buf.length);

    //
    // write a single giant fake message with all the startup info
    //
    SZ.Init(buf, buf_data, buf_data.length);

    //
    // serverdata needs to go over for all types of servers
    // to make sure the protocol is right, and to set the gamedir
    //
    // send the serverdata
    MSG.WriteByte(buf, Defines.svc_serverdata);
    MSG.WriteLong(buf, Defines.PROTOCOL_VERSION);
    MSG.WriteLong(buf, SV_INIT.svs.spawncount);

    // 2 means server demo
    MSG.WriteByte(buf, 2); // demos are always attract loops
    MSG.WriteString(buf, Cvar.VariableString("gamedir"));
    MSG.WriteShort(buf, -1);
    // send full levelname
    MSG.WriteString(buf, SV_INIT.sv.configstrings[Defines.CS_NAME]);

    for (i = 0; i < Defines.MAX_CONFIGSTRINGS; i++)
        // FIX: condition was inverted (== 0), which recorded only the empty
        // configstrings and skipped the meaningful ones. The original C
        // source writes entries where sv.configstrings[i][0] is non-zero,
        // i.e. the NON-empty strings.
        if (SV_INIT.sv.configstrings[i].length() > 0) {
            MSG.WriteByte(buf, Defines.svc_configstring);
            MSG.WriteShort(buf, i);
            MSG.WriteString(buf, SV_INIT.sv.configstrings[i]);
        }

    // write it to the demo file
    Com.DPrintf("signon message length: " + buf.cursize + "\n");
    // byte-swap: RandomAccessFile.writeInt is big-endian, the demo format
    // expects the length little-endian (hence EndianHandler.swapInt)
    len = EndianHandler.swapInt(buf.cursize);

    //fwrite(len, 4, 1, svs.demofile);
    //fwrite(buf.data, buf.cursize, 1, svs.demofile);
    try {
        SV_INIT.svs.demofile.writeInt(len);
        SV_INIT.svs.demofile.write(buf.data, 0, buf.cursize);
    }
    catch (IOException e1) {
        // TODO: do quake2 error handling!
        Compatibility.printStackTrace(e1);
    }

    // the rest of the demo file will be individual frames
}
/*
==============
SV_ServerStop_f
Ends server demo recording
==============
*/
/** Console command "serverstop": ends server demo recording and closes the demo file. */
public static void SV_ServerStop_f() {
    if (SV_INIT.svs.demofile == null) {
        Com.Printf("Not doing a serverrecord.\n");
        return;
    }

    final RandomAccessFile demo = SV_INIT.svs.demofile;
    SV_INIT.svs.demofile = null;
    try {
        demo.close();
    }
    catch (IOException e) {
        Compatibility.printStackTrace(e);
    }
    Com.Printf("Recording completed.\n");
}
/*
===============
SV_KillServer_f
Kick everyone off, possibly in preparation for a new game
===============
*/
/** Console command "killserver": kicks everyone off and closes network sockets, possibly in preparation for a new game. */
public static void SV_KillServer_f() {
    if (!SV_INIT.svs.initialized)
        return;
    SV_MAIN.SV_Shutdown("Server was killed.\n", false);
    NET.Config(false); // close network sockets
}
/*
===============
SV_ServerCommand_f
Let the game dll handle a command
===============
*/
/** Console command "sv ...": forwards the remaining arguments to the game module. */
public static void SV_ServerCommand_f() {
    GameSVCmds.ServerCommand();
}
//===========================================================
/*
==================
SV_InitOperatorCommands
==================
*/
/**
 * Registers all operator console commands with the command system, each as
 * an anonymous xcommand_t delegating to the corresponding SV_*_f method.
 * "say" is only registered on dedicated servers (the brace-less if below
 * guards just that one registration).
 */
public static void SV_InitOperatorCommands() {
    Cmd.AddCommand("heartbeat", new xcommand_t() {
        public void execute() {
            SV_Heartbeat_f();
        }
    });
    Cmd.AddCommand("kick", new xcommand_t() {
        public void execute() {
            SV_Kick_f();
        }
    });
    Cmd.AddCommand("status", new xcommand_t() {
        public void execute() {
            SV_Status_f();
        }
    });
    Cmd.AddCommand("serverinfo", new xcommand_t() {
        public void execute() {
            SV_Serverinfo_f();
        }
    });
    Cmd.AddCommand("dumpuser", new xcommand_t() {
        public void execute() {
            SV_DumpUser_f();
        }
    });
    Cmd.AddCommand("map", new xcommand_t() {
        public void execute() {
            SV_Map_f();
        }
    });
    Cmd.AddCommand("demomap", new xcommand_t() {
        public void execute() {
            SV_DemoMap_f();
        }
    });
    Cmd.AddCommand("gamemap", new xcommand_t() {
        public void execute() {
            SV_GameMap_f();
        }
    });
    Cmd.AddCommand("setmaster", new xcommand_t() {
        public void execute() {
            SV_SetMaster_f();
        }
    });
    // "say" is dedicated-server only
    if (Globals.dedicated.value != 0)
        Cmd.AddCommand("say", new xcommand_t() {
            public void execute() {
                SV_ConSay_f();
            }
        });
    Cmd.AddCommand("serverrecord", new xcommand_t() {
        public void execute() {
            SV_ServerRecord_f();
        }
    });
    Cmd.AddCommand("serverstop", new xcommand_t() {
        public void execute() {
            SV_ServerStop_f();
        }
    });
    Cmd.AddCommand("save", new xcommand_t() {
        public void execute() {
            SV_Savegame_f();
        }
    });
    Cmd.AddCommand("load", new xcommand_t() {
        public void execute() {
            SV_Loadgame_f();
        }
    });
    Cmd.AddCommand("killserver", new xcommand_t() {
        public void execute() {
            SV_KillServer_f();
        }
    });
    Cmd.AddCommand("sv", new xcommand_t() {
        public void execute() {
            SV_ServerCommand_f();
        }
    });
}
}
| gpl-2.0 |
codingore/yoh-at-ddns | yoh.at-ddns/src/tests/org/xbill/DNS/TypeBitmapTest.java | 2186 | // -*- Java -*-
//
// Copyright (c) 2011, org.xbill.DNS
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the University of Colorado at Boulder nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package org.xbill.DNS;
import junit.framework.TestCase;
/** Unit tests for {@code TypeBitmap.toString()} rendering of DNS type sets. */
public class TypeBitmapTest extends TestCase {
    // FIX: JUnit's assertEquals takes (expected, actual); the arguments were
    // reversed, which produces misleading failure messages.
    public void test_empty() {
        TypeBitmap typeBitmap = new TypeBitmap(new int[]{});
        assertEquals("", typeBitmap.toString());
    }
    public void test_typeA() {
        TypeBitmap typeBitmap = new TypeBitmap(new int[]{1});
        assertEquals("A", typeBitmap.toString());
    }
    public void test_typeNSandSOA() {
        TypeBitmap typeBitmap = new TypeBitmap(new int[]{2, 6});
        assertEquals("NS SOA", typeBitmap.toString());
    }
}
| gpl-3.0 |
mwassil/inspectIT | Agent/src/info/novatec/inspectit/agent/analyzer/IClassPoolAnalyzer.java | 1749 | package info.novatec.inspectit.agent.analyzer;
import javassist.ClassPool;
import javassist.CtConstructor;
import javassist.CtMethod;
/**
* This interface defines methods to help with the usage of the javassist class pool.
*
* @author Patrice Bouillet
*
*/
public interface IClassPoolAnalyzer {
    /**
     * Returns all the methods of a class as an array of {@link CtMethod} objects.
     *
     * @param classLoader
     *            The class loader of the given class name to successfully search for the class.
     * @param className
     *            The fully qualified name of the class.
     * @return The array of {@link CtMethod} objects of the passed class name.
     */
    CtMethod[] getMethodsForClassName(final ClassLoader classLoader, final String className);
    /**
     * Returns all the constructors of a class as an array of {@link CtConstructor} objects.
     *
     * @param classLoader
     *            The class loader of the given class name to successfully search for the class.
     * @param className
     *            The fully qualified name of the class.
     * @return The array of {@link CtConstructor} objects of the passed class name.
     */
    CtConstructor[] getConstructorsForClassName(final ClassLoader classLoader, final String className);
    /**
     * Copies the hierarchy from the given class loader and builds new class pool objects for it.
     *
     * @param classLoader
     *            The class loader.
     * @return The {@link ClassPool} referring to this class loader.
     */
    ClassPool addClassLoader(final ClassLoader classLoader);
    /**
     * Returns the {@link ClassPool} which is responsible for the given class loader.
     *
     * @param classLoader
     *            The class loader.
     * @return The {@link ClassPool} referring to this class loader.
     */
    ClassPool getClassPool(final ClassLoader classLoader);
} | agpl-3.0 |
jdahlstrom/vaadin.react | uitest/src/test/java/com/vaadin/tests/components/window/TestTooSmallSubwindowSizeTest.java | 1089 | /*
* Copyright 2000-2014 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.tests.components.window;
import java.io.IOException;
import org.junit.Test;
import com.vaadin.tests.tb3.MultiBrowserTest;
/**
* Tests that the styles work correctly in tiny subwindows that have more
* content than can fit.
*
* @author Vaadin Ltd
*/
public class TestTooSmallSubwindowSizeTest extends MultiBrowserTest {

    /**
     * Opens the test UI and compares a screenshot of its initial state
     * against the "initial_state" reference image.
     */
    @Test
    public void testSubwindowStyles() throws IOException {
        openTestURL();
        compareScreen("initial_state");
    }
}
| apache-2.0 |
jdahlstrom/vaadin.react | uitest/src/main/java/com/vaadin/tests/components/beanitemcontainer/TestBeanItemContainerUsage.java | 1953 | package com.vaadin.tests.components.beanitemcontainer;
import java.util.ArrayList;
import java.util.List;
import com.vaadin.data.util.BeanItemContainer;
import com.vaadin.tests.components.TestBase;
import com.vaadin.ui.Table;
/** Manual test: a Table backed by a BeanItemContainer of three Person beans. */
public class TestBeanItemContainerUsage extends TestBase {

    @Override
    protected String getDescription() {
        return "A test for the BeanItemContainer. The table should contain three persons and show their first and last names and their age.";
    }

    @Override
    protected Integer getTicketNumber() {
        return 1061;
    }

    @Override
    protected void setup() {
        List<Person> people = new ArrayList<Person>();
        people.add(new Person("Jones", "Birchman", 35));
        people.add(new Person("Marc", "Smith", 30));
        people.add(new Person("Greg", "Sandman", 75));

        BeanItemContainer<Person> container = new BeanItemContainer<Person>(people);

        Table table = new Table("Table containing Persons");
        table.setPageLength(5);
        table.setWidth("100%");
        table.setContainerDataSource(container);

        addComponent(table);
    }

    /** Simple JavaBean whose properties become the table columns. */
    public static class Person {
        private String firstName;
        private String lastName;
        private int age;

        public Person(String firstName, String lastName, int age) {
            super();
            this.firstName = firstName;
            this.lastName = lastName;
            this.age = age;
        }

        public String getFirstName() {
            return firstName;
        }

        public void setFirstName(String firstName) {
            this.firstName = firstName;
        }

        public String getLastName() {
            return lastName;
        }

        public void setLastName(String lastName) {
            this.lastName = lastName;
        }

        public int getAge() {
            return age;
        }

        public void setAge(int age) {
            this.age = age;
        }
    }
}
| apache-2.0 |
bhutchinson/rice | rice-framework/krad-data/src/main/java/org/kuali/rice/krad/data/jpa/JpaMetadataProviderImpl.java | 19905 | /**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.data.jpa;
import org.kuali.rice.core.api.data.DataType;
import org.kuali.rice.krad.data.metadata.DataObjectAttribute;
import org.kuali.rice.krad.data.metadata.DataObjectAttributeRelationship;
import org.kuali.rice.krad.data.metadata.DataObjectCollection;
import org.kuali.rice.krad.data.metadata.DataObjectMetadata;
import org.kuali.rice.krad.data.metadata.DataObjectRelationship;
import org.kuali.rice.krad.data.metadata.impl.DataObjectAttributeImpl;
import org.kuali.rice.krad.data.metadata.impl.DataObjectAttributeRelationshipImpl;
import org.kuali.rice.krad.data.metadata.impl.DataObjectCollectionImpl;
import org.kuali.rice.krad.data.metadata.impl.DataObjectMetadataImpl;
import org.kuali.rice.krad.data.metadata.impl.DataObjectRelationshipImpl;
import org.kuali.rice.krad.data.provider.annotation.ExtensionFor;
import org.kuali.rice.krad.data.provider.impl.MetadataProviderBase;
import javax.persistence.EntityManager;
import javax.persistence.metamodel.Attribute.PersistentAttributeType;
import javax.persistence.metamodel.EmbeddableType;
import javax.persistence.metamodel.EntityType;
import javax.persistence.metamodel.IdentifiableType;
import javax.persistence.metamodel.PluralAttribute;
import javax.persistence.metamodel.SingularAttribute;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * A superclass which handles most of the JPA metadata extraction.
 *
 * <p>
 * It handles everything which can be done via the standard javax.persistence annotations. Any implementation-specific
 * annotations must be processed in the provided abstract hook methods.
 * </p>
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 */
public abstract class JpaMetadataProviderImpl extends MetadataProviderBase implements JpaMetadataProvider {

    private static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(JpaMetadataProviderImpl.class);

    /**
     * The entity manager used in interacting with the database.
     */
    protected EntityManager entityManager;

    /**
     * Hook called after all "standard" annotations are processed to perform any further extraction based on the
     * internals of the JPA implementation.
     *
     * @param metadata The metadata for the data object.
     * @param entityType The entity type of the data object.
     */
    protected abstract void populateImplementationSpecificEntityLevelMetadata(DataObjectMetadataImpl metadata,
            EntityType<?> entityType);

    /**
     * Hook called after all "standard" attribute-level annotations are processed to perform any further extraction
     * based on the internals of the JPA implementation.
     *
     * @param attribute The attribute metadata for the data object.
     * @param attr The persistent single-valued property or field.
     */
    protected abstract void populateImplementationSpecificAttributeLevelMetadata(DataObjectAttributeImpl attribute,
            SingularAttribute<?, ?> attr);

    /**
     * Hook called after all "standard" field-level annotations are processed on attributes identified as "plural" to
     * perform any further extraction based on the internals of the JPA implementation.
     *
     * @param collection The collection metadata for the data object.
     * @param cd The persistent collection-valued attribute.
     */
    protected abstract void populateImplementationSpecificCollectionLevelMetadata(DataObjectCollectionImpl collection,
            PluralAttribute<?, ?, ?> cd);

    /**
     * Hook called after all "standard" field-level annotations are processed on attributes identified as "associations"
     * to perform any further extraction based on the internals of the JPA implementation.
     *
     * @param relationship The relationship metadata for the data object.
     * @param rd The persistent single-valued property or field.
     */
    protected abstract void populateImplementationSpecificRelationshipLevelMetadata(
            DataObjectRelationshipImpl relationship, SingularAttribute<?, ?> rd);

    /**
     * {@inheritDoc}
     */
    @Override
    public abstract DataObjectRelationship addExtensionRelationship(Class<?> entityClass, String extensionPropertyName,
            Class<?> extensionEntity);

    /**
     * {@inheritDoc}
     */
    @Override
    protected synchronized void initializeMetadata(Collection<Class<?>> types) {
        LOG.info("Initializing JPA Metadata from " + entityManager);
        masterMetadataMap.clear();
        // QUESTION: When is JPA loaded so this service can initialize itself?
        // Build and store the map
        for (IdentifiableType<?> identifiableType : entityManager.getMetamodel().getEntities()) {
            // Only extract the metadata if this is an EntityType and not a MappedSuperClass
            if (identifiableType instanceof EntityType<?>) {
                EntityType<?> type = (EntityType<?>) identifiableType;
                try {
                    masterMetadataMap.put(type.getBindableJavaType(), getMetadataForClass(type.getBindableJavaType()));
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Added Metadata For: " + type.getBindableJavaType());
                    }
                } catch (Exception ex) {
                    // Log and continue: one broken entity should not prevent the rest of the map from building.
                    LOG.error("Error obtaining JPA metadata for type: " + type.getJavaType(), ex);
                }
            }
        }
    }

    /**
     * Extracts the data from the JPA Persistence Unit. This code assumes that the given class is persistable.
     *
     * @param persistableClass Class which will be looked up in the JPA metamodel.
     * @return the metadata for the class
     */
    @SuppressWarnings("unchecked")
    public DataObjectMetadata getMetadataForClass(Class<?> persistableClass) {
        // first, let's scan for extension entities which declare themselves (via @ExtensionFor) as
        // extensions of the given class
        List<DataObjectRelationship> relationships = new ArrayList<DataObjectRelationship>();
        for (EntityType<?> extensionCandidate : getEntityManager().getMetamodel().getEntities()) {
            if (extensionCandidate.getJavaType().isAnnotationPresent(ExtensionFor.class)) {
                ExtensionFor extensionFor = extensionCandidate.getJavaType().getAnnotation(ExtensionFor.class);
                if (extensionFor.value().equals(persistableClass)) {
                    DataObjectRelationship relationship = addExtensionRelationship(persistableClass,
                            extensionFor.extensionPropertyName(), extensionCandidate.getJavaType());
                    // have to do this because even though we've added the DatabaseMapping in EclipseLink, it will not
                    // rebuild the JPA metamodel for us
                    relationships.add(relationship);
                }
            }
        }
        // now let's build us some metadata!
        DataObjectMetadataImpl metadata = new DataObjectMetadataImpl();
        EntityType<?> entityType = entityManager.getMetamodel().entity(persistableClass);
        metadata.setProviderName(this.getClass().getSimpleName());
        metadata.setType(persistableClass);
        metadata.setName(persistableClass.getSimpleName());
        metadata.setReadOnly(false);
        metadata.setSupportsOptimisticLocking(entityType.hasVersionAttribute());
        populateImplementationSpecificEntityLevelMetadata(metadata, entityType);
        // PK Extraction
        try {
            metadata.setPrimaryKeyAttributeNames(getPrimaryKeyAttributeNames(entityType));
        } catch (RuntimeException ex) {
            LOG.error("Error processing PK metadata for " + entityType.getBindableJavaType().getName());
            throw new RuntimeException(
                    "Error processing PK metadata for " + entityType.getBindableJavaType().getName(), ex);
        }
        // Main Attribute Extraction
        try {
            List<DataObjectAttribute> attributes = getSingularAttributes(persistableClass,
                    entityType.getSingularAttributes(), metadata.getPrimaryKeyAttributeNames());
            for (DataObjectAttribute attr : attributes) {
                metadata.getOrderedAttributeList().add(attr.getName());
            }
            metadata.setAttributes(attributes);
        } catch (RuntimeException ex) {
            LOG.error("Error processing attribute metadata for " + entityType.getBindableJavaType().getName());
            throw ex;
        }
        // Collection Extraction
        try {
            metadata.setCollections(getCollectionsFromMetadata((Set) entityType.getPluralAttributes()));
        } catch (RuntimeException ex) {
            LOG.error("Error processing collection metadata for " + entityType.getBindableJavaType().getName());
            throw ex;
        }
        // Reference/Relationship Extraction
        try {
            relationships.addAll(getRelationships(entityType.getSingularAttributes()));
            metadata.setRelationships(relationships);
        } catch (RuntimeException ex) {
            LOG.error("Error processing relationship metadata for " + entityType.getBindableJavaType().getName());
            throw ex;
        }
        return metadata;
    }

    /**
     * Gets the attribute names for the primary keys from the given entity type.
     *
     * @param entityType The entity type of the data object.
     * @return A list of primary key attribute names.
     */
    protected List<String> getPrimaryKeyAttributeNames(EntityType<?> entityType) {
        List<String> primaryKeyAttributeNames = new ArrayList<String>();
        // JHK: After examining of the metadata structures of EclipseLink, I determined that there
        // was nothing in those which preserved the order of the original annotations.
        // We *need* to know the order of PK fields for KNS/KRAD functionality.
        // So, I'm falling back to checking the annotations and fields on the referenced objects.
        // Yes, the Javadoc states that the getDeclaredFields() method does not guarantee order,
        // But, it's the best we have. And, as of Java 6, it is returning them in declaration order.
        if (entityType.getIdType() instanceof EmbeddableType) {
            // composite key class: the declaration order of its fields is the PK order
            for (Field pkField : entityType.getIdType().getJavaType().getDeclaredFields()) {
                primaryKeyAttributeNames.add(pkField.getName());
            }
        } else {
            // First, get the ID attributes from the metadata
            List<String> unsortedPkFields = new ArrayList<String>();
            for (SingularAttribute attr : entityType.getSingularAttributes()) {
                if (attr.isId()) {
                    unsortedPkFields.add(attr.getName());
                }
            }
            getPrimaryKeyNamesInOrder(primaryKeyAttributeNames, unsortedPkFields,
                    entityType.getJavaType().getDeclaredFields(), entityType.getJavaType());
        }
        return primaryKeyAttributeNames;
    }

    /**
     * Sorts the list of primary key names into field-declaration order.
     *
     * <p>If none of the given fields is a PK field, the superclass (if any) is searched recursively,
     * since the ID fields may be declared on a mapped superclass.</p>
     *
     * @param pkFieldNames The final list to which the primary key field names will be added in order.
     * @param unsortedPks The current list of unsorted primary keys.
     * @param fields The declared fields of the current object, in declaration order.
     * @param type The class of the current object.
     */
    private void getPrimaryKeyNamesInOrder(List<String> pkFieldNames, List<String> unsortedPks, Field[] fields, Class<?> type) {
        // FIX: iterate the passed-in fields array; the original ignored this parameter and
        // re-derived the identical array from type.getDeclaredFields().
        for (Field field : fields) {
            if (unsortedPks.contains(field.getName())) {
                pkFieldNames.add(field.getName());
            }
        }
        if (pkFieldNames.isEmpty() && type.getSuperclass() != null) {
            // PK fields not declared here - walk up to the superclass
            getPrimaryKeyNamesInOrder(pkFieldNames, unsortedPks, type.getSuperclass().getDeclaredFields(),
                    type.getSuperclass());
        }
    }

    /**
     * Gets a list of attributes for this data object.
     *
     * @param persistableClass The class of the data object.
     * @param fields The collection of singular attributes to process.
     * @param primaryKeyAttributes The list of primary key attribute names.
     * @return The list of attributes for this data object.
     */
    protected List<DataObjectAttribute> getSingularAttributes(Class<?> persistableClass, Collection<?> fields,
            List<String> primaryKeyAttributes) {
        if (fields == null) {
            fields = Collections.emptySet();
        }
        // Put them all into a map by their property name so we can find them
        // We want to add them to the list in appearance order in the class
        Map<String, SingularAttribute> attrs = new HashMap<String, SingularAttribute>(fields.size());
        for (SingularAttribute attr : (Collection<SingularAttribute>) fields) {
            if (!attr.isAssociation()) {
                attrs.put(attr.getName(), attr);
            }
        }
        List<DataObjectAttribute> attributes = new ArrayList<DataObjectAttribute>(fields.size());
        // This will process them in appearance order
        for (Field f : persistableClass.getDeclaredFields()) {
            SingularAttribute attr = attrs.get(f.getName());
            if (attr != null) {
                attributes.add(getAttributeMetadata(persistableClass, attr, primaryKeyAttributes));
                attrs.remove(f.getName()); // to note that it's been used - see below
            }
        }
        // Just in case there are others which don't match, we don't want to miss them and will add them at the end
        for (SingularAttribute attr : attrs.values()) {
            attributes.add(getAttributeMetadata(persistableClass, attr, primaryKeyAttributes));
        }
        return attributes;
    }

    /**
     * Gets a single field's metadata from the property descriptor.
     *
     * @param persistableClass The class of the data object.
     * @param attr The singular attribute to process.
     * @param primaryKeyAttributes The list of primary key attribute names.
     * @return The DataObjectAttribute containing the metadata for the given attribute on the provided Class
     */
    protected DataObjectAttribute getAttributeMetadata(Class<?> persistableClass, SingularAttribute<?, ?> attr,
            List<String> primaryKeyAttributes) {
        DataObjectAttributeImpl attribute = new DataObjectAttributeImpl();
        attribute.setOwningType(persistableClass);
        attribute.setName(attr.getName());
        Class<?> propertyType = attr.getJavaType();
        attribute.setType(propertyType);
        DataType dataType = DataType.getDataTypeFromClass(propertyType);
        if (dataType == null) {
            // fall back to STRING for types the DataType enum does not know about
            dataType = DataType.STRING;
        }
        attribute.setDataType(dataType);
        // PK fields are handled elsewhere, so a non-optional attribute is only "required" when it is not part of the PK
        attribute.setRequired(!attr.isOptional() && !attr.isId() && !primaryKeyAttributes.contains(attr.getName()));
        populateImplementationSpecificAttributeLevelMetadata(attribute, attr);
        return attribute;
    }

    /**
     * Gets a collection's metadata from the property descriptor.
     *
     * @param collections The list of plural attributes to process.
     * @return The list of collections for this data object.
     */
    protected List<DataObjectCollection> getCollectionsFromMetadata(Set<PluralAttribute> collections) {
        List<DataObjectCollection> colls = new ArrayList<DataObjectCollection>(collections.size());
        for (PluralAttribute cd : collections) {
            colls.add(getCollectionMetadataFromCollectionAttribute(cd));
        }
        return colls;
    }

    /**
     * Extracts the collection metadata from a single JPA {@link PluralAttribute} object.
     *
     * @param cd The plural attribute to process.
     * @return The collection metadata from a single JPA {@link PluralAttribute} object.
     */
    protected DataObjectCollection getCollectionMetadataFromCollectionAttribute(PluralAttribute cd) {
        try {
            DataObjectCollectionImpl collection = new DataObjectCollectionImpl();
            // Resolve the entity type of the collection's element class from the metamodel.
            Class<?> collectionElementClass = cd.getElementType().getJavaType();
            EntityType<?> elementEntityType = entityManager.getMetamodel().entity(collectionElementClass);
            collection.setName(cd.getName());
            collection.setRelatedType(collectionElementClass);
            populateImplementationSpecificCollectionLevelMetadata(collection, cd);
            // Set to read only if store (save) operations should not be pushed through
            PersistentAttributeType persistentAttributeType = cd.getPersistentAttributeType();
            // default case: Without any mapping attributes, collections are linked by their primary key
            if (persistentAttributeType == PersistentAttributeType.ONE_TO_MANY) {
                // TODO: We probably still need to handle the "mappedBy" property on the OneToMany definition
                // We only perform this logic here if we did not populate it in the implementation-specific call above
                if (collection.getAttributeRelationships().isEmpty()) {
                    // need to obtain the keys for the relationship
                    List<String> pkFields = getPrimaryKeyAttributeNames((EntityType<?>) cd.getDeclaringType());
                    List<String> fkFields = getPrimaryKeyAttributeNames(elementEntityType);
                    List<DataObjectAttributeRelationship> attributeRelationships = new ArrayList<DataObjectAttributeRelationship>();
                    for (int i = 0; i < pkFields.size(); i++) {
                        attributeRelationships.add(new DataObjectAttributeRelationshipImpl(pkFields.get(i),
                                fkFields.get(i)));
                    }
                    collection.setAttributeRelationships(attributeRelationships);
                }
            } else if (persistentAttributeType == PersistentAttributeType.MANY_TO_MANY) {
                // OK, this is an assumption
                collection.setIndirectCollection(true);
                // And, since the connection is set at the *database* level through the @JoinTable annotation
                // we do not have any field names with which to make the connection
                collection.setAttributeRelationships(null);
            }
            return collection;
        } catch (RuntimeException ex) {
            LOG.error("Unable to process Collection metadata: " + cd);
            throw ex;
        }
    }

    /**
     * Gets the list of relationships for this data object.
     *
     * @param references The list of singular attribute references.
     * @return The list of relationships for this data object.
     */
    protected List<DataObjectRelationship> getRelationships(Set<?> references) {
        List<DataObjectRelationship> rels = new ArrayList<DataObjectRelationship>(references.size());
        for (SingularAttribute rd : (Set<SingularAttribute>) references) {
            if (rd.isAssociation()) {
                rels.add(getRelationshipMetadata(rd));
            }
        }
        return rels;
    }

    /**
     * Gets a single field's relationship metadata.
     *
     * @param rd The singular attribute to process.
     * @return The single field's relationship metadata.
     */
    protected DataObjectRelationship getRelationshipMetadata(SingularAttribute rd) {
        try {
            DataObjectRelationshipImpl relationship = new DataObjectRelationshipImpl();
            Class<?> referencedClass = rd.getBindableJavaType();
            // Validate that the referenced class is a known entity in the metamodel; this throws
            // IllegalArgumentException otherwise. The returned EntityType itself is not needed here.
            entityManager.getMetamodel().entity(referencedClass);
            relationship.setName(rd.getName());
            relationship.setRelatedType(referencedClass);
            populateImplementationSpecificRelationshipLevelMetadata(relationship, rd);
            return relationship;
        } catch (RuntimeException ex) {
            LOG.error("Unable to process Relationship metadata: " + rd);
            throw ex;
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isClassPersistable(Class<?> type) {
        return handles(type);
    }

    /**
     * Setter for the entity manager.
     *
     * @param entityManager The entity manager to set.
     */
    public void setEntityManager(EntityManager entityManager) {
        this.entityManager = entityManager;
    }

    /**
     * Gets the entity manager for interacting with the database.
     *
     * @return The entity manager for interacting with the database.
     */
    public EntityManager getEntityManager() {
        return entityManager;
    }
}
| apache-2.0 |
amostl/chromedevtools | plugins/org.chromium.sdk.wipbackend.dev/src-wip-generated/org/chromium/sdk/internal/wip/protocol/common/network/LoaderIdTypedef.java | 474 | // Generated source.
// Generator: org.chromium.sdk.internal.wip.tools.protocolgenerator.Generator
// Origin: http://svn.webkit.org/repository/webkit/trunk/Source/WebCore/inspector/Inspector.json@114632
package org.chromium.sdk.internal.wip.protocol.common.network;
/**
 Unique loader identifier.
 */
public class LoaderIdTypedef {
  /*
  The class is a 'typedef'.
  It merely holds a type javadoc and its only field refers to an actual type.
  It is never instantiated by the protocol layer; generated code references it
  only so that tooling can associate the javadoc above with the underlying type.
  */
  String actualType; // the underlying wire representation: a plain String
}
| bsd-3-clause |
armenrz/adempiere | tools/src/org/apache/ecs/xhtml/p.java | 8191 | /******************************************************************************
* Product: Adempiere ERP & CRM Smart Business Solution *
* Copyright (C) 1999-2006 ComPiere, Inc. All Rights Reserved. *
* This program is free software; you can redistribute it and/or modify it *
* under the terms version 2 of the GNU General Public License as published *
* by the Free Software Foundation. This program is distributed in the hope *
* that it will be useful, but WITHOUT ANY WARRANTY; without even the implied *
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. *
* For the text or an alternative of this public license, you may reach us *
* ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA *
* or via info@compiere.org or http://www.compiere.org/license.html *
*****************************************************************************/
package org.apache.ecs.xhtml;
import org.apache.ecs.Element;
import org.apache.ecs.KeyEvents;
import org.apache.ecs.MouseEvents;
import org.apache.ecs.MultiPartElement;
import org.apache.ecs.Printable;
/**
 * This class creates a &lt;p&gt; tag.
 * <P>
 * The HTML &lt;P&gt; tag defaults to not having a closing &lt;/P&gt; because it
 * is optional in the spec. In XHTML this is not allowed, so ending &lt;/p&gt;
 * is enforced.
 *
 * @version $Id: p.java,v 1.2 2006/07/30 00:54:02 jjanke Exp $
 * @author <a href="mailto:snagy@servletapi.com">Stephan Nagy </a>
 * @author <a href="mailto:jon@clearink.com">Jon S. Stevens </a>
 * @author <a href="mailto:bojan@binarix.com">Bojan Smojver </a>
 */
public class p extends MultiPartElement
implements Printable, MouseEvents, KeyEvents
{
/**
 * Serialization version marker for this element class.
 */
private static final long serialVersionUID = -5855405154143551113L;
/**
 * Instance initialization block: configures the element type as lowercase
 * "p" with quoted attribute values, per XHTML requirements.
 */
{
setElementType ("p");
setCase (LOWERCASE);
setAttributeQuote (true);
}
/**
 * Basic constructor. You need to set the attributes using the set* methods.
 */
public p ()
{
}
/**
 * Use the set* methods to set the values of the attributes.
 *
 * @param align
 * set the value of align=""
 */
public p (String align)
{
setAlign (align);
}
/**
 * Use the set* methods to set the values of the attributes.
 *
 * @param value
 * set the text after the &lt;p&gt; tag
 * @param align
 * set the value of align=""
 */
public p (String value, String align)
{
addElement (value);
setAlign (align);
}
/**
 * Use the set* methods to set the values of the attributes.
 *
 * @param value
 * set the text after the &lt;p&gt; tag
 */
public p (Element value)
{
addElement (value);
}
/**
 * Use the set* methods to set the values of the attributes.
 *
 * @param value
 * set the text after the &lt;p&gt; tag
 * @param align
 * set the value of align=""
 */
public p (Element value, String align)
{
addElement (value);
setAlign (align);
}
/**
 * Sets the align="" attribute
 *
 * @param align
 * the align="" attribute
 * @return this element, for call chaining
 */
public p setAlign (String align)
{
addAttribute ("align", align);
return this;
}
/**
 * Sets the lang="" and xml:lang="" attributes
 *
 * @param lang
 * the lang="" and xml:lang="" attributes
 * @return this element, for call chaining
 */
public Element setLang (String lang)
{
addAttribute ("lang", lang);
addAttribute ("xml:lang", lang);
return this;
}
/**
 * Adds an Element to the element.
 *
 * @param hashcode
 * name of element for hash table
 * @param element
 * the Element to add
 * @return this element, for call chaining
 */
public p addElement (String hashcode, Element element)
{
addElementToRegistry (hashcode, element);
return (this);
}
/**
 * Adds an Element to the element.
 *
 * @param hashcode
 * name of element for hash table
 * @param element
 * the text to add
 * @return this element, for call chaining
 */
public p addElement (String hashcode, String element)
{
addElementToRegistry (hashcode, element);
return (this);
}
/**
 * Adds an Element to the element.
 *
 * @param element
 * the Element to add
 * @return this element, for call chaining
 */
public p addElement (Element element)
{
addElementToRegistry (element);
return (this);
}
/**
 * Adds an Element to the element.
 *
 * @param element
 * the text to add
 * @return this element, for call chaining
 */
public p addElement (String element)
{
addElementToRegistry (element);
return (this);
}
/**
 * Removes an Element from the element.
 *
 * @param hashcode
 * the name of the element to be removed.
 * @return this element, for call chaining
 */
public p removeElement (String hashcode)
{
removeElementFromRegistry (hashcode);
return (this);
}
/**
 * The onclick event occurs when the pointing device button is clicked over
 * an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnClick (String script)
{
addAttribute ("onclick", script);
}
/**
 * The ondblclick event occurs when the pointing device button is double
 * clicked over an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnDblClick (String script)
{
addAttribute ("ondblclick", script);
}
/**
 * The onmousedown event occurs when the pointing device button is pressed
 * over an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnMouseDown (String script)
{
addAttribute ("onmousedown", script);
}
/**
 * The onmouseup event occurs when the pointing device button is released
 * over an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnMouseUp (String script)
{
addAttribute ("onmouseup", script);
}
/**
 * The onmouseover event occurs when the pointing device is moved onto an
 * element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnMouseOver (String script)
{
addAttribute ("onmouseover", script);
}
/**
 * The onmousemove event occurs when the pointing device is moved while it
 * is over an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnMouseMove (String script)
{
addAttribute ("onmousemove", script);
}
/**
 * The onmouseout event occurs when the pointing device is moved away from
 * an element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnMouseOut (String script)
{
addAttribute ("onmouseout", script);
}
/**
 * The onkeypress event occurs when a key is pressed and released over an
 * element. This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnKeyPress (String script)
{
addAttribute ("onkeypress", script);
}
/**
 * The onkeydown event occurs when a key is pressed down over an element.
 * This attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnKeyDown (String script)
{
addAttribute ("onkeydown", script);
}
/**
 * The onkeyup event occurs when a key is released over an element. This
 * attribute may be used with most elements.
 *
 * @param script
 * the script to run when the event fires
 */
public void setOnKeyUp (String script)
{
addAttribute ("onkeyup", script);
}
}
| gpl-2.0 |
leichunxin/jeesitedemo | src/main/java/com/thinkgem/jeesite/common/utils/Collections3.java | 4768 | /**
* Copyright (c) 2005-2012 springside.org.cn
*
* Licensed under the Apache License, Version 2.0 (the "License");
*/
package com.thinkgem.jeesite.common.utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.beanutils.PropertyUtils;
import org.apache.commons.lang3.StringUtils;
/**
 * Collection utilities.
 * Named Collections3, following the JDK's Collections and Guava's Collections2.
 * @author calvin
 * @version 2013-01-15
 */
@SuppressWarnings("rawtypes")
public class Collections3 {
/**
 * Extracts two properties (via getter methods) from each object in a collection and combines them into a Map.
 *
 * @param collection the source collection.
 * @param keyPropertyName the name of the property to extract as the Map key.
 * @param valuePropertyName the name of the property to extract as the Map value.
 */
@SuppressWarnings("unchecked")
public static Map extractToMap(final Collection collection, final String keyPropertyName,
final String valuePropertyName) {
Map map = new HashMap(collection.size());
try {
for (Object obj : collection) {
map.put(PropertyUtils.getProperty(obj, keyPropertyName),
PropertyUtils.getProperty(obj, valuePropertyName));
}
} catch (Exception e) {
// reflection failures are rethrown as unchecked exceptions
throw Reflections.convertReflectionExceptionToUnchecked(e);
}
return map;
}
/**
 * Extracts one property (via its getter method) from each object in a collection and combines them into a List.
 *
 * @param collection the source collection.
 * @param propertyName the name of the property to extract.
 */
@SuppressWarnings("unchecked")
public static List extractToList(final Collection collection, final String propertyName) {
List list = new ArrayList(collection.size());
try {
for (Object obj : collection) {
list.add(PropertyUtils.getProperty(obj, propertyName));
}
} catch (Exception e) {
// reflection failures are rethrown as unchecked exceptions
throw Reflections.convertReflectionExceptionToUnchecked(e);
}
return list;
}
/**
 * Extracts one property (via its getter method) from each object in a collection and joins them into a
 * separator-delimited String.
 *
 * @param collection the source collection.
 * @param propertyName the name of the property to extract.
 * @param separator the delimiter.
 */
public static String extractToString(final Collection collection, final String propertyName, final String separator) {
List list = extractToList(collection, propertyName);
return StringUtils.join(list, separator);
}
/**
 * Converts all elements of the Collection (via toString()) to a String, delimited by the given separator.
 */
public static String convertToString(final Collection collection, final String separator) {
return StringUtils.join(collection, separator);
}
/**
 * Converts all elements of the Collection (via toString()) to a String, wrapping each element with the given
 * prefix and postfix, e.g. &lt;div&gt;mymessage&lt;/div&gt;.
 */
public static String convertToString(final Collection collection, final String prefix, final String postfix) {
StringBuilder builder = new StringBuilder();
for (Object o : collection) {
builder.append(prefix).append(o).append(postfix);
}
return builder.toString();
}
/**
 * Returns true if the collection is null or empty.
 */
public static boolean isEmpty(Collection collection) {
return (collection == null || collection.isEmpty());
}
/**
 * Returns the first element of the Collection, or null if the collection is null or empty.
 */
public static <T> T getFirst(Collection<T> collection) {
if (isEmpty(collection)) {
return null;
}
return collection.iterator().next();
}
/**
 * Returns the last element of the Collection, or null if the collection is null or empty.
 */
public static <T> T getLast(Collection<T> collection) {
if (isEmpty(collection)) {
return null;
}
// When the type is a List, directly fetch the last element.
if (collection instanceof List) {
List<T> list = (List<T>) collection;
return list.get(list.size() - 1);
}
// For other collection types, advance an iterator to the last element.
Iterator<T> iterator = collection.iterator();
while (true) {
T current = iterator.next();
if (!iterator.hasNext()) {
return current;
}
}
}
/**
 * Returns a new List containing a+b (all elements of a followed by all elements of b).
 */
public static <T> List<T> union(final Collection<T> a, final Collection<T> b) {
List<T> result = new ArrayList<T>(a);
result.addAll(b);
return result;
}
/**
 * Returns a new List containing a-b (removes one occurrence from a for each element of b).
 */
public static <T> List<T> subtract(final Collection<T> a, final Collection<T> b) {
List<T> list = new ArrayList<T>(a);
for (T element : b) {
// List.remove removes a single occurrence, so duplicates in a are only reduced once per match in b
list.remove(element);
}
return list;
}
/**
 * Returns a new List containing the intersection of a and b (elements of a that also appear in b).
 */
public static <T> List<T> intersection(Collection<T> a, Collection<T> b) {
List<T> list = new ArrayList<T>();
for (T element : a) {
if (b.contains(element)) {
list.add(element);
}
}
return list;
}
}
| apache-2.0 |
WilliamNouet/nifi | nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/main/java/org/apache/nifi/processors/email/ExtractTNEFAttachments.java | 10063 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.email;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.EventDriven;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.behavior.InputRequirement.Requirement;
import org.apache.nifi.annotation.behavior.SideEffectFree;
import org.apache.nifi.annotation.behavior.SupportsBatching;
import org.apache.nifi.annotation.behavior.WritesAttribute;
import org.apache.nifi.annotation.behavior.WritesAttributes;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.attributes.CoreAttributes;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.FlowFileHandlingException;
import org.apache.nifi.processor.io.InputStreamCallback;
import org.apache.nifi.processor.io.OutputStreamCallback;
import org.apache.nifi.stream.io.BufferedInputStream;
import org.apache.poi.hmef.Attachment;
import org.apache.poi.hmef.HMEFMessage;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
@SupportsBatching
@EventDriven
@SideEffectFree
@Tags({"split", "email"})
@InputRequirement(Requirement.INPUT_REQUIRED)
@CapabilityDescription("Extract attachments from a mime formatted email file, splitting them into individual flowfiles.")
@WritesAttributes({
@WritesAttribute(attribute = "filename ", description = "The filename of the attachment"),
@WritesAttribute(attribute = "email.tnef.attachment.parent.filename ", description = "The filename of the parent FlowFile"),
@WritesAttribute(attribute = "email.tnef.attachment.parent.uuid", description = "The UUID of the original FlowFile.")})
public class ExtractTNEFAttachments extends AbstractProcessor {
public static final String ATTACHMENT_ORIGINAL_FILENAME = "email.tnef.attachment.parent.filename";
public static final String ATTACHMENT_ORIGINAL_UUID = "email.tnef.attachment.parent.uuid";
public static final Relationship REL_ATTACHMENTS = new Relationship.Builder()
.name("attachments")
.description("Each individual attachment will be routed to the attachments relationship")
.build();
public static final Relationship REL_ORIGINAL = new Relationship.Builder()
.name("original")
.description("Each original flowfile (i.e. before extraction) will be routed to the original relationship")
.build();
public static final Relationship REL_FAILURE = new Relationship.Builder()
.name("failure")
.description("Each individual flowfile that could not be parsed will be routed to the failure relationship")
.build();
private final static Set<Relationship> RELATIONSHIPS;
private final static List<PropertyDescriptor> DESCRIPTORS;
static {
final Set<Relationship> _relationships = new HashSet<>();
_relationships.add(REL_ATTACHMENTS);
_relationships.add(REL_ORIGINAL);
_relationships.add(REL_FAILURE);
RELATIONSHIPS = Collections.unmodifiableSet(_relationships);
final List<PropertyDescriptor> _descriptors = new ArrayList<>();
DESCRIPTORS = Collections.unmodifiableList(_descriptors);
}
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
final ComponentLog logger = getLogger();
final FlowFile originalFlowFile = session.get();
if (originalFlowFile == null) {
return;
}
final List<FlowFile> attachmentsList = new ArrayList<>();
final List<FlowFile> invalidFlowFilesList = new ArrayList<>();
final List<FlowFile> originalFlowFilesList = new ArrayList<>();
session.read(originalFlowFile, new InputStreamCallback() {
@Override
public void process(final InputStream rawIn) throws IOException {
try (final InputStream in = new BufferedInputStream(rawIn)) {
Properties props = new Properties();
HMEFMessage hmefMessage = null;
// This will trigger an exception in case content is not a TNEF.
hmefMessage = new HMEFMessage(in);
// Add otiginal flowfile (may revert later on in case of errors) //
originalFlowFilesList.add(originalFlowFile);
if (hmefMessage != null) {
// Attachments isn empty, proceeding.
if (!hmefMessage.getAttachments().isEmpty()) {
final String originalFlowFileName = originalFlowFile.getAttribute(CoreAttributes.FILENAME.key());
try {
for (final Attachment attachment : hmefMessage.getAttachments()) {
FlowFile split = session.create(originalFlowFile);
final Map<String, String> attributes = new HashMap<>();
if (StringUtils.isNotBlank(attachment.getLongFilename())) {
attributes.put(CoreAttributes.FILENAME.key(), attachment.getFilename());
}
String parentUuid = originalFlowFile.getAttribute(CoreAttributes.UUID.key());
attributes.put(ATTACHMENT_ORIGINAL_UUID, parentUuid);
attributes.put(ATTACHMENT_ORIGINAL_FILENAME, originalFlowFileName);
// TODO: Extract Mime Type (HMEF doesn't seem to be able to get this info.
split = session.append(split, new OutputStreamCallback() {
@Override
public void process(OutputStream out) throws IOException {
out.write(attachment.getContents());
}
});
split = session.putAllAttributes(split, attributes);
attachmentsList.add(split);
}
} catch (FlowFileHandlingException e) {
// Something went wrong
// Removing splits that may have been created
session.remove(attachmentsList);
// Removing the original flow from its list
originalFlowFilesList.remove(originalFlowFile);
logger.error("Flowfile {} triggered error {} while processing message removing generated FlowFiles from sessions", new Object[]{originalFlowFile, e});
invalidFlowFilesList.add(originalFlowFile);
}
}
}
} catch (Exception e) {
// Another error hit...
// Removing the original flow from its list
originalFlowFilesList.remove(originalFlowFile);
logger.error("Could not parse the flowfile {} as an email, treating as failure", new Object[]{originalFlowFile, e});
// Message is invalid or triggered an error during parsing
invalidFlowFilesList.add(originalFlowFile);
}
}
});
session.transfer(attachmentsList, REL_ATTACHMENTS);
// As per above code, originalFlowfile may be routed to invalid or
// original depending on RFC2822 compliance.
session.transfer(invalidFlowFilesList, REL_FAILURE);
session.transfer(originalFlowFilesList, REL_ORIGINAL);
// check if attachments have been extracted
if (attachmentsList.size() != 0) {
if (attachmentsList.size() > 10) {
// If more than 10, summarise log
logger.info("Split {} into {} files", new Object[]{originalFlowFile, attachmentsList.size()});
} else {
// Otherwise be more verbose and list each individual split
logger.info("Split {} into {} files: {}", new Object[]{originalFlowFile, attachmentsList.size(), attachmentsList});
}
}
}
@Override
public Set<Relationship> getRelationships() {
return this.RELATIONSHIPS;
}
@Override
public final List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return DESCRIPTORS;
}
}
| apache-2.0 |
hawtio/hawtio | platforms/hawtio-osgi-jmx/src/main/java/io/hawt/osgi/jmx/ConfigAdminMXBean.java | 400 | package io.hawt.osgi.jmx;
import java.util.Map;
/**
* This MXBean is to get around a limitation in Jolokia (1.1.2) which prevents it
* from working with JMX APIs that take a TabularData argument such as
* {@code ConfigurationAdminMBean.html.update(String pid, TabularData properties)}.
*/
public interface ConfigAdminMXBean {
    /**
     * Updates the configuration identified by the given persistent identity (PID)
     * with the supplied key/value pairs. Using a {@code Map<String, String>} keeps the
     * signature Jolokia-friendly, avoiding the {@code TabularData} argument described
     * in the interface-level comment.
     *
     * @param pid  the persistent identity of the configuration to update
     * @param data the configuration properties to apply, as a simple string map
     */
    void configAdminUpdate(String pid, Map<String, String> data);
}
| apache-2.0 |
goodwinnk/intellij-community | plugins/InspectionGadgets/test/com/siyeh/igtest/errorhandling/bad_exception_caught/BadExceptionCaught.java | 408 | package com.siyeh.igtest.errorhandling.bad_exception_caught;
// Test data for the "bad exception caught" inspection. The <warning> tags are
// inspection-test markup, not Java: they mark exactly where the inspection is
// expected to report a prohibited caught exception type. Do not "fix" them.
class BadExceptionCaught {
    public void test() {
        try
        {
            // some code here
        }
        // NullPointerException is a prohibited type to catch and is expected to be
        // highlighted; its multi-catch partner UnsupportedOperationException is not.
        catch(<warning descr="Prohibited exception 'NullPointerException' caught">NullPointerException</warning> | UnsupportedOperationException e)
        {
            throw e;
        }
        // Catching the broad Exception type is not flagged by this inspection.
        catch(Exception e)
        {
            throw new RuntimeException(e);
        }
    }
}
GlenRSmith/elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java | 7363 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index.mapper;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.xcontent.XContentType;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* Parser for {@link Mapping} provided in {@link CompressedXContent} format
*/
public final class MappingParser {
    // Supplies a fresh, stateful parser context per parse invocation.
    private final Supplier<MappingParserContext> parserContextSupplier;
    private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();
    // Supplies the default set of metadata field mappers, which parsed entries override below.
    private final Supplier<Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper>> metadataMappersSupplier;
    // Parsers for metadata fields, keyed by field name (e.g. "_source", "_routing").
    private final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers;
    // Resolves a requested type name to the effective document type name.
    private final Function<String, String> documentTypeResolver;
    MappingParser(
        Supplier<MappingParserContext> parserContextSupplier,
        Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers,
        Supplier<Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper>> metadataMappersSupplier,
        Function<String, String> documentTypeResolver
    ) {
        this.parserContextSupplier = parserContextSupplier;
        this.metadataMapperParsers = metadataMapperParsers;
        this.metadataMappersSupplier = metadataMappersSupplier;
        this.documentTypeResolver = documentTypeResolver;
    }
    /**
     * Verify that there are no remaining fields in the provided map that contained mapped fields
     *
     * @param fieldName the name of the field that is being parsed
     * @param fieldNodeMap the map of fields
     */
    public static void checkNoRemainingFields(String fieldName, Map<?, ?> fieldNodeMap) {
        checkNoRemainingFields(fieldNodeMap, "Mapping definition for [" + fieldName + "] has unsupported parameters: ");
    }
    /**
     * Verify that there are no remaining fields in the provided map that contained mapped fields
     *
     * @param fieldNodeMap the map of fields
     * @param message the error message to be returned in case the provided map contains one or more fields
     */
    public static void checkNoRemainingFields(Map<?, ?> fieldNodeMap, String message) {
        if (fieldNodeMap.isEmpty() == false) {
            throw new MapperParsingException(message + getRemainingFields(fieldNodeMap));
        }
    }
    // Renders the leftover (unsupported) entries as " [key : value]" pairs for error messages.
    private static String getRemainingFields(Map<?, ?> map) {
        StringBuilder remainingFields = new StringBuilder();
        for (Object key : map.keySet()) {
            remainingFields.append(" [").append(key).append(" : ").append(map.get(key)).append("]");
        }
        return remainingFields.toString();
    }
    /**
     * Parses the given JSON mapping source into a {@link Mapping}, resolving the type name.
     * If the mapping's single root key matches the requested type (directly or via the
     * document type resolver), parsing descends into that key's value; otherwise the map
     * is parsed as-is under the requested type.
     *
     * @param type the requested type name, or {@code null} to derive it from the source
     * @param source the compressed JSON mapping definition; must not be {@code null}
     * @throws MapperParsingException if the mapping is empty with no type, or no type can be derived
     */
    @SuppressWarnings("unchecked")
    Mapping parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {
        Objects.requireNonNull(source, "source cannot be null");
        Map<String, Object> mapping = XContentHelper.convertToMap(source.compressedReference(), true, XContentType.JSON).v2();
        if (mapping.isEmpty()) {
            if (type == null) {
                throw new MapperParsingException("malformed mapping, no type name found");
            }
        } else {
            // The root key may itself be the type name wrapping the real mapping body.
            String rootName = mapping.keySet().iterator().next();
            if (type == null || type.equals(rootName) || documentTypeResolver.apply(type).equals(rootName)) {
                type = rootName;
                mapping = (Map<String, Object>) mapping.get(rootName);
            }
        }
        if (type == null) {
            throw new MapperParsingException("Failed to derive type");
        }
        return parse(type, mapping);
    }
    // Builds the Mapping: root object mapper, metadata field mappers, and optional _meta.
    // Consumed entries are removed from the map so that any remaining keys can be rejected.
    private Mapping parse(String type, Map<String, Object> mapping) throws MapperParsingException {
        MappingParserContext parserContext = parserContextSupplier.get();
        RootObjectMapper rootObjectMapper = rootObjectTypeParser.parse(type, mapping, parserContext).build(MapperBuilderContext.ROOT);
        // Start from the default metadata mappers; explicit definitions below replace them by class.
        Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = metadataMappersSupplier.get();
        Map<String, Object> meta = null;
        Iterator<Map.Entry<String, Object>> iterator = mapping.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, Object> entry = iterator.next();
            String fieldName = entry.getKey();
            Object fieldNode = entry.getValue();
            MetadataFieldMapper.TypeParser typeParser = metadataMapperParsers.get(fieldName);
            if (typeParser != null) {
                // Remove the entry so it is not flagged as an unsupported parameter below.
                iterator.remove();
                if (false == fieldNode instanceof Map) {
                    throw new IllegalArgumentException("[" + fieldName + "] config must be an object");
                }
                @SuppressWarnings("unchecked")
                Map<String, Object> fieldNodeMap = (Map<String, Object>) fieldNode;
                MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, parserContext)
                    .build(MapperBuilderContext.ROOT);
                metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper);
                fieldNodeMap.remove("type");
                checkNoRemainingFields(fieldName, fieldNodeMap);
            }
        }
        @SuppressWarnings("unchecked")
        Map<String, Object> removed = (Map<String, Object>) mapping.remove("_meta");
        if (removed != null) {
            /*
             * It may not be required to copy meta here to maintain immutability but the cost is pretty low here.
             *
             * Note: this copy can not be replaced by Map#copyOf because we rely on consistent serialization order since we do
             * byte-level checks on the mapping between what we receive from the master and what we have locally. As Map#copyOf
             * is not necessarily the same underlying map implementation, we could end up with a different iteration order.
             * For reference, see MapperService#assertSerializtion and GitHub issues #10302 and #10318.
             *
             * Do not change this to Map#copyOf or any other method of copying meta that could change the iteration order.
             *
             * TODO:
             * - this should almost surely be a copy as a LinkedHashMap to have the ordering guarantees that we are relying on
             * - investigate the above note about whether or not we need to be copying here, the ideal outcome would be to not
             */
            meta = Collections.unmodifiableMap(new HashMap<>(removed));
        }
        checkNoRemainingFields(mapping, "Root mapping definition has unsupported parameters: ");
        return new Mapping(rootObjectMapper, metadataMappers.values().toArray(new MetadataFieldMapper[0]), meta);
    }
}
| apache-2.0 |
ottobackwards/metron | metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/configuration/ConfigurationType.java | 2030 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.metron.stellar.common.configuration;
import com.google.common.base.Function;
import org.apache.metron.stellar.common.Constants;
import org.apache.metron.stellar.common.utils.JSONUtils;
import java.io.IOException;
import java.util.Map;
public enum ConfigurationType implements Function<String, Object> {
  /**
   * The global configuration: stored under the root directory ("."), deserialized
   * from a JSON document into a {@code Map}.
   */
  GLOBAL("global",".", s -> {
    try {
      return JSONUtils.INSTANCE.load(s, JSONUtils.MAP_SUPPLIER);
    } catch (IOException e) {
      throw new RuntimeException("Unable to load " + s, e);
    }
  });

  // All fields are assigned once in the constructor; mark them final so enum
  // constants are immutable.
  final String name;
  final String directory;
  final String zookeeperRoot;
  final Function<String,?> deserializer;

  ConfigurationType(String name, String directory, Function<String, ?> deserializer) {
    this.name = name;
    this.directory = directory;
    this.zookeeperRoot = Constants.ZOOKEEPER_TOPOLOGY_ROOT + "/" + name;
    this.deserializer = deserializer;
  }

  /** @return the short name of this configuration type */
  public String getName() {
    return name;
  }

  /** @return the directory (relative path) where this configuration is stored */
  public String getDirectory() {
    return directory;
  }

  /**
   * Deserializes the raw configuration content using this type's deserializer.
   *
   * @param s the raw configuration content
   * @return the deserialized configuration object
   * @throws RuntimeException if the content cannot be parsed
   */
  public Object deserialize(String s) {
    return deserializer.apply(s);
  }

  @Override
  public Object apply(String s) {
    return deserialize(s);
  }

  /** @return the ZooKeeper path under which this configuration type is stored */
  public String getZookeeperRoot() {
    return zookeeperRoot;
  }
}
| apache-2.0 |
phantomjinx/modeshape | modeshape-jcr/src/main/java/org/modeshape/jcr/query/optimize/CopyCriteria.java | 9694 | /*
* ModeShape (http://www.modeshape.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.modeshape.jcr.query.optimize;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
import org.modeshape.common.annotation.Immutable;
import org.modeshape.jcr.query.QueryContext;
import org.modeshape.jcr.query.model.Column;
import org.modeshape.jcr.query.model.Constraint;
import org.modeshape.jcr.query.model.EquiJoinCondition;
import org.modeshape.jcr.query.model.JoinCondition;
import org.modeshape.jcr.query.model.PropertyExistence;
import org.modeshape.jcr.query.model.PropertyValue;
import org.modeshape.jcr.query.model.ReferenceValue;
import org.modeshape.jcr.query.model.SameNodeJoinCondition;
import org.modeshape.jcr.query.model.SelectorName;
import org.modeshape.jcr.query.model.Visitable;
import org.modeshape.jcr.query.model.Visitors;
import org.modeshape.jcr.query.model.Visitors.AbstractVisitor;
import org.modeshape.jcr.query.plan.PlanNode;
import org.modeshape.jcr.query.plan.PlanNode.Property;
import org.modeshape.jcr.query.plan.PlanNode.Type;
import org.modeshape.jcr.query.plan.PlanUtil;
/**
* An {@link OptimizerRule optimizer rule} that copies SELECT nodes that apply to one side of a equi-join condition so that they
* also apply to the other side fo the equi-join condition.
*/
@Immutable
public class CopyCriteria implements OptimizerRule {
    public static final CopyCriteria INSTANCE = new CopyCriteria();
    @Override
    public PlanNode execute( QueryContext context,
                             PlanNode plan,
                             LinkedList<OptimizerRule> ruleStack ) {
        // Tracks SELECT nodes that have already been copied (or are copies) so the
        // same criteria are not duplicated again while walking further joins.
        Set<PlanNode> copiedSelectNodes = new HashSet<PlanNode>();
        for (PlanNode join : plan.findAllAtOrBelow(Type.JOIN)) {
            // Get the join condition ...
            JoinCondition joinCondition = join.getProperty(Property.JOIN_CONDITION, JoinCondition.class);
            if (joinCondition instanceof EquiJoinCondition) {
                EquiJoinCondition equiJoinCondition = (EquiJoinCondition)joinCondition;
                SelectorName selector1 = equiJoinCondition.selector1Name();
                SelectorName selector2 = equiJoinCondition.selector2Name();
                String property1 = equiJoinCondition.getProperty1Name();
                String property2 = equiJoinCondition.getProperty2Name();
                // Walk up the tree looking for SELECT nodes that apply to one of the sides ...
                PlanNode node = join.getParent();
                while (node != null) {
                    if (!copiedSelectNodes.contains(node)) {
                        // Try the criteria in one direction first, then the other; at most
                        // one copy is inserted per SELECT node.
                        PlanNode copy = copySelectNode(context, node, selector1, property1, selector2, property2);
                        if (copy != null) {
                            node.insertAsParent(copy);
                            copiedSelectNodes.add(node);
                            copiedSelectNodes.add(copy);
                        } else {
                            copy = copySelectNode(context, node, selector2, property2, selector1, property1);
                            if (copy != null) {
                                node.insertAsParent(copy);
                                copiedSelectNodes.add(node);
                                copiedSelectNodes.add(copy);
                            }
                        }
                    }
                    node = node.getParent();
                }
            }
            if (joinCondition instanceof EquiJoinCondition || joinCondition instanceof SameNodeJoinCondition) {
                // Then for each side of the join ...
                PlanNode left = join.getFirstChild();
                PlanNode right = join.getLastChild();
                copySelectNodes(context, left, right);
                copySelectNodes(context, right, left);
            }
        }
        return plan;
    }
    /**
     * Copy any SELECT node below {@code fromJoined} whose selectors are all present on the
     * {@code toJoined} side, inserting the copies above the first SOURCE/JOIN/SET_OPERATION/NULL
     * node found on that side.
     */
    protected void copySelectNodes( QueryContext context,
                                    PlanNode fromJoined,
                                    PlanNode toJoined ) {
        // Find all of the selectors used on the 'to' side ...
        Set<SelectorName> toSelectors = new HashSet<SelectorName>();
        for (PlanNode toNode : toJoined.findAllAtOrBelow()) {
            toSelectors.addAll(toNode.getSelectors());
        }
        // Lazily located insertion point; subsequent copies chain below earlier ones.
        PlanNode nodeBelowSelects = null;
        // Walk down the 'fromJoined' side looking for all SELECT nodes ...
        for (PlanNode select : fromJoined.findAllAtOrBelow(Type.SELECT)) {
            // If all of the SELECT's selectors are also found on the right ...
            if (toSelectors.containsAll(select.getSelectors())) {
                // Copy the criteria ...
                PlanNode copy = new PlanNode(Type.SELECT, select.getSelectors());
                copy.setProperty(Property.SELECT_CRITERIA, select.getProperty(Property.SELECT_CRITERIA));
                if (nodeBelowSelects == null) {
                    nodeBelowSelects = toJoined.findAtOrBelow(Type.SOURCE, Type.JOIN, Type.SET_OPERATION, Type.NULL);
                    if (nodeBelowSelects == null) {
                        nodeBelowSelects = toJoined;
                    }
                }
                nodeBelowSelects.insertAsParent(copy);
                nodeBelowSelects = copy;
            }
        }
    }
    /**
     * If the given SELECT node's criteria reference exactly the one selector/property pair,
     * build a new SELECT node with the same criteria rewritten against the "copy" selector
     * and property; returns {@code null} when the node does not qualify.
     */
    protected PlanNode copySelectNode( QueryContext context,
                                       PlanNode selectNode,
                                       SelectorName selectorName,
                                       String propertyName,
                                       SelectorName copySelectorName,
                                       String copyPropertyName ) {
        // Only single-selector SELECT nodes over the expected selector qualify ...
        if (selectNode.isNot(Type.SELECT)) return null;
        if (selectNode.getSelectors().size() != 1 || !selectNode.getSelectors().contains(selectorName)) return null;
        Constraint constraint = selectNode.getProperty(Property.SELECT_CRITERIA, Constraint.class);
        Set<Column> columns = getColumnsReferencedBy(constraint);
        if (columns.size() != 1) return null;
        Column column = columns.iterator().next();
        if (!column.selectorName().equals(selectorName)) return null;
        if (!column.getPropertyName().equals(propertyName)) return null;
        // We know that this constraint ONLY applies to the referenced selector and property,
        // so we will duplicate this constraint ...
        // Create the new node ...
        PlanNode copy = new PlanNode(Type.SELECT, copySelectorName);
        // Copy the constraint, but change the references to the copy selector and property ...
        PlanUtil.ColumnMapping mappings = new PlanUtil.ColumnMapping(selectorName);
        mappings.map(propertyName, new Column(copySelectorName, copyPropertyName, copyPropertyName));
        Constraint newCriteria = PlanUtil.replaceReferences(context, constraint, mappings, copy);
        copy.setProperty(Property.SELECT_CRITERIA, newCriteria);
        return copy;
    }
    @Override
    public String toString() {
        return getClass().getSimpleName();
    }
    /**
     * Get the set of Column objects that represent those columns referenced by the visitable object.
     *
     * @param visitable the object to be visited
     * @return the set of Column objects, with column names that always are the string-form of the {@link Column#getPropertyName()
     *         property name}; never null
     */
    public static Set<Column> getColumnsReferencedBy( Visitable visitable ) {
        if (visitable == null) return Collections.emptySet();
        final Set<Column> symbols = new HashSet<Column>();
        // Walk the entire structure, so only supply a StrategyVisitor (that does no navigation) ...
        Visitors.visitAll(visitable, new AbstractVisitor() {
            protected void addColumnFor( SelectorName selectorName,
                                         String property ) {
                symbols.add(new Column(selectorName, property, property));
            }
            @Override
            public void visit( Column column ) {
                symbols.add(column);
            }
            @Override
            public void visit( EquiJoinCondition joinCondition ) {
                addColumnFor(joinCondition.selector1Name(), joinCondition.getProperty1Name());
                addColumnFor(joinCondition.selector2Name(), joinCondition.getProperty2Name());
            }
            @Override
            public void visit( PropertyExistence prop ) {
                addColumnFor(prop.selectorName(), prop.getPropertyName());
            }
            @Override
            public void visit( PropertyValue prop ) {
                addColumnFor(prop.selectorName(), prop.getPropertyName());
            }
            @Override
            public void visit( ReferenceValue ref ) {
                // A ReferenceValue may reference the node itself (no property name); only
                // property-level references contribute a column.
                String propertyName = ref.getPropertyName();
                if (propertyName != null) {
                    addColumnFor(ref.selectorName(), propertyName);
                }
            }
        });
        return symbols;
    }
}
| apache-2.0 |
cooldoger/cassandra | src/java/org/apache/cassandra/repair/SyncTask.java | 3812 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.repair;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.streaming.PreviewKind;
import org.apache.cassandra.tracing.Tracing;
/**
 * Base class for tasks that synchronize out-of-sync ranges between two nodes during
 * repair: if there are ranges to sync, {@link #startSync()} is invoked; otherwise the
 * task completes immediately with its {@link SyncStat}.
 */
public abstract class SyncTask extends AbstractFuture<SyncStat> implements Runnable
{
    // Loggers are per-class constants: declare them static final so they cannot be reassigned.
    private static final Logger logger = LoggerFactory.getLogger(SyncTask.class);

    protected final RepairJobDesc desc;
    protected final List<Range<Token>> rangesToSync;
    protected final PreviewKind previewKind;
    protected final SyncNodePair nodePair;

    // Set when run() begins; Long.MIN_VALUE means the task never ran, so finished()
    // records no timing.
    protected volatile long startTime = Long.MIN_VALUE;
    protected final SyncStat stat;

    protected SyncTask(RepairJobDesc desc, InetAddressAndPort primaryEndpoint, InetAddressAndPort peer, List<Range<Token>> rangesToSync, PreviewKind previewKind)
    {
        Preconditions.checkArgument(!peer.equals(primaryEndpoint), "Sending and receiving node are the same: %s", peer);
        this.desc = desc;
        this.rangesToSync = rangesToSync;
        this.nodePair = new SyncNodePair(primaryEndpoint, peer);
        this.previewKind = previewKind;
        this.stat = new SyncStat(nodePair, rangesToSync.size());
    }

    /** Starts the actual synchronization; only called when there are ranges out of sync. */
    protected abstract void startSync();

    public SyncNodePair nodePair()
    {
        return nodePair;
    }

    /**
     * Compares trees, and triggers repairs for any ranges that mismatch.
     */
    @Override
    public final void run()
    {
        startTime = System.currentTimeMillis();

        // choose a repair method based on the significance of the difference
        String format = String.format("%s Endpoints %s and %s %%s for %s", previewKind.logPrefix(desc.sessionId), nodePair.coordinator, nodePair.peer, desc.columnFamily);
        if (rangesToSync.isEmpty())
        {
            // Nothing to stream: complete this future immediately with the stats.
            logger.info(String.format(format, "are consistent"));
            Tracing.traceRepair("Endpoint {} is consistent with {} for {}", nodePair.coordinator, nodePair.peer, desc.columnFamily);
            set(stat);
            return;
        }

        // non-0 difference: perform streaming repair
        logger.info(String.format(format, "have " + rangesToSync.size() + " range(s) out of sync"));
        Tracing.traceRepair("Endpoint {} has {} range(s) out of sync with {} for {}", nodePair.coordinator, rangesToSync.size(), nodePair.peer, desc.columnFamily);
        startSync();
    }

    public boolean isLocal()
    {
        return false;
    }

    /** Records the elapsed sync time in the table's metrics, if the task ever started. */
    protected void finished()
    {
        if (startTime != Long.MIN_VALUE)
            Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily).metric.syncTime.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
    }
}
| apache-2.0 |
illerax/jcommune | jcommune-model/src/test/java/org/jtalks/jcommune/model/dao/hibernate/BranchHibernateDaoTest.java | 9916 | /**
* Copyright (C) 2011 JTalks.org Team
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.jtalks.jcommune.model.dao.hibernate;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.jtalks.common.model.entity.Section;
import org.jtalks.jcommune.model.entity.PersistedObjectsFactory;
import org.jtalks.jcommune.model.dao.BranchDao;
import org.jtalks.jcommune.model.entity.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests;
import org.springframework.test.context.transaction.TransactionConfiguration;
import org.springframework.transaction.annotation.Transactional;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import javax.validation.ConstraintViolationException;
import java.util.ArrayList;
import java.util.List;
import static org.testng.Assert.*;
import static org.unitils.reflectionassert.ReflectionAssert.assertReflectionEquals;
/**
* @author Kirill Afonin
* @author masyan
*/
@ContextConfiguration(locations = {"classpath:/org/jtalks/jcommune/model/entity/applicationContext-dao.xml"})
@TransactionConfiguration(transactionManager = "transactionManager", defaultRollback = true)
@Transactional
public class BranchHibernateDaoTest extends AbstractTransactionalTestNGSpringContextTests {
@Autowired
private SessionFactory sessionFactory;
@Autowired
private BranchDao dao;
private Session session;
Branch branch;
    // Binds the object factories to the current Hibernate session and prepares a
    // default branch fixture before each test.
    @BeforeMethod
    public void setUp() throws Exception {
        session = sessionFactory.getCurrentSession();
        PersistedObjectsFactory.setSession(session);
        branch = ObjectsFactory.getDefaultBranch();
    }
/*===== Common methods =====*/
    // Saving a transient branch must assign an id and persist a reflectively equal copy.
    @Test
    public void testSave() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        dao.saveOrUpdate(branch);
        assertNotSame(branch.getId(), 0, "Id not created");
        // Evict so the subsequent get() reads from the database rather than the session cache.
        session.evict(branch);
        Branch result = (Branch) session.get(Branch.class, branch.getId());
        assertReflectionEquals(branch, result);
    }
    // Persisting a branch with a null name must violate the not-null constraint on flush.
    @Test(expectedExceptions = ConstraintViolationException.class)
    public void testSaveBranchWithNameNotNullViolation() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        session.save(branch);
        branch.setName(null);
        dao.saveOrUpdate(branch);
        // The violation is only raised when the pending update is flushed.
        session.flush();
    }
    // A persisted branch must be retrievable by its id.
    @Test
    public void testGet() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        session.save(branch);
        Branch result = dao.get(branch.getId());
        assertNotNull(result);
        assertEquals(result.getId(), branch.getId());
    }
@Test
public void testGetInvalidId() {
Branch result = dao.get(-567890L);
assertNull(result);
}
    // Updating a persisted branch's name must be written through to the database.
    @Test
    public void testUpdate() {
        String newName = "new name";
        Branch branch = ObjectsFactory.getDefaultBranch();
        session.save(branch);
        branch.setName(newName);
        dao.saveOrUpdate(branch);
        // Flush then evict so the re-read below comes from the database, not the cache.
        session.flush();
        session.evict(branch);
        Branch result = (Branch) session.get(Branch.class, branch.getId());
        assertEquals(result.getName(), newName);
    }
    // Updating a persisted branch's name to null must violate the not-null constraint on flush.
    @Test(expectedExceptions = javax.validation.ConstraintViolationException.class)
    public void testUpdateNotNullViolation() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        session.save(branch);
        branch.setName(null);
        dao.saveOrUpdate(branch);
        session.flush();
    }
    // isExist must report true for a branch that has been persisted.
    @Test
    public void testIsExist() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        session.save(branch);
        assertTrue(dao.isExist(branch.getId()));
    }
@Test
public void testIsNotExist() {
assertFalse(dao.isExist(99999L));
}
    // Deleting a topic from a branch must cascade, removing the topic and its posts
    // while keeping the branch itself.
    @Test
    public void testDeleteTopicFromBranchCascade() {
        Branch branch = ObjectsFactory.getDefaultBranch();
        JCUser author = ObjectsFactory.getDefaultUser();
        session.save(author);
        Topic topic = new Topic(author, "title", "Discussion");
        Post post = new Post(author, "content");
        topic.addPost(post);
        branch.addTopic(topic);
        session.save(branch);
        branch.deleteTopic(topic);
        dao.saveOrUpdate(branch);
        session.flush();
        assertEquals(getCount("select count(*) from org.jtalks.jcommune.model.entity.Branch"), 1);
        assertEquals(getCount("select count(*) from Topic"), 0);
        assertEquals(getCount("select count(*) from Post"), 0);
    }
private int getCount(String hql) {
return ((Number) session.createQuery(hql).uniqueResult()).intValue();
}
private List<Branch> createAndSaveBranchList(int size, int sectionPosition) {
List<Branch> branches = new ArrayList<>();
Section section = ObjectsFactory.getDefaultSection();
section.setPosition(sectionPosition);
for (int i = 0; i < size; i++) {
Branch newBranch = new Branch("Branch #" + i, "Branch #" + i);
section.addOrUpdateBranch(newBranch);
newBranch.setSection(section);
branches.add(newBranch);
}
session.save(section);
return branches;
}
    // A section with no branches must make getAllBranches return an empty list.
    @Test
    public void shouldReturnNoBranchesWhenDbIsEmpty() {
        Section emptySection = ObjectsFactory.getDefaultSection();
        session.save(emptySection);
        List<Branch> selectedBranches = dao.getAllBranches();
        assertTrue(selectedBranches.isEmpty());
    }
    // getAllBranches must return branches ordered by their section's position:
    // the section at position 0 comes before the section at position 1.
    @Test
    public void testGetAllBranches() {
        int sectionSize = 5;
        List<Branch> branchesOfFirstSection = createAndSaveBranchList(sectionSize, 1);
        List<Branch> branchesOfSecondSection = createAndSaveBranchList(sectionSize, 0);
        // build desired order
        List<Branch> createdBranches = new ArrayList<>(branchesOfSecondSection);
        createdBranches.addAll(branchesOfFirstSection);
        List<Branch> selectedBranches = dao.getAllBranches();
        assertEquals(createdBranches, selectedBranches);//checking the order
    }
    // The post count of a branch must equal the number of posts in its topics.
    @Test
    public void testGetCountPostsInBranch() {
        //topic with one post
        Topic topic = PersistedObjectsFactory.getDefaultTopic();
        Branch branch = topic.getBranch();
        //add two posts
        topic.addPost(new Post(topic.getTopicStarter(), "Second post"));
        topic.addPost(new Post(topic.getTopicStarter(), "Third post"));
        //
        session.save(branch);
        int expectedCount = topic.getPosts().size();
        int actualCount = dao.getCountPostsInBranch(branch);
        assertEquals(actualCount, expectedCount, "Count of posts in the branch is wrong");
    }
    // A subscriber whose group is granted VIEW_TOPICS on the branch must be returned.
    @Test
    public void testGetSubscribersWithAllowedPermission() {
        JCUser subscriber = PersistedObjectsFactory.getDefaultUserWithGroups();
        branch.getSubscribers().add(subscriber);
        session.save(branch);
        // Grant the permission (allowed = true) to the subscriber's first group.
        PersistedObjectsFactory.createAndSaveViewTopicsBranchesEntity(
                branch.getId(), String.valueOf(subscriber.getGroups().get(0).getId()), true);
        assertEquals(dao.getAllowedSubscribers(branch).size(), 1,
                "Should return subscribers which are contained in some group with VIEW_TOPIC permission.");
    }
@Test
public void testGetSubscribersWithDisallowedPermission() {
JCUser subscriber = PersistedObjectsFactory.getDefaultUserWithGroups();
branch.getSubscribers().add(subscriber);
session.save(branch);
PersistedObjectsFactory.createAndSaveViewTopicsBranchesEntity(
branch.getId(), String.valueOf(subscriber.getGroups().get(0).getId()), false);
assertEquals(dao.getAllowedSubscribers(branch).size(), 0,
"Should not return subscribers which are contained in any group with disallowed VIEW_TOPIC permission.");
}
@Test
public void testGetSubscribersWithAllowedAndDisallowedPermission() {
JCUser subscriber = PersistedObjectsFactory.getDefaultUserWithGroups();
branch.getSubscribers().add(subscriber);
session.save(branch);
PersistedObjectsFactory.createAndSaveViewTopicsBranchesEntity(
branch.getId(), String.valueOf(subscriber.getGroups().get(0).getId()), false);
PersistedObjectsFactory.createAndSaveViewTopicsBranchesEntity(
branch.getId(), String.valueOf(subscriber.getGroups().get(1).getId()), true);
assertEquals(dao.getAllowedSubscribers(branch).size(), 0,
"Should not return subscribers which are contained in any group with disallowed VIEW_TOPIC permission.");
}
@Test
public void testGetSubscribersWithoutAllowedAndDisallowedPermission() {
JCUser subscriber = PersistedObjectsFactory.getDefaultUserWithGroups();
branch.getSubscribers().add(subscriber);
session.save(branch);
assertEquals(dao.getAllowedSubscribers(branch).size(), 0,
"Should not return subscribers which are not contained in any group with allowed VIEW_TOPIC permission.");
}
} | lgpl-2.1 |
illerax/jcommune | jcommune-plugin-api/src/main/java/org/jtalks/jcommune/plugin/api/service/transactional/TransactionalPluginBranchService.java | 2447 | /**
* Copyright (C) 2011 JTalks.org Team
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.jtalks.jcommune.plugin.api.service.transactional;
import org.jtalks.jcommune.model.entity.Branch;
import org.jtalks.jcommune.plugin.api.exceptions.NotFoundException;
import org.jtalks.jcommune.plugin.api.service.PluginBranchService;
/**
* Service for manipulating {@link org.jtalks.jcommune.model.entity.Branch} instances from plugins.
* For manipulating {@link org.jtalks.jcommune.model.entity.Branch} instances from jcommune use classes from service
* module
*
* This class is singleton because we can't use spring dependency injection mechanism in plugins due plugins can be
* added or removed in runtime.
*
* @author Mikhail Stryzhonok
*/
public class TransactionalPluginBranchService implements PluginBranchService {

    private static final TransactionalPluginBranchService INSTANCE = new TransactionalPluginBranchService();

    // Delegate that performs the actual branch lookups; injected once via
    // setBranchService during initialization.
    private PluginBranchService branchService;

    /** Use {@link #getInstance()}, this class is singleton. */
    private TransactionalPluginBranchService() {
    }

    /**
     * Returns the single instance of this service.
     *
     * @return the singleton {@link TransactionalPluginBranchService}
     */
    public static PluginBranchService getInstance() {
        return INSTANCE;
    }

    /**
     * Sets the branch service this singleton delegates to.
     * Should be used once, during initialization.
     *
     * @param branchService delegate used to fetch branches
     */
    public void setBranchService(PluginBranchService branchService) {
        this.branchService = branchService;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Branch get(Long id) throws NotFoundException {
        return branchService.get(id);
    }
}
| lgpl-2.1 |
asedunov/intellij-community | platform/core-api/src/com/intellij/openapi/vfs/pointers/VirtualFilePointerListener.java | 1053 | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.vfs.pointers;
import com.intellij.util.messages.Topic;
import org.jetbrains.annotations.NotNull;
public interface VirtualFilePointerListener {
  // Message-bus topic used to broadcast pointer validity changes to subscribers.
  Topic<VirtualFilePointerListener> TOPIC = Topic.create("VirtualFilePointer", VirtualFilePointerListener.class);
  /** Notification sent before the validity of the given pointers changes. Default implementation does nothing. */
  default void beforeValidityChanged(@NotNull VirtualFilePointer[] pointers) {
  }
  /** Notification sent after the validity of the given pointers has changed. Default implementation does nothing. */
  default void validityChanged(@NotNull VirtualFilePointer[] pointers) {
  }
}
| apache-2.0 |
mhajas/keycloak | server-spi-private/src/main/java/org/keycloak/migration/migrators/MigrateTo1_7_0.java | 2976 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.migration.migrators;
import org.keycloak.migration.MigrationProvider;
import org.keycloak.migration.ModelVersion;
import org.keycloak.models.AuthenticationFlowModel;
import org.keycloak.models.Constants;
import org.keycloak.models.IdentityProviderModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.utils.DefaultAuthenticationFlows;
import org.keycloak.representations.idm.RealmRepresentation;
import java.util.List;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class MigrateTo1_7_0 implements Migration {

    public static final ModelVersion VERSION = new ModelVersion("1.7.0");

    // @Override added for consistency: migrateImport already carried it, but
    // these two Migration interface methods did not.
    @Override
    public ModelVersion getVersion() {
        return VERSION;
    }

    /**
     * Applies the 1.7.0 migration to every existing realm.
     */
    @Override
    public void migrate(KeycloakSession session) {
        List<RealmModel> realms = session.realms().getRealms();
        for (RealmModel realm : realms) {
            migrateRealm(session, realm);
        }
    }

    @Override
    public void migrateImport(KeycloakSession session, RealmModel realm, RealmRepresentation rep, boolean skipUserDependent) {
        migrateRealm(session, realm);
    }

    /**
     * Performs the per-realm 1.7.0 changes: implicit-flow token lifespan,
     * the 'admin-cli' client, and the firstBrokerLogin flow.
     */
    protected void migrateRealm(KeycloakSession session, RealmModel realm) {
        // Set default accessToken timeout for implicit flow
        realm.setAccessTokenLifespanForImplicitFlow(Constants.DEFAULT_ACCESS_TOKEN_LIFESPAN_FOR_IMPLICIT_FLOW_TIMEOUT);

        // Add 'admin-cli' builtin client
        MigrationProvider migrationProvider = session.getProvider(MigrationProvider.class);
        migrationProvider.setupAdminCli(realm);

        // add firstBrokerLogin flow and set it on every identity provider that
        // does not have one configured yet
        DefaultAuthenticationFlows.migrateFlows(realm);
        AuthenticationFlowModel firstBrokerLoginFlow = realm.getFlowByAlias(DefaultAuthenticationFlows.FIRST_BROKER_LOGIN_FLOW);

        List<IdentityProviderModel> identityProviders = realm.getIdentityProviders();
        for (IdentityProviderModel identityProvider : identityProviders) {
            if (identityProvider.getFirstBrokerLoginFlowId() == null) {
                identityProvider.setFirstBrokerLoginFlowId(firstBrokerLoginFlow.getId());
                realm.updateIdentityProvider(identityProvider);
            }
        }
    }
}
| apache-2.0 |
openweave/openweave-core | third_party/android/platform-libcore/android-platform-libcore/luni/src/main/java/java/util/spi/LocaleServiceProvider.java | 1447 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package java.util.spi;
import java.util.Locale;
/**
* The base class for all the locale related service provider interfaces (SPIs).
* <p>Note that Android does not support user-supplied locale service providers.
* @since 1.6
* @hide
*/
public abstract class LocaleServiceProvider {
    /**
     * Default constructor, for use by subclasses.
     */
    protected LocaleServiceProvider() {
        // do nothing
    }
    /**
     * Returns all locales for which this locale service provider has localized objects or names.
     *
     * @return an array of the locales supported by this provider
     */
    public abstract Locale[] getAvailableLocales();
}
| apache-2.0 |
asedunov/intellij-community | java/java-psi-api/src/com/intellij/psi/PsiAnnotationMemberValue.java | 1212 | /*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi;
import com.intellij.util.ArrayFactory;
import org.jetbrains.annotations.NotNull;
/**
* Represents a PSI element which can be used as the value of an annotation element.
*
* @author ven
*/
public interface PsiAnnotationMemberValue extends PsiElement {
/**
* The empty array of PSI annotation member values which can be reused to avoid unnecessary allocations.
*/
PsiAnnotationMemberValue[] EMPTY_ARRAY = new PsiAnnotationMemberValue[0];
ArrayFactory<PsiAnnotationMemberValue> ARRAY_FACTORY = count -> count == 0 ? EMPTY_ARRAY : new PsiAnnotationMemberValue[count];
}
| apache-2.0 |
mvolaart/openhab2-addons | addons/binding/org.openhab.binding.dlinksmarthome/src/main/java/org/openhab/binding/dlinksmarthome/internal/DLinkSmartHomeHandlerFactory.java | 1505 | /**
* Copyright (c) 2010-2017 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.binding.dlinksmarthome.internal;
import static org.openhab.binding.dlinksmarthome.DLinkSmartHomeBindingConstants.*;
import org.eclipse.smarthome.core.thing.Thing;
import org.eclipse.smarthome.core.thing.ThingTypeUID;
import org.eclipse.smarthome.core.thing.binding.BaseThingHandlerFactory;
import org.eclipse.smarthome.core.thing.binding.ThingHandler;
import org.openhab.binding.dlinksmarthome.handler.DLinkMotionSensorHandler;
/**
* The {@link DLinkSmartHomeHandlerFactory} is responsible for creating things and thing
* handlers.
*
* @author Mike Major - Initial contribution
*/
public class DLinkSmartHomeHandlerFactory extends BaseThingHandlerFactory {
@Override
public boolean supportsThingType(final ThingTypeUID thingTypeUID) {
return SUPPORTED_THING_TYPES_UIDS.contains(thingTypeUID);
}
@Override
protected ThingHandler createHandler(final Thing thing) {
final ThingTypeUID thingTypeUID = thing.getThingTypeUID();
if (thingTypeUID.equals(THING_TYPE_DCHS150)) {
return new DLinkMotionSensorHandler(thing);
}
return null;
}
}
| epl-1.0 |
bitblender/drill | exec/vector/src/main/java/org/apache/drill/exec/vector/complex/impl/MapOrListWriterImpl.java | 7389 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.vector.complex.impl;
import org.apache.drill.exec.vector.complex.writer.BaseWriter;
import org.apache.drill.exec.vector.complex.writer.BaseWriter.MapOrListWriter;
import org.apache.drill.exec.vector.complex.writer.BigIntWriter;
import org.apache.drill.exec.vector.complex.writer.BitWriter;
import org.apache.drill.exec.vector.complex.writer.DateWriter;
import org.apache.drill.exec.vector.complex.writer.Decimal18Writer;
import org.apache.drill.exec.vector.complex.writer.Decimal28DenseWriter;
import org.apache.drill.exec.vector.complex.writer.Decimal28SparseWriter;
import org.apache.drill.exec.vector.complex.writer.Decimal38DenseWriter;
import org.apache.drill.exec.vector.complex.writer.Decimal38SparseWriter;
import org.apache.drill.exec.vector.complex.writer.Decimal9Writer;
import org.apache.drill.exec.vector.complex.writer.Float4Writer;
import org.apache.drill.exec.vector.complex.writer.Float8Writer;
import org.apache.drill.exec.vector.complex.writer.IntWriter;
import org.apache.drill.exec.vector.complex.writer.IntervalDayWriter;
import org.apache.drill.exec.vector.complex.writer.IntervalWriter;
import org.apache.drill.exec.vector.complex.writer.IntervalYearWriter;
import org.apache.drill.exec.vector.complex.writer.SmallIntWriter;
import org.apache.drill.exec.vector.complex.writer.TimeStampWriter;
import org.apache.drill.exec.vector.complex.writer.TimeWriter;
import org.apache.drill.exec.vector.complex.writer.TinyIntWriter;
import org.apache.drill.exec.vector.complex.writer.UInt1Writer;
import org.apache.drill.exec.vector.complex.writer.UInt2Writer;
import org.apache.drill.exec.vector.complex.writer.UInt4Writer;
import org.apache.drill.exec.vector.complex.writer.UInt8Writer;
import org.apache.drill.exec.vector.complex.writer.Var16CharWriter;
import org.apache.drill.exec.vector.complex.writer.VarBinaryWriter;
import org.apache.drill.exec.vector.complex.writer.VarCharWriter;
public class MapOrListWriterImpl implements MapOrListWriter {
public final BaseWriter.MapWriter map;
public final BaseWriter.ListWriter list;
public MapOrListWriterImpl(final BaseWriter.MapWriter writer) {
this.map = writer;
this.list = null;
}
public MapOrListWriterImpl(final BaseWriter.ListWriter writer) {
this.map = null;
this.list = writer;
}
public void start() {
if (map != null) {
map.start();
} else {
list.startList();
}
}
public void end() {
if (map != null) {
map.end();
} else {
list.endList();
}
}
public MapOrListWriter map(final String name) {
assert map != null;
return new MapOrListWriterImpl(map.map(name));
}
public MapOrListWriter listoftmap(final String name) {
assert list != null;
return new MapOrListWriterImpl(list.map());
}
public MapOrListWriter list(final String name) {
assert map != null;
return new MapOrListWriterImpl(map.list(name));
}
public boolean isMapWriter() {
return map != null;
}
public boolean isListWriter() {
return list != null;
}
public VarCharWriter varChar(final String name) {
return (map != null) ? map.varChar(name) : list.varChar();
}
public IntWriter integer(final String name) {
return (map != null) ? map.integer(name) : list.integer();
}
public BigIntWriter bigInt(final String name) {
return (map != null) ? map.bigInt(name) : list.bigInt();
}
public Float4Writer float4(final String name) {
return (map != null) ? map.float4(name) : list.float4();
}
public Float8Writer float8(final String name) {
return (map != null) ? map.float8(name) : list.float8();
}
public BitWriter bit(final String name) {
return (map != null) ? map.bit(name) : list.bit();
}
/**
* {@inheritDoc}
*/
@Deprecated
public VarBinaryWriter binary(final String name) {
return (map != null) ? map.varBinary(name) : list.varBinary();
}
@Override
public TinyIntWriter tinyInt(String name) {
return (map != null) ? map.tinyInt(name) : list.tinyInt();
}
@Override
public SmallIntWriter smallInt(String name) {
return (map != null) ? map.smallInt(name) : list.smallInt();
}
@Override
public DateWriter date(String name) {
return (map != null) ? map.date(name) : list.date();
}
@Override
public TimeWriter time(String name) {
return (map != null) ? map.time(name) : list.time();
}
@Override
public TimeStampWriter timeStamp(String name) {
return (map != null) ? map.timeStamp(name) : list.timeStamp();
}
@Override
public VarBinaryWriter varBinary(String name) {
return (map != null) ? map.varBinary(name) : list.varBinary();
}
@Override
public Var16CharWriter var16Char(String name) {
return (map != null) ? map.var16Char(name) : list.var16Char();
}
@Override
public UInt1Writer uInt1(String name) {
return (map != null) ? map.uInt1(name) : list.uInt1();
}
@Override
public UInt2Writer uInt2(String name) {
return (map != null) ? map.uInt2(name) : list.uInt2();
}
@Override
public UInt4Writer uInt4(String name) {
return (map != null) ? map.uInt4(name) : list.uInt4();
}
@Override
public UInt8Writer uInt8(String name) {
return (map != null) ? map.uInt8(name) : list.uInt8();
}
@Override
public IntervalYearWriter intervalYear(String name) {
return (map != null) ? map.intervalYear(name) : list.intervalYear();
}
@Override
public IntervalDayWriter intervalDay(String name) {
return (map != null) ? map.intervalDay(name) : list.intervalDay();
}
@Override
public IntervalWriter interval(String name) {
return (map != null) ? map.interval(name) : list.interval();
}
@Override
public Decimal9Writer decimal9(String name) {
return (map != null) ? map.decimal9(name) : list.decimal9();
}
@Override
public Decimal18Writer decimal18(String name) {
return (map != null) ? map.decimal18(name) : list.decimal18();
}
@Override
public Decimal28DenseWriter decimal28Dense(String name) {
return (map != null) ? map.decimal28Dense(name) : list.decimal28Dense();
}
@Override
public Decimal38DenseWriter decimal38Dense(String name) {
return (map != null) ? map.decimal38Dense(name) : list.decimal38Dense();
}
@Override
public Decimal38SparseWriter decimal38Sparse(String name) {
return (map != null) ? map.decimal38Sparse(name) : list.decimal38Sparse();
}
@Override
public Decimal28SparseWriter decimal28Sparse(String name) {
return (map != null) ? map.decimal28Sparse(name) : list.decimal28Sparse();
}
}
| apache-2.0 |
DadanielZ/incubator-eagle | eagle-core/eagle-query/eagle-storage-base/src/main/java/org/apache/eagle/storage/result/Result.java | 1216 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.storage.result;
/**
* @since 3/18/15
*/
public class Result {

    // Whether the storage operation completed successfully.
    private boolean success;

    // Size of the result (semantics defined by callers).
    private int size;

    /** @return true if the operation succeeded */
    public boolean isSuccess() {
        return success;
    }

    /** @param success outcome flag to record */
    public void setSuccess(boolean success) {
        this.success = success;
    }

    /** @return the recorded result size */
    public int getSize() {
        return size;
    }

    /** @param size result size to record */
    public void setSize(int size) {
        this.size = size;
    }
}
chanakaudaya/carbon-multitenancy | components/tenant-mgt/org.wso2.carbon.tenant.mgt/src/main/java/org/wso2/carbon/tenant/mgt/util/TenantUMDataDeletionUtil.java | 4834 | /*
* Copyright (c) 2005-2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.tenant.mgt.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
public class TenantUMDataDeletionUtil {
public static final Log log = LogFactory.getLog(TenantUMDataDeletionUtil.class);
/**
* Delete all tenant information related to tenant stored in UM tables
* @param tenantId id of tenant whose data should be deleted
* @param conn database connection object
* @throws SQLException thrown if an error occurs while executing the queries
*/
public static void deleteTenantUMData(int tenantId, Connection conn) throws Exception {
try {
conn.setAutoCommit(false);
String deleteUserPermissionSql = "DELETE FROM UM_USER_PERMISSION WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteUserPermissionSql, tenantId);
String deleteRolePermissionSql = "DELETE FROM UM_ROLE_PERMISSION WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteRolePermissionSql, tenantId);
String deletePermissionSql = "DELETE FROM UM_PERMISSION WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deletePermissionSql, tenantId);
String deleteClaimBehaviourSql = "DELETE FROM UM_CLAIM_BEHAVIOR WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteClaimBehaviourSql, tenantId);
String deleteProfileConfigSql = "DELETE FROM UM_PROFILE_CONFIG WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteProfileConfigSql, tenantId);
String deleteClaimSql = "DELETE FROM UM_CLAIM WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteClaimSql, tenantId);
String deleteDialectSql = "DELETE FROM UM_DIALECT WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteDialectSql, tenantId);
String deleteUserAttributeSql = "DELETE FROM UM_USER_ATTRIBUTE WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteUserAttributeSql, tenantId);
String deleteHybridUserRoleSql = "DELETE FROM UM_HYBRID_USER_ROLE WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteHybridUserRoleSql, tenantId);
String deleteHybridRoleSql = "DELETE FROM UM_HYBRID_ROLE WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteHybridRoleSql, tenantId);
String deleteHybridRememberMeSql = "DELETE FROM UM_HYBRID_REMEMBER_ME WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteHybridRememberMeSql, tenantId);
String deleteUserRoleSql = "DELETE FROM UM_USER_ROLE WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteUserRoleSql, tenantId);
String deleteRoleSql = "DELETE FROM UM_ROLE WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteRoleSql, tenantId);
String deleteUserSql = "DELETE FROM UM_USER WHERE UM_TENANT_ID = ?";
executeDeleteQuery(conn, deleteUserSql, tenantId);
String deleteTenantSql = "DELETE FROM UM_TENANT WHERE UM_ID = ?";
executeDeleteQuery(conn, deleteTenantSql, tenantId);
conn.commit();
} catch (Exception e) {
conn.rollback();
String errorMsg = "An error occurred while deleting registry data for tenant: " + tenantId;
log.error(errorMsg, e);
throw new Exception(errorMsg, e);
} finally {
conn.close();
}
}
private static void executeDeleteQuery(Connection conn, String query, int tenantId)
throws Exception {
PreparedStatement ps = null;
try {
ps = conn.prepareStatement(query);
ps.setInt(1, tenantId);
ps.executeUpdate();
} catch (SQLException e) {
String errMsg = "Error executing query " + query + " for tenant: " + tenantId;
log.error(errMsg, e);
throw new Exception(errMsg, e);
} finally {
if (ps != null) {
ps.close();
}
}
}
} | apache-2.0 |
gspandy/cat | cat-home/src/main/java/com/dianping/cat/report/page/event/DisplayNames.java | 3268 | package com.dianping.cat.report.page.event;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.dianping.cat.consumer.event.model.entity.EventName;
import com.dianping.cat.consumer.event.model.entity.EventReport;
import com.dianping.cat.consumer.event.model.entity.EventType;
public class DisplayNames {
   private List<EventNameModel> m_results = new ArrayList<EventNameModel>();

   /**
    * Builds the display rows for one event type on one machine: a "TOTAL"
    * aggregate row followed by the per-name rows sorted by the given key.
    *
    * @param sorted sort key ("name"/"type", "total", "failure",
    *               "failurePercent"); null defaults to "avg", which leaves
    *               the collection order unchanged
    * @param type   event type whose names should be listed
    * @param ip     machine ip whose data is read from the report
    * @param report source event report
    * @return this object, with the results filled in
    */
   public DisplayNames display(String sorted, String type, String ip, EventReport report) {
      Map<String, EventType> types = report.findOrCreateMachine(ip).getTypes();
      EventName all = new EventName("TOTAL");

      all.setTotalPercent(1);
      if (types != null) {
         EventType names = types.get(type);

         if (names != null) {
            for (Entry<String, EventName> entry : names.getNames().entrySet()) {
               m_results.add(new EventNameModel(entry.getKey(), entry.getValue()));
               mergeName(all, entry.getValue());
            }
         }
      }
      if (sorted == null) {
         sorted = "avg";
      }
      Collections.sort(m_results, new EventComparator(sorted));

      // Percentages are computed against the aggregated total count.
      long total = all.getTotalCount();

      for (EventNameModel nameModel : m_results) {
         EventName eventName = nameModel.getDetail();

         eventName.setTotalPercent(eventName.getTotalCount() / (double) total);
      }
      m_results.add(0, new EventNameModel("TOTAL", all));
      return this;
   }

   public List<EventNameModel> getResults() {
      return m_results;
   }

   /**
    * Accumulates the counts of {@code other} into {@code old}, recomputes the
    * failure percentage and fills in message urls that are still missing.
    */
   public void mergeName(EventName old, EventName other) {
      old.setTotalCount(old.getTotalCount() + other.getTotalCount());
      old.setFailCount(old.getFailCount() + other.getFailCount());

      if (old.getTotalCount() > 0) {
         old.setFailPercent(old.getFailCount() * 100.0 / old.getTotalCount());
      }
      if (old.getSuccessMessageUrl() == null) {
         old.setSuccessMessageUrl(other.getSuccessMessageUrl());
      }
      if (old.getFailMessageUrl() == null) {
         old.setFailMessageUrl(other.getFailMessageUrl());
      }
   }

   /**
    * Orders rows by the configured key: ascending by name, descending by
    * total/failure count or failure percent; any other key keeps the order.
    */
   public static class EventComparator implements Comparator<EventNameModel> {
      private String m_sorted;

      public EventComparator(String type) {
         m_sorted = type;
      }

      @Override
      public int compare(EventNameModel m1, EventNameModel m2) {
         if (m_sorted.equals("name") || m_sorted.equals("type")) {
            return m1.getType().compareTo(m2.getType());
         }
         if (m_sorted.equals("total")) {
            // Compare directly instead of casting the difference to int,
            // which truncated and could overflow for large counts.
            long total1 = m1.getDetail().getTotalCount();
            long total2 = m2.getDetail().getTotalCount();

            return total2 > total1 ? 1 : (total2 < total1 ? -1 : 0);
         }
         if (m_sorted.equals("failure")) {
            long fail1 = m1.getDetail().getFailCount();
            long fail2 = m2.getDetail().getFailCount();

            return fail2 > fail1 ? 1 : (fail2 < fail1 ? -1 : 0);
         }
         if (m_sorted.equals("failurePercent")) {
            // Double.compare preserves sub-percent differences that the old
            // "(int) (p2 * 100 - p1 * 100)" form discarded.
            return Double.compare(m2.getDetail().getFailPercent(), m1.getDetail().getFailPercent());
         }
         return 0;
      }
   }

   /**
    * One display row: an event name (or "TOTAL") plus its aggregated detail.
    */
   public static class EventNameModel {
      private EventName m_detail;

      private String m_type;

      public EventNameModel(String str, EventName detail) {
         m_type = str;
         m_detail = detail;
      }

      public EventName getDetail() {
         return m_detail;
      }

      /**
       * Returns the url-encoded event name; falls back to the raw id when
       * encoding fails.
       */
      public String getName() {
         String id = m_detail.getId();

         try {
            return URLEncoder.encode(id, "utf-8");
         } catch (Exception e) {
            return id;
         }
      }

      public String getType() {
         return m_type;
      }
   }
}
| apache-2.0 |
robin13/elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlStatsAction.java | 1198 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.xpack.sql.proto.Protocol;
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.GET;
public class RestSqlStatsAction extends BaseRestHandler {
    // REST handler exposing SQL plugin usage statistics.
    @Override
    public List<Route> routes() {
        // Single route: GET on the stats endpoint defined by the SQL protocol.
        return List.of(new Route(GET, Protocol.SQL_STATS_REST_ENDPOINT));
    }
    @Override
    public String getName() {
        // Identifier used by the REST handler infrastructure for this action.
        return "sql_stats";
    }
    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
        // The stats request carries no parameters; execute the action and
        // stream the nodes-level response back to the channel.
        SqlStatsRequest request = new SqlStatsRequest();
        return channel -> client.execute(SqlStatsAction.INSTANCE, request, new RestActions.NodesResponseRestListener<>(channel));
    }
}
| apache-2.0 |
yelhouti/springfox | springfox-core/src/main/java/springfox/documentation/builders/TokenEndpointBuilder.java | 1384 | /*
*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package springfox.documentation.builders;
import springfox.documentation.service.TokenEndpoint;
public class TokenEndpointBuilder {
  // Values collected for the TokenEndpoint under construction.
  private String url;
  private String tokenName;
  /**
   * Updates the token endpoint url
   *
   * @param url - url of the token endpoint; presumably a null/absent value
   *        keeps the previously set one (via BuilderDefaults.defaultIfAbsent) — confirm
   * @return this builder, for method chaining
   */
  public TokenEndpointBuilder url(String url) {
    this.url = BuilderDefaults.defaultIfAbsent(url, this.url);
    return this;
  }
  /**
   * Updates the token name
   *
   * @param tokenName - token name; presumably a null/absent value keeps the
   *        previously set one — confirm
   * @return this builder, for method chaining
   */
  public TokenEndpointBuilder tokenName(String tokenName) {
    this.tokenName = BuilderDefaults.defaultIfAbsent(tokenName, this.tokenName);
    return this;
  }
  /**
   * Builds the {@link TokenEndpoint} from the collected values.
   *
   * @return a new TokenEndpoint with the configured url and token name
   */
  public TokenEndpoint build() {
    return new TokenEndpoint(url, tokenName);
  }
}
paplorinc/intellij-community | platform/platform-tests/testSrc/com/intellij/history/integration/PurgingTest.java | 4679 | /*
* Copyright 2000-2010 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.history.integration;
import com.intellij.history.LocalHistory;
import com.intellij.history.core.LocalHistoryTestCase;
import com.intellij.history.core.changes.ChangeSet;
import com.intellij.history.core.revisions.Revision;
import com.intellij.openapi.util.Clock;
import com.intellij.openapi.vfs.VirtualFile;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
public class PurgingTest extends IntegrationTestCase {
  @Override
  public void setUp() throws Exception {
    super.setUp();
    // Tests below assume an interval-between-activities of 2 time units
    // (see e.g. testPurgeWithOneGap / testPurgeWithSeveralGaps).
    getVcs().getChangeListInTests().setIntervalBetweenActivities(2);
  }
@Override
protected void setUpInWriteAction() throws Exception {
Clock.setTime(0);
super.setUpInWriteAction();
}
@Test
public void testPurgeWithoutGapsBetweenChanges() {
createChangesWithTimestamps(1, 2, 3);
getVcs().getChangeListInTests().purgeObsolete(2);
assertRemainedChangesTimestamps(3, 2);
}
@Test
public void testPurgeSeveral() {
createChangesWithTimestamps(1, 2, 3);
getVcs().getChangeListInTests().purgeObsolete(1);
assertRemainedChangesTimestamps(3);
}
@Test
public void testPurgeNothing() {
createChangesWithTimestamps(1, 2, 3);
getVcs().getChangeListInTests().purgeObsolete(10);
assertRemainedChangesTimestamps(3, 2, 1, 0);
}
@Test
public void testDoesNotPurgeTheOnlyChange() {
createChangesWithTimestamps(1);
getVcs().getChangeListInTests().purgeObsolete(1);
assertRemainedChangesTimestamps(1);
}
@Test
public void testPurgeWithOneGap() {
createChangesWithTimestamps(1, 2, 4);
getVcs().getChangeListInTests().purgeObsolete(2);
assertRemainedChangesTimestamps(4, 2);
}
@Test
public void testPurgeWithSeveralGaps() {
createChangesWithTimestamps(1, 2, 4, 5, 7, 8);
getVcs().getChangeListInTests().purgeObsolete(5);
assertRemainedChangesTimestamps(8, 7, 5, 4, 2);
}
@Test
public void testPurgeWithLongGaps() {
createChangesWithTimestamps(10, 20, 30, 40);
getVcs().getChangeListInTests().purgeObsolete(3);
assertRemainedChangesTimestamps(40, 30, 20);
}
@Test
public void testPurgeWithBifIntervalBetweenChanges() {
getVcs().getChangeListInTests().setIntervalBetweenActivities(100);
createChangesWithTimestamps(110, 120, 130, 250, 260, 270);
getVcs().getChangeListInTests().purgeObsolete(40);
assertRemainedChangesTimestamps(270, 260, 250, 130, 120);
}
@Test
public void testPurgingEmptyListDoesNotThrowException() {
getVcs().getChangeListInTests().purgeObsolete(50);
}
@Test
public void testChangesAfterPurge() throws IOException {
Clock.setTime(1);
VirtualFile f = createFile("file.txt");
Clock.setTime(2);
setContent(f, "1");
Clock.setTime(3);
setContent(f, "2");
assertEquals(3, LocalHistoryTestCase.collectChanges(getVcs(), f.getPath(), myProject.getLocationHash(), null).size());
getVcs().getChangeListInTests().purgeObsolete(2);
assertEquals(2, LocalHistoryTestCase.collectChanges(getVcs(), f.getPath(), myProject.getLocationHash(), null).size());
}
@Test
public void testLabelsAfterPurge() throws IOException {
Clock.setTime(1);
VirtualFile file = createFile("file");
Clock.setTime(2);
LocalHistory.getInstance().putUserLabel(myProject, "1");
getVcs().getChangeListInTests().purgeObsolete(1);
List<Revision> rr = getRevisionsFor(file);
assertEquals(2, rr.size());
assertEquals("1", rr.get(1).getLabel());
}
private void createChangesWithTimestamps(long... tt) {
for (long t : tt) {
Clock.setTime(t);
getVcs().beginChangeSet();
getVcs().putUserLabel("foo", "project");
getVcs().endChangeSet(null);
}
}
private void assertRemainedChangesTimestamps(long... tt) {
assertEquals(tt.length, getVcs().getChangeListInTests().getChangesInTests().size());
for (int i = 0; i < tt.length; i++) {
long t = tt[i];
ChangeSet c = getVcs().getChangeListInTests().getChangesInTests().get(i);
assertEquals(t, c.getTimestamp());
}
}
}
/* Latin.java -- Latin specific glyph handling
Copyright (C) 2006 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package gnu.java.awt.font.autofit;
import java.awt.geom.AffineTransform;
import java.util.HashSet;
import gnu.java.awt.font.opentype.OpenTypeFont;
import gnu.java.awt.font.opentype.truetype.Fixed;
import gnu.java.awt.font.opentype.truetype.Point;
import gnu.java.awt.font.opentype.truetype.Zone;
/**
* Implements Latin specific glyph handling.
*/
class Latin
implements Script, Constants
{
  /** Maximum number of standard stem widths collected per axis. */
  static final int MAX_WIDTHS = 16;
  /** Upper bound on extrema recorded per blue-zone test string. */
  private final static int MAX_TEST_CHARS = 12;
  /**
   * The types of the 6 blue zones.
   */
  private static final int CAPITAL_TOP = 0;
  private static final int CAPITAL_BOTTOM = 1;
  private static final int SMALL_F_TOP = 2;
  private static final int SMALL_TOP = 3;
  private static final int SMALL_BOTTOM = 4;
  private static final int SMALL_MINOR = 5;
  /** The number of blue zones. */
  static final int BLUE_MAX = 6;
  /**
   * The test chars for the blue zones, one string per zone type in the
   * order of the constants above.
   *
   * @see #initBlues(LatinMetrics, OpenTypeFont)
   */
  private static final String[] TEST_CHARS =
    new String[]{"THEZOCQS", "HEZLOCUS", "fijkdbh",
                 "xzroesc", "xzroesc", "pqgjy"};
public void applyHints(GlyphHints hints, Zone outline, ScriptMetrics metrics)
{
hints.reload(outline);
hints.rescale(metrics);
if (hints.doHorizontal())
{
detectFeatures(hints, DIMENSION_HORZ);
}
if (hints.doVertical())
{
detectFeatures(hints, DIMENSION_VERT);
computeBlueEdges(hints, (LatinMetrics) metrics);
}
// Grid-fit the outline.
for (int dim = 0; dim < DIMENSION_MAX; dim++)
{
if (dim == DIMENSION_HORZ && hints.doHorizontal()
|| dim == DIMENSION_VERT && hints.doVertical())
{
hintEdges(hints, dim);
if (hints.doAlignEdgePoints())
hints.alignEdgePoints(dim);
if (hints.doAlignStrongPoints())
hints.alignStrongPoints(dim);
if (hints.doAlignWeakPoints())
hints.alignWeakPoints(dim);
}
}
// FreeType does a save call here. I guess that's not needed as we operate
// on the live glyph data anyway.
}
  /**
   * Aligns the edges of one dimension: blue-zone edges first (vertical
   * dimension only), then mutually linked stem edges, and finally the
   * remaining serif and single edges.
   *
   * @param hints the glyph hints to operate on
   * @param dim the dimension, {@code DIMENSION_HORZ} or
   *        {@code DIMENSION_VERT}
   */
  private void hintEdges(GlyphHints hints, int dim)
  {
    AxisHints axis = hints.axis[dim];
    Edge[] edges = axis.edges;
    int numEdges = axis.numEdges;
    Edge anchor = null;
    int hasSerifs = 0;
    // We begin by aligning all stems relative to the blue zone if
    // needed -- that's only for horizontal edges.
    if (dim == DIMENSION_VERT)
      {
        for (int e = 0; e < numEdges; e++)
          {
            Edge edge = edges[e];
            if ((edge.flags & Segment.FLAG_EDGE_DONE) != 0)
              continue;
            Width blue = edge.blueEdge;
            Edge edge1 = null;
            Edge edge2 = edge.link;
            if (blue != null)
              {
                edge1 = edge;
              }
            else if (edge2 != null && edge2.blueEdge != null)
              {
                // The linked edge sits in a blue zone; align from it.
                blue = edge2.blueEdge;
                edge1 = edge2;
                edge2 = edge;
              }
            if (edge1 == null)
              continue;
            // Snap the blue edge to the fitted blue zone position.
            edge1.pos = blue.fit;
            edge1.flags |= Segment.FLAG_EDGE_DONE;
            if (edge2 != null && edge2.blueEdge == null)
              {
                alignLinkedEdge(hints, dim, edge1, edge2);
                edge2.flags |= Segment.FLAG_EDGE_DONE;
              }
            if (anchor == null)
              anchor = edge;
          }
      }
    // Now we will align all stem edges, trying to maintain the
    // relative order of stems in the glyph.
    for (int e = 0; e < numEdges; e++)
      {
        Edge edge = edges[e];
        if ((edge.flags & Segment.FLAG_EDGE_DONE) != 0)
          continue;
        Edge edge2 = edge.link;
        if (edge2 == null)
          {
            // Unlinked edge: handled in the serif/single pass below.
            hasSerifs++;
            continue;
          }
        // Now align the stem.
        // This should not happen, but it's better to be safe.
        if (edge2.blueEdge != null || axis.getEdgeIndex(edge2) < e)
          {
            alignLinkedEdge(hints, dim, edge2, edge);
            edge.flags |= Segment.FLAG_EDGE_DONE;
            continue;
          }
        if (anchor == null)
          {
            // First stem: place it on the grid and use it as the anchor
            // for all following stems.
            int orgLen = edge2.opos - edge.opos;
            int curLen = computeStemWidth(hints, dim, orgLen, edge.flags,
                                          edge2.flags);
            int uOff, dOff, orgCenter, curPos1, error1, error2;
            if (curLen <= 64) // < 1 Pixel.
              {
                uOff = 32;
                dOff = 32;
              }
            else
              {
                uOff = 38;
                dOff = 26;
              }
            if (curLen < 96)
              {
                // Thin stem: round its center, choosing the offset with
                // the smaller error.
                orgCenter = edge.opos + (orgLen >> 1);
                curPos1 = Utils.pixRound(orgCenter);
                error1 = orgCenter - (curPos1 - uOff);
                if (error1 < 0)
                  error1 = -error1;
                error2 = orgCenter - (curPos1 + dOff);
                if (error2 < 0)
                  error2 = -error2;
                if (error1 < error2)
                  {
                    curPos1 -= uOff;
                  }
                else
                  {
                    curPos1 += dOff;
                  }
                edge.pos = curPos1 - curLen / 2;
                edge2.pos = curPos1 + curLen / 2;
              }
            else
              {
                edge.pos = Utils.pixRound(edge.opos);
              }
            anchor = edge;
            edge.flags |= Segment.FLAG_EDGE_DONE;
            alignLinkedEdge(hints, dim, edge, edge2);
          }
        else
          {
            // Subsequent stems: position relative to the anchor.
            int aDiff = edge.opos - anchor.opos;
            int orgPos = anchor.pos + aDiff;
            int orgLen = edge2.opos - edge.opos;
            int orgCenter = orgPos + (orgLen >> 1);
            int curLen = computeStemWidth(hints, dim, orgLen, edge.flags,
                                          edge2.flags);
            //System.err.println("stem width: " + curLen);
            if (curLen < 96)
              {
                // Thin stem: same center-rounding scheme as above.
                int uOff, dOff;
                int curPos1 = Utils.pixRound(orgCenter);
                if (curLen <= 64)
                  {
                    uOff = 32;
                    dOff = 32;
                  }
                else
                  {
                    uOff = 38;
                    dOff = 26;
                  }
                int delta1 = orgCenter - (curPos1 - uOff);
                if (delta1 < 0)
                  delta1 = -delta1;
                int delta2 = orgCenter - (curPos1 + dOff);
                if (delta2 < 0)
                  delta2 = -delta2;
                if (delta1 < delta2)
                  {
                    curPos1 -= uOff;
                  }
                else
                  {
                    curPos1 += dOff;
                  }
                edge.pos = curPos1 - curLen / 2;
                edge2.pos = curPos1 + curLen / 2;
              }
            else
              {
                // Wide stem: try rounding either boundary and keep the
                // candidate whose center error is smaller.
                orgPos = anchor.pos + (edge.opos - anchor.opos);
                orgLen = edge2.opos - edge.opos;
                orgCenter = orgPos + (orgLen >> 1);
                curLen = computeStemWidth(hints, dim, orgLen, edge.flags,
                                          edge2.flags);
                int curPos1 = Utils.pixRound(orgPos);
                int delta1 = curPos1 + (curLen >> 1) - orgCenter;
                if (delta1 < 0)
                  delta1 = -delta1;
                int curPos2 = Utils.pixRound(orgPos + orgLen) - curLen;
                int delta2 = curPos2 + (curLen >> 1) - orgCenter;
                if (delta2 < 0)
                  delta2 = -delta2;
                edge.pos = (delta1 < delta2) ? curPos1 : curPos2;
                edge2.pos = edge.pos + curLen;
              }
            edge.flags |= Segment.FLAG_EDGE_DONE;
            edge2.flags |= Segment.FLAG_EDGE_DONE;
            // Never move an edge before its already-hinted predecessor.
            if (e > 0 && edge.pos < edges[e - 1].pos)
              {
                edge.pos = edges[e - 1].pos;
              }
          }
      }
    // TODO: Implement the lowercase m symmetry thing.
    // Now we hint the remaining edges (serifs and singles) in order
    // to complete our processing.
    if (hasSerifs > 0 || anchor == null)
      {
        for (int e = 0; e < numEdges; e++)
          {
            Edge edge = edges[e];
            if ((edge.flags & Segment.FLAG_EDGE_DONE) != 0)
              continue;
            if (edge.serif != null)
              {
                alignSerifEdge(hints, edge.serif, edge);
              }
            else if (anchor == null)
              {
                edge.pos = Utils.pixRound(edge.opos);
                anchor = edge;
              }
            else
              {
                edge.pos = anchor.pos
                           + Utils.pixRound(edge.opos - anchor.opos);
              }
            edge.flags |= Segment.FLAG_EDGE_DONE;
            // Clamp the edge between its already-hinted neighbours to
            // preserve the original edge order.
            if (e > 0 && edge.pos < edges[e - 1].pos)
              {
                edge.pos = edges[e - 1].pos;
              }
            if (e + 1 < numEdges
                && (edges[e + 1].flags & Segment.FLAG_EDGE_DONE) != 0
                && edge.pos > edges[e + 1].pos)
              {
                edge.pos = edges[e + 1].pos;
              }
          }
      }
    // Debug: print all hinted edges.
    // System.err.println("hinted edges: " );
    // for (int i = 0; i < numEdges; i++)
    //   {
    //     System.err.println("edge#" + i + ": " + edges[i]);
    //   }
  }
private void alignSerifEdge(GlyphHints hints, Edge base, Edge serif)
{
serif.pos = base.pos + (serif.opos - base.opos);
}
  /**
   * Computes the hinted width of a stem, either lightly quantized
   * (smooth mode) or snapped to integer pixels (strong mode), using the
   * standard widths collected for this axis. All values are 26.6
   * fixed-point (64 = one pixel).
   *
   * @param hints the glyph hints
   * @param dim the dimension
   * @param width the scaled stem width; may be negative
   * @param baseFlags flags of the base edge
   * @param stemFlags flags of the stem edge
   * @return the adjusted width, carrying the sign of {@code width}
   */
  private int computeStemWidth(GlyphHints hints, int dim, int width,
                               int baseFlags, int stemFlags)
  {
    LatinMetrics metrics = (LatinMetrics) hints.metrics;
    LatinAxis axis = metrics.axis[dim];
    int dist = width;
    int sign = 0;
    boolean vertical = dim == DIMENSION_VERT;
    if (! doStemAdjust(hints))
      return width;
    // Work on the absolute value; doneWidth() restores the sign.
    if (dist < 0)
      {
        dist = -width;
        sign = 1;
      }
    if ((vertical && ! doVertSnap(hints)) || ! vertical && ! doHorzSnap(hints))
      {
        // Smooth hinting process. Very lightly quantize the stem width.
        // Leave the widths of serifs alone.
        if ((stemFlags & Segment.FLAG_EDGE_SERIF) != 0 && vertical
            && dist < 3 * 64)
          {
            return doneWidth(dist, sign);
          }
        else if ((baseFlags & Segment.FLAG_EDGE_ROUND) != 0)
          {
            if (dist < 80)
              dist = 64;
          }
        else if (dist < 56)
          {
            dist = 56;
          }
        if (axis.widthCount > 0)
          {
            int delta;
            if (axis.widthCount > 0)
              {
                // If close to the standard width, use it directly.
                delta = dist - axis.widths[0].cur;
                if (delta < 0)
                  {
                    delta = -delta;
                  }
                if (delta < 40)
                  {
                    dist = axis.widths[0].cur;
                    if (dist < 48)
                      dist = 48;
                    return doneWidth(dist, sign);
                  }
              }
            if (dist < 3 * 64) // < 3 pixels.
              {
                // Gently push the fractional part away from the middle.
                delta = dist & 63;
                dist &= -64;
                if (delta < 10)
                  dist += delta;
                else if (delta < 32)
                  dist += 10;
                else if (delta < 54)
                  dist += 54;
                else
                  dist += delta;
              }
            else
              {
                // Round to the nearest pixel.
                dist = (dist + 32) & ~63;
              }
          }
      }
    else
      {
        // Strong hinting process: Snap the stem width to integer pixels.
        dist = snapWidth(axis.widths, axis.widthCount, dist);
        if (vertical)
          {
            // In the case of vertical hinting, always round
            // the stem heights to integer pixels.
            if (dist >= 64)
              dist = (dist + 16) & ~63;
            else
              dist = 64;
          }
        else
          {
            if (doMono(hints))
              {
                // Monochrome horizontal hinting: Snap widths to integer pixels
                // with a different threshold.
                if (dist < 64)
                  dist = 64;
                else
                  dist = (dist + 32) & ~63;
              }
            else
              {
                // For anti-aliased hinting, we adopt a more subtle
                // approach: We strengthen small stems, round those stems
                // whose size is between 1 and 2 pixels to an integer,
                // otherwise nothing.
                if (dist < 48)
                  dist = (dist + 64) >> 1;
                else if (dist < 128)
                  dist = (dist + 22) & ~63;
                else
                  // Round otherwise to prevent color fringes in LCD mode.
                  dist = (dist + 32) & ~63;
              }
          }
      }
    return doneWidth(dist, sign);
  }
  /**
   * Tells whether hinting targets monochrome rendering. Currently
   * hard-wired to {@code true}; the anti-aliased width-snapping branch in
   * {@link #computeStemWidth} is therefore unreachable.
   *
   * @param hints the glyph hints (currently unused)
   * @return always {@code true}
   */
  private boolean doMono(GlyphHints hints)
  {
    return true;
  }
private int snapWidth(Width[] widths, int count, int width)
{
int best = 64 + 32 + 2;
int reference = width;
for (int n = 0; n < count; n++)
{
int w = widths[n].cur;
int dist = width - w;
if (dist < 0)
dist = -dist;
if (dist < best)
{
best = dist;
reference = w;
}
}
int scaled = Utils.pixRound(reference);
if (width >= reference)
{
if (width < scaled + 48)
width = reference;
}
else
{
if (width > scaled + 48)
width = reference;
}
return width;
}
private int doneWidth(int w, int s)
{
if (s == 1)
w = -w;
return w;
}
  /**
   * Tells whether vertical stem widths should be snapped to integer
   * pixels (strong hinting). Currently hard-wired to {@code true}.
   *
   * @param hints the glyph hints (currently unused)
   * @return always {@code true}
   */
  private boolean doVertSnap(GlyphHints hints)
  {
    // TODO Auto-generated method stub
    return true;
  }
  /**
   * Tells whether horizontal stem widths should be snapped to integer
   * pixels (strong hinting). Currently hard-wired to {@code true}.
   *
   * @param hints the glyph hints (currently unused)
   * @return always {@code true}
   */
  private boolean doHorzSnap(GlyphHints hints)
  {
    // TODO Auto-generated method stub
    return true;
  }
  /**
   * Tells whether stem widths should be adjusted at all; when
   * {@code false}, {@link #computeStemWidth} returns its input unchanged.
   * Currently hard-wired to {@code true}.
   *
   * @param hints the glyph hints (currently unused)
   * @return always {@code true}
   */
  private boolean doStemAdjust(GlyphHints hints)
  {
    // TODO Auto-generated method stub
    return true;
  }
private void alignLinkedEdge(GlyphHints hints, int dim, Edge base, Edge stem)
{
int dist = stem.opos - base.opos;
int fitted = computeStemWidth(hints, dim, dist, base.flags, stem.flags);
stem.pos = base.pos + fitted;
}
  /**
   * Releases any resources held by the given script metrics. Nothing to
   * do for the latin script yet.
   *
   * @param metrics the metrics to dispose
   */
  public void doneMetrics(ScriptMetrics metrics)
  {
    // TODO Auto-generated method stub
  }
  /**
   * Initializes the <code>hints</code> object by copying the per-axis
   * scale and delta values from the (latin) metrics.
   *
   * @param hints the hints to initialize
   * @param metrics the metrics to use; must be a {@code LatinMetrics}
   */
  public void initHints(GlyphHints hints, ScriptMetrics metrics)
  {
    hints.rescale(metrics);
    LatinMetrics lm = (LatinMetrics) metrics;
    hints.xScale = lm.axis[DIMENSION_HORZ].scale;
    hints.xDelta = lm.axis[DIMENSION_HORZ].delta;
    hints.yScale = lm.axis[DIMENSION_VERT].scale;
    hints.yDelta = lm.axis[DIMENSION_VERT].delta;
    // TODO: Set the scaler and other flags.
  }
  /**
   * Initializes the script metrics: records the font's units-per-em,
   * then derives the standard stem widths (from the letter 'o') and the
   * blue zones.
   *
   * @param metrics the script metrics to initialize; must be a
   *        {@code LatinMetrics}
   * @param face the font
   */
  public void initMetrics(ScriptMetrics metrics, OpenTypeFont face)
  {
    assert metrics instanceof LatinMetrics;
    LatinMetrics lm = (LatinMetrics) metrics;
    lm.unitsPerEm = face.unitsPerEm;
    // TODO: Check for latin charmap.
    initWidths(lm, face, 'o');
    initBlues(lm, face);
  }
public void scaleMetrics(ScriptMetrics metrics, HintScaler scaler)
{
LatinMetrics lm = (LatinMetrics) metrics;
lm.scaler.renderMode = scaler.renderMode;
lm.scaler.face = scaler.face;
scaleMetricsDim(lm, scaler, DIMENSION_HORZ);
scaleMetricsDim(lm, scaler, DIMENSION_VERT);
}
private void scaleMetricsDim(LatinMetrics lm, HintScaler scaler, int dim)
{
int scale;
int delta;
if (dim == DIMENSION_HORZ)
{
scale = scaler.xScale;
delta = scaler.xDelta;
}
else
{
scale = scaler.yScale;
delta = scaler.yDelta;
}
LatinAxis axis = lm.axis[dim];
if (axis.orgScale == scale && axis.orgDelta == delta)
// No change, no need to adjust.
return;
axis.orgScale = scale;
axis.orgDelta = delta;
// Correct X and Y scale to optimize the alignment of the top small
// letters to the pixel grid.
LatinAxis axis2 = lm.axis[DIMENSION_VERT];
LatinBlue blue = null;
// for (int nn = 0; nn < axis2.blueCount; nn++)
// {
// if ((axis2.blues[nn].flags & LatinBlue.FLAG_ADJUSTMENT) != 0)
// {
// blue = axis2.blues[nn];
// break;
// }
// }
// if (blue != null)
// {
// int scaled = Fixed.mul16(blue.shoot.org, scaler.yScale);
// int fitted = Utils.pixRound(scaled);
// if (scaled != fitted)
// {
// if (dim == DIMENSION_HORZ)
// {
// if (fitted < scaled)
// {
// scale -= scale / 50;
// }
// }
// else
// {
// scale = Utils.mulDiv(scale, fitted, scaled);
// }
// }
// }
axis.scale = scale;
axis.delta = delta;
if (dim == DIMENSION_HORZ)
{
lm.scaler.xScale = scale;
lm.scaler.xDelta = delta;
}
else
{
lm.scaler.yScale = scale;
lm.scaler.yDelta = delta;
}
// Scale the standard widths.
for (int nn = 0; nn < axis.widthCount; nn++)
{
Width w = axis.widths[nn];
w.cur = Fixed.mul16(w.org, scale);
w.fit = w.cur;
}
// Scale blue zones.
if (dim == DIMENSION_VERT)
{
for (int nn = 0; nn < axis.blueCount; nn++)
{
blue = axis.blues[nn];
blue.ref.cur = Fixed.mul16(blue.ref.org, scale) + delta;
blue.ref.fit = blue.ref.cur;
blue.shoot.cur = Fixed.mul16(blue.ref.org, scale) + delta;
blue.flags &= ~LatinBlue.FLAG_BLUE_ACTIVE;
// A blue zone is only active if it is less than 3/4 pixels tall.
int dist = Fixed.mul16(blue.ref.org - blue.shoot.org, scale);
if (dist <= 48 && dist >= -48)
{
int delta1 = blue.shoot.org - blue.ref.org;
int delta2 = delta1;
if (delta1 < 0)
delta2 = -delta2;
delta2 = Fixed.mul16(delta2, scale);
if (delta2 < 32)
delta2 = 0;
else if (delta2 < 64)
delta2 = 32 + (((delta2 - 32) + 16) & ~31);
else
delta2 = Utils.pixRound(delta2);
if (delta1 < 0)
delta2 = -delta2;
blue.ref.fit = Utils.pixRound(blue.ref.cur);
blue.shoot.fit = blue.ref.fit + delta2;
blue.flags |= LatinBlue.FLAG_BLUE_ACTIVE;
}
}
}
}
  /**
   * Determines the standard stem widths.
   *
   * @param metrics the metrics to use
   * @param face the font face
   * @param ch the character that is used for getting the widths
   */
  private void initWidths(LatinMetrics metrics, OpenTypeFont face, char ch)
  {
    GlyphHints hints = new GlyphHints();
    metrics.axis[DIMENSION_HORZ].widthCount = 0;
    metrics.axis[DIMENSION_VERT].widthCount = 0;
    int glyphIndex = face.getGlyph(ch);
    // Load the reference glyph without any transform.
    Zone outline = face.getRawGlyphOutline(glyphIndex, IDENTITY);
    LatinMetrics dummy = new LatinMetrics();
    HintScaler scaler = dummy.scaler;
    dummy.unitsPerEm = metrics.unitsPerEm;
    scaler.xScale = scaler.yScale = 10000;
    scaler.xDelta = scaler.yDelta = 0;
    scaler.face = face;
    hints.rescale(dummy);
    hints.reload(outline);
    for (int dim = 0; dim < DIMENSION_MAX; dim++)
      {
        LatinAxis axis = metrics.axis[dim];
        AxisHints axHints = hints.axis[dim];
        int numWidths = 0;
        computeSegments(hints, dim);
        linkSegments(hints, dim);
        Segment[] segs = axHints.segments;
        HashSet<Segment> touched = new HashSet<Segment>();
        // Record the distance of each mutually linked segment pair once.
        for (int i = 0; i < segs.length; i++)
          {
            Segment seg = segs[i];
            Segment link = seg.link;
            if (link != null && link.link == seg && ! touched.contains(link))
              {
                int dist = Math.abs(seg.pos - link.pos);
                if (numWidths < MAX_WIDTHS)
                  axis.widths[numWidths++] = new Width(dist);
              }
            touched.add(seg);
          }
        Utils.sort(numWidths, axis.widths);
        axis.widthCount = numWidths;
      }
    for (int dim = 0; dim < DIMENSION_MAX; dim++)
      {
        LatinAxis axis = metrics.axis[dim];
        // The smallest collected width becomes the standard width; fall
        // back to a scaled constant when none was found.
        int stdw = axis.widthCount > 0 ? axis.widths[0].org
                                       : constant(metrics, 50);
        axis.edgeDistanceTreshold= stdw / 5;
      }
  }
void linkSegments(GlyphHints hints, int dim)
{
AxisHints axis = hints.axis[dim];
Segment[] segments = axis.segments;
int numSegs = axis.numSegments;
int majorDir = axis.majorDir;
int lenThreshold = constant((LatinMetrics) hints.metrics, 8);
lenThreshold = Math.min(1, lenThreshold);
int lenScore = constant((LatinMetrics) hints.metrics, 3000);
for (int i1 = 0; i1 < numSegs; i1++)
{
Segment seg1 = segments[i1];
// The fake segments are introduced to hint the metrics.
// Never link them to anything.
if (seg1.first == seg1.last || seg1.dir != majorDir)
continue;
for (int i2 = 0; i2 < numSegs; i2++)
{
Segment seg2 = segments[i2];
if (seg2 != seg1 && seg1.dir + seg2.dir == 0)
{
int pos1 = seg1.pos;
int pos2 = seg2.pos;
// The vertical coords are swapped compared to how FT handles
// this.
int dist = dim == DIMENSION_VERT ? pos1 - pos2 : pos2 - pos1;
if (dist >= 0)
{
int min = seg1.minPos;
int max = seg1.maxPos;
int len, score;
if (min < seg2.minPos)
min = seg2.minPos;
if (max > seg2.maxPos)
max = seg2.maxPos;
len = max - min;
if (len > lenThreshold)
{
score = dist + lenScore / len;
if (score < seg1.score)
{
seg1.score = score;
seg1.link = seg2;
}
if (score < seg2.score)
{
seg2.score = score;
seg2.link = seg1;
}
}
}
}
}
}
for (int i1 = 0; i1 < numSegs; i1++)
{
Segment seg1 = segments[i1];
Segment seg2 = seg1.link;
if (seg2 != null)
{
seg2.numLinked++;
if (seg2.link != seg1)
{
seg1.link = null;
seg1.serif = seg2.link;
}
}
// Uncomment to show all segments.
// System.err.println("segment#" + i1 + ": " + seg1);
}
}
  /**
   * Initializes the blue zones of the font. For each zone type, the test
   * characters are loaded and their extremal points classified as round
   * or flat; the medians of the two groups give the zone's overshoot and
   * reference positions.
   *
   * @param metrics the metrics to use
   * @param face the font face to analyze
   */
  private void initBlues(LatinMetrics metrics, OpenTypeFont face)
  {
    int[] flats = new int[MAX_TEST_CHARS];
    int[] rounds = new int[MAX_TEST_CHARS];
    int numFlats;
    int numRounds;
    LatinBlue blue;
    LatinAxis axis = metrics.axis[DIMENSION_VERT];
    // We compute the blues simply by loading each character in the test
    // strings, then compute its topmost or bottommost points.
    for (int bb = 0; bb < BLUE_MAX; bb++)
      {
        String p = TEST_CHARS[bb];
        // NOTE(review): blueRef/blueShoot are declared but never used;
        // presumably kept for parity with the FreeType source.
        int blueRef;
        int blueShoot;
        numFlats = 0;
        numRounds = 0;
        for (int i = 0; i < p.length(); i++)
          {
            // Load the character.
            int glyphIndex = face.getGlyph(p.charAt(i));
            Zone glyph =
              face.getRawGlyphOutline(glyphIndex, IDENTITY);
            // Now compute the min and max points.
            int numPoints = glyph.getSize() - 4; // 4 phantom points.
            Point[] points = glyph.getPoints();
            Point point = points[0];
            int extremum = 0;
            int index = 1;
            if (isTopBlue(bb))
              {
                for (; index < numPoints; index++)
                  {
                    point = points[index];
                    // We have the vertical direction swapped. The higher
                    // points have smaller (negative) Y.
                    if (point.getOrigY() < points[extremum].getOrigY())
                      extremum = index;
                  }
              }
            else
              {
                for (; index < numPoints; index++)
                  {
                    point = points[index];
                    // We have the vertical direction swapped. The higher
                    // points have smaller (negative) Y.
                    if (point.getOrigY() > points[extremum].getOrigY())
                      extremum = index;
                  }
              }
            // Debug, prints out the maxima.
            // System.err.println("extremum for " + bb + " / "+ p.charAt(i)
            //                    + ": " + points[extremum]);
            // Now determine if the point is part of a straight or round
            // segment.
            boolean round;
            int idx = extremum;
            int first, last, prev, next, end;
            int dist;
            last = -1;
            first = 0;
            // Find the contour that contains the extremum.
            for (int n = 0; n < glyph.getNumContours(); n++)
              {
                end = glyph.getContourEnd(n);
                // System.err.println("contour end for " + n + ": " + end);
                if (end >= idx)
                  {
                    last = end;
                    break;
                  }
                first = end + 1;
              }
            // Should never happen.
            assert last >= 0;
            // Now look for the previous and next points that are not on the
            // same Y coordinate. Threshold the 'closeness'.
            prev = idx;
            next = prev;
            do
              {
                if (prev > first)
                  prev--;
                else
                  prev = last;
                dist = points[prev].getOrigY() - points[extremum].getOrigY();
                if (dist < -5 || dist > 5)
                  break;
              } while (prev != idx);
            do
              {
                if (next < last)
                  next++;
                else
                  next = first;
                dist = points[next].getOrigY() - points[extremum].getOrigY();
                if (dist < -5 || dist > 5)
                  break;
              } while (next != idx);
            // A neighbouring control point marks the extremum as round.
            round = points[prev].isControlPoint()
                    || points[next].isControlPoint();
            if (round)
              {
                rounds[numRounds++] = points[extremum].getOrigY();
                // System.err.println("new round extremum: " + bb + ": "
                //                    + points[extremum].getOrigY());
              }
            else
              {
                flats[numFlats++] = points[extremum].getOrigY();
                // System.err.println("new flat extremum: " + bb + ": "
                //                    + points[extremum].getOrigY());
              }
          }
        // We have computed the contents of the rounds and flats tables.
        // Now determine the reference and overshoot position of the blues --
        // we simply take the median after a simple sort.
        Utils.sort(numRounds, rounds);
        Utils.sort(numFlats, flats);
        blue = axis.blues[axis.blueCount] = new LatinBlue();
        axis.blueCount++;
        if (numFlats == 0)
          {
            blue.ref = blue.shoot = new Width(rounds[numRounds / 2]);
          }
        else if (numRounds == 0)
          {
            blue.ref = blue.shoot = new Width(flats[numFlats / 2]);
          }
        else
          {
            blue.ref = new Width(flats[numFlats / 2]);
            blue.shoot = new Width(rounds[numRounds / 2]);
          }
        // There are sometimes problems: if the overshoot position of top
        // zones is under its reference position, or the opposite for bottom
        // zones. We must check everything there and correct problems.
        if (blue.shoot != blue.ref)
          {
            int ref = blue.ref.org;
            int shoot = blue.shoot.org;
            // Inversed vertical coordinates!
            boolean overRef = shoot < ref;
            if (isTopBlue(bb) ^ overRef)
              {
                blue.shoot = blue.ref = new Width((shoot + ref) / 2);
              }
          }
        blue.flags = 0;
        if (isTopBlue(bb))
          blue.flags |= LatinBlue.FLAG_TOP;
        // The following flag is used later to adjust y and x scales in
        // order to optimize the pixel grid alignment of the top small
        // letters.
        if (bb == SMALL_TOP)
          {
            blue.flags |= LatinBlue.FLAG_ADJUSTMENT;
          }
        // Debug: print out the blue zones.
        // System.err.println("blue zone #" + bb + ": " + blue);
      }
  }
  /** Identity transform used when loading raw (untransformed) outlines. */
  private static final AffineTransform IDENTITY = new AffineTransform();
  /**
   * Scales a constant that is defined relative to a 2048 units-per-em
   * design space to this font's units-per-em.
   * NOTE(review): integer division means the result is 0 for fonts with
   * unitsPerEm below 2048; this mirrors the FreeType AF_LATIN_CONSTANT
   * macro.
   *
   * @param metrics the metrics supplying the units-per-em
   * @param c the constant for a 2048-unit em
   * @return the scaled constant
   */
  private int constant(LatinMetrics metrics, int c)
  {
    return c * (metrics.unitsPerEm / 2048);
  }
  /**
   * Walks each contour of the glyph and records runs of points moving in
   * the axis' major direction as segments. The U coordinate is the
   * position along the hinted dimension, V the cross coordinate.
   *
   * @param hints the glyph hints
   * @param dim the dimension to analyze
   */
  private void computeSegments(GlyphHints hints, int dim)
  {
    Point[] points = hints.points;
    // Select the coordinates according to the dimension.
    if (dim == DIMENSION_HORZ)
      {
        for (int i = 0; i < hints.numPoints; i++)
          {
            points[i].setU(points[i].getOrigX());
            points[i].setV(points[i].getOrigY());
          }
      }
    else
      {
        for (int i = 0; i < hints.numPoints; i++)
          {
            points[i].setU(points[i].getOrigY());
            points[i].setV(points[i].getOrigX());
          }
      }
    // Now look at each contour.
    AxisHints axis = hints.axis[dim];
    int majorDir = Math.abs(axis.majorDir);
    int segmentDir = majorDir;
    Point[] contours = hints.contours;
    int numContours = hints.numContours;
    Segment segment = null;
    for (int i = 0; i < numContours; i++)
      {
        int minPos = 32000;
        int maxPos = -32000;
        Point point = contours[i];
        Point last = point.getPrev();
        if (point == last) // Skip singletons.
          continue;
        if (Math.abs(last.getOutDir()) == majorDir
            && Math.abs(point.getOutDir()) == majorDir)
          {
            // We are already on an edge. Locate its start.
            last = point;
            while (true)
              {
                point = point.getPrev();
                if (Math.abs(point.getOutDir()) != majorDir)
                  {
                    point = point.getNext();
                    break;
                  }
                if (point == last)
                  break;
              }
          }
        last = point;
        boolean passed = false;
        boolean onEdge = false;
        // Walk the contour once (the 'passed' flag allows closing a
        // segment that wraps around the starting point).
        while (true)
          {
            int u, v;
            if (onEdge)
              {
                // Track the extent of the current segment along U.
                u = point.getU();
                if (u < minPos)
                  minPos = u;
                if (u > maxPos)
                  maxPos = u;
                if (point.getOutDir() != segmentDir || point == last)
                  {
                    // Leaving an edge. Record new segment.
                    segment.last = point;
                    // (minPos + maxPos) / 2.
                    segment.pos = (minPos + maxPos) >> 1;
                    if (segment.first.isControlPoint()
                        || point.isControlPoint())
                      segment.flags |= Segment.FLAG_EDGE_ROUND;
                    // Record the V extent of the segment.
                    minPos = maxPos = point.getV();
                    v = segment.first.getV();
                    if (v < minPos)
                      minPos = v;
                    if (v > maxPos)
                      maxPos = v;
                    segment.minPos = minPos;
                    segment.maxPos = maxPos;
                    onEdge = false;
                    segment = null;
                  }
              }
            if (point == last)
              {
                if (passed)
                  break;
                passed = true;
              }
            if (! onEdge && Math.abs(point.getOutDir()) == majorDir)
              {
                // This is the start of a new segment.
                segmentDir = point.getOutDir();
                segment = axis.newSegment();
                segment.dir = segmentDir;
                segment.flags = Segment.FLAG_EDGE_NORMAL;
                minPos = maxPos = point.getU();
                segment.first = point;
                segment.last = point;
                segment.contour = contours[i];
                segment.score = 32000;
                segment.len = 0;
                segment.link = null;
                onEdge = true;
              }
            point = point.getNext();
          }
      }
  }
private boolean isTopBlue(int b)
{
return b == CAPITAL_TOP || b == SMALL_F_TOP || b == SMALL_TOP;
}
  /**
   * Detects the hinting features of one dimension: first the segments,
   * then their stem/serif links, finally the edge table. The three steps
   * must run in this order, as each builds on the previous one.
   *
   * @param hints the glyph hints
   * @param dim the dimension to analyze
   */
  private void detectFeatures(GlyphHints hints, int dim)
  {
    computeSegments(hints, dim);
    linkSegments(hints, dim);
    computeEdges(hints, dim);
  }
  /**
   * Builds the sorted edge table for one dimension from the segment list
   * and computes each edge's properties: main direction, roundness, and
   * stem/serif links.
   *
   * @param hints the glyph hints
   * @param dim the dimension
   */
  private void computeEdges(GlyphHints hints, int dim)
  {
    AxisHints axis = hints.axis[dim];
    LatinAxis laxis = ((LatinMetrics) hints.metrics).axis[dim];
    Segment[] segments = axis.segments;
    int numSegments = axis.numSegments;
    Segment seg;
    int upDir;
    int scale;
    int edgeDistanceThreshold;
    axis.numEdges = 0;
    scale = dim == DIMENSION_HORZ ? hints.xScale : hints.yScale;
    upDir = dim == DIMENSION_HORZ ? DIR_UP : DIR_RIGHT;
    // We will begin by generating a sorted table of edges for the
    // current direction. To do so, we simply scan each segment and try
    // to find an edge in our table that corresponds to its position.
    //
    // If no edge is found, we create one and insert a new edge in the
    // sorted table. Otherwise, we simply add the segment to the edge's
    // list which will be processed in the second step to compute the
    // edge's properties.
    //
    // Note that the edge table is sorted along the segment/edge
    // position.
    // Clamp the threshold to 1/4 pixel, then convert back to font units.
    edgeDistanceThreshold = Fixed.mul16(laxis.edgeDistanceTreshold, scale);
    if (edgeDistanceThreshold > 64 / 4)
      edgeDistanceThreshold = 64 / 4;
    edgeDistanceThreshold = Fixed.div16(edgeDistanceThreshold, scale);
    for (int i = 0; i < numSegments; i++)
      {
        seg = segments[i];
        Edge found = null;
        for (int ee = 0; ee < axis.numEdges; ee++)
          {
            Edge edge = axis.edges[ee];
            int dist = seg.pos - edge.fpos;
            if (dist < 0)
              dist = -dist;
            if (dist < edgeDistanceThreshold)
              {
                found = edge;
                break;
              }
          }
        if (found == null)
          {
            // Insert new edge in the list and sort according to
            // the position.
            Edge edge = axis.newEdge(seg.pos);
            edge.first = seg;
            edge.last = seg;
            edge.fpos = seg.pos;
            edge.opos = edge.pos = Fixed.mul16(seg.pos, scale);
            seg.edgeNext = seg;
            seg.edge = edge;
          }
        else
          {
            // Append the segment to the edge's circular segment list.
            seg.edgeNext = found.first;
            found.last.edgeNext = seg;
            found.last = seg;
            seg.edge = found;
          }
      }
    // Good. We will now compute each edge's properties according to
    // segments found on its position. Basically these are:
    // - Edge's main direction.
    // - Stem edge, serif edge, or both (which defaults to stem edge).
    // - Rounded edge, straight or both (which defaults to straight).
    // - Link for edge.
    // Now, compute each edge properties.
    for (int e = 0; e < axis.numEdges; e++)
      {
        Edge edge = axis.edges[e];
        // Does it contain round segments?
        int isRound = 0;
        // Does it contain straight segments?
        int isStraight = 0;
        // Number of upward segments.
        int ups = 0;
        // Number of downward segments.
        int downs = 0;
        seg = edge.first;
        do
          {
            // Check for roundness of segment.
            if ((seg.flags & Segment.FLAG_EDGE_ROUND) != 0)
              isRound++;
            else
              isStraight++;
            // Check for segment direction.
            if (seg.dir == upDir)
              ups += seg.maxPos - seg.minPos;
            else
              downs += seg.maxPos - seg.minPos;
            // Check for links. If seg.serif is set, then seg.link must
            // be ignored.
            boolean isSerif = seg.serif != null && seg.serif.edge != edge;
            if (seg.link != null || isSerif)
              {
                Edge edge2 = edge.link;
                Segment seg2 = seg.link;
                if (isSerif)
                  {
                    seg2 = seg.serif;
                    edge2 = edge.serif;
                  }
                if (edge2 != null)
                  {
                    // Keep the link whose distance is smaller.
                    int edgeDelta = edge.fpos - edge2.fpos;
                    if (edgeDelta < 0)
                      edgeDelta = -edgeDelta;
                    int segDelta = seg.pos - seg2.pos;
                    if (segDelta < 0)
                      segDelta = -segDelta;
                    if (segDelta < edgeDelta)
                      edge2 = seg2.edge;
                  }
                else
                  {
                    edge2 = seg2.edge;
                  }
                if (isSerif)
                  {
                    edge.serif = edge2;
                    edge2.flags |= Segment.FLAG_EDGE_SERIF;
                  }
                else
                  {
                    edge.link = edge2;
                  }
              }
            seg = seg.edgeNext;
          } while (seg != edge.first);
        edge.flags = Segment.FLAG_EDGE_NORMAL;
        if (isRound > 0 && isRound > isStraight)
          edge.flags |= Segment.FLAG_EDGE_ROUND;
        // Set the edge's main direction.
        edge.dir = DIR_NONE;
        if (ups > downs)
          edge.dir = upDir;
        else if (ups < downs)
          edge.dir = -upDir;
        else if (ups == downs)
          edge.dir = 0;
        // Gets rid of serif if link is set. This gets rid of many
        // unpleasant artifacts.
        if (edge.serif != null && edge.link != null)
          {
            edge.serif = null;
          }
        // Debug: Print out all edges.
        // System.err.println("edge# " + e + ": " + edge);
      }
  }
/**
 * Associates each edge on the vertical axis with the closest active blue
 * zone of the Latin metrics, storing the winning anchor (reference or
 * overshoot position) in {@code edge.blueEdge}. Edges further away than a
 * small threshold keep {@code blueEdge} unset.
 *
 * @param hints   glyph hints whose vertical-axis edges are examined
 * @param metrics scaled Latin metrics supplying the blue zones
 */
private void computeBlueEdges(GlyphHints hints, LatinMetrics metrics)
{
    AxisHints axis = hints.axis[DIMENSION_VERT];
    Edge[] edges = axis.edges;
    int numEdges = axis.numEdges;
    LatinAxis latin = metrics.axis[DIMENSION_VERT];
    int scale = latin.scale;
    // Compute which blue zones are active. I.e. have their scaled
    // size < 3/4 pixels.
    // For each horizontal edge search the blue zone that is closest.
    for (int e = 0; e < numEdges; e++)
    {
        Edge edge = edges[e];
        // System.err.println("checking edge: " + edge);
        Width bestBlue = null;
        // Initial distance threshold: unitsPerEm/40, scaled to pixel units,
        // capped at half a pixel (64 units == 1 pixel in this code).
        int bestDist = Fixed.mul16(metrics.unitsPerEm / 40, scale);
        if (bestDist > 64 / 2)
            bestDist = 64 / 2;
        for (int bb = 0; bb < BLUE_MAX; bb++)
        {
            LatinBlue blue = latin.blues[bb];
            // System.err.println("checking blue: " + blue);
            // Skip inactive blue zones, i.e. those that are too small.
            if ((blue.flags & LatinBlue.FLAG_BLUE_ACTIVE) == 0)
                continue;
            // If it is a top zone, check for right edges. If it is a bottom
            // zone, check for left edges.
            boolean isTopBlue = (blue.flags & LatinBlue.FLAG_TOP) != 0;
            boolean isMajorDir = edge.dir == axis.majorDir;
            // If it is a top zone, the edge must be against the major
            // direction. If it is a bottom zone it must be in the major
            // direction.
            if (isTopBlue ^ isMajorDir)
            {
                // Scaled distance from the edge to the zone's reference
                // position; take the closest zone seen so far.
                int dist = edge.fpos - blue.ref.org;
                if (dist < 0)
                    dist = -dist;
                dist = Fixed.mul16(dist, scale);
                if (dist < bestDist)
                {
                    bestDist = dist;
                    bestBlue = blue.ref;
                }
                // Now, compare it to the overshoot position if the edge is
                // rounded, and if the edge is over the reference position of
                // a top zone, or under the reference position of a bottom
                // zone.
                if ((edge.flags & Segment.FLAG_EDGE_ROUND) != 0 && dist != 0)
                {
                    // Inversed vertical coordinates!
                    boolean isUnderRef = edge.fpos > blue.ref.org;
                    if (isTopBlue ^ isUnderRef)
                    {
                        blue = latin.blues[bb]; // Needed?
                        dist = edge.fpos - blue.shoot.org;
                        if (dist < 0)
                            dist = -dist;
                        dist = Fixed.mul16(dist, scale);
                        if (dist < bestDist)
                        {
                            bestDist = dist;
                            bestBlue = blue.shoot;
                        }
                    }
                }
            }
        }
        if (bestBlue != null)
        {
            edge.blueEdge = bestBlue;
            // Debug: Print out the blue edges.
            // System.err.println("blue edge for: " + edge + ": " + bestBlue);
        }
    }
}
}
| gpl-2.0 |
DariusX/camel | components/camel-twilio/src/main/java/org/apache/camel/component/twilio/internal/TwilioConstants.java | 1180 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.twilio.internal;
/**
 * Constants for Twilio component.
 */
public interface TwilioConstants {
    /**
     * Prefix prepended to parameter names when they are passed as exchange
     * header properties. (The constant name and trailing dot show this is a
     * prefix, not a suffix as the old comment said.)
     */
    String PROPERTY_PREFIX = "CamelTwilio.";
    /**
     * Thread profile name for this component
     */
    String THREAD_PROFILE_NAME = "CamelTwilio";
}
| apache-2.0 |
uchida/selendroid | selendroid-standalone/src/main/java/io/selendroid/standalone/server/handler/CreateSessionHandler.java | 2051 | /*
* Copyright 2012-2014 eBay Software Foundation and selendroid committers.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.selendroid.standalone.server.handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import io.selendroid.standalone.server.BaseSelendroidStandaloneHandler;
import org.json.JSONException;
import org.json.JSONObject;
import io.selendroid.common.SelendroidCapabilities;
import io.selendroid.server.common.Response;
import io.selendroid.server.common.SelendroidResponse;
import io.selendroid.server.common.StatusCode;
import io.selendroid.server.common.http.HttpRequest;
/**
 * Handler that creates a new Selendroid test session from the
 * "desiredCapabilities" member of the request payload and answers with the
 * new session id together with the session's actual capabilities.
 */
public class CreateSessionHandler extends BaseSelendroidStandaloneHandler {
    private static final Logger log = Logger.getLogger(CreateSessionHandler.class.getName());

    public CreateSessionHandler(String mappedUri) {
        super(mappedUri);
    }

    @Override
    public Response handleRequest(HttpRequest request, JSONObject payload) throws JSONException {
        // A payload without "desiredCapabilities" raises JSONException to the caller.
        JSONObject requestedCapabilities = payload.getJSONObject("desiredCapabilities");
        try {
            String sessionId = getSelendroidDriver(request).createNewTestSession(requestedCapabilities);
            SelendroidCapabilities sessionCaps = getSelendroidDriver(request).getSessionCapabilities(sessionId);
            return new SelendroidResponse(sessionId, new JSONObject(sessionCaps.asMap()));
        } catch (Exception e) {
            // Any failure maps to the WebDriver "session not created" status.
            log.log(Level.SEVERE, "Error while creating new session", e);
            return new SelendroidResponse("", StatusCode.SESSION_NOT_CREATED_EXCEPTION, e);
        }
    }
}
| apache-2.0 |
akhettar/camel | components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SpringQuartzTwoAppsClusteredFailoverTest.java | 4999 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.routepolicy.quartz;
import org.apache.camel.CamelContext;
import org.apache.camel.ProducerTemplate;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.test.junit4.TestSupport;
import org.apache.camel.util.IOHelper;
import org.junit.Test;
import org.quartz.Scheduler;
import org.springframework.context.support.AbstractXmlApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
/**
 * Tests a Quartz based cluster setup of two Camel Apps being triggered
 * through {@link CronScheduledRoutePolicy}: both apps share one Quartz
 * database; the first acquires the cluster lock and becomes master, and
 * after its simulated crash the second app must take over the scheduled
 * route (failover). Statement order and the sleeps are essential to the
 * scenario — do not reorder.
 */
public class SpringQuartzTwoAppsClusteredFailoverTest extends TestSupport {
    @Test
    public void testQuartzPersistentStoreClusteredApp() throws Exception {
        // boot up the database the two apps are going to share inside a clustered quartz setup
        AbstractXmlApplicationContext db = new ClassPathXmlApplicationContext("org/apache/camel/routepolicy/quartz/SpringQuartzClusteredAppDatabase.xml");
        db.start();
        // now launch the first clustered app which will acquire the quartz database lock and become the master
        AbstractXmlApplicationContext app = new ClassPathXmlApplicationContext("org/apache/camel/routepolicy/quartz/SpringQuartzClusteredAppOne.xml");
        app.start();
        // as well as the second one which will run in slave mode as it will not be able to acquire the same lock
        AbstractXmlApplicationContext app2 = new ClassPathXmlApplicationContext("org/apache/camel/routepolicy/quartz/SpringQuartzClusteredAppTwo.xml");
        app2.start();
        CamelContext camel = app.getBean("camelContext", CamelContext.class);
        MockEndpoint mock = camel.getEndpoint("mock:result", MockEndpoint.class);
        mock.expectedMessageCount(1);
        mock.expectedBodiesReceived("clustering PINGS!");
        // wait a bit to make sure the route has already been properly started through the given route policy
        Thread.sleep(5000);
        app.getBean("template", ProducerTemplate.class).sendBody("direct:start", "clustering");
        mock.assertIsSatisfied();
        // now let's simulate a crash of the first app (the quartz instance 'app-one')
        log.warn("The first app is going to crash NOW!");
        // we need to stop the Scheduler first as the CamelContext will gracefully shutdown and
        // delete all scheduled jobs, so there would be nothing for the second CamelContext to
        // failover from
        app.getBean(Scheduler.class).shutdown();
        IOHelper.close(app);
        log.warn("Crashed...");
        log.warn("Crashed...");
        log.warn("Crashed...");
        // wait long enough until the second app takes it over...
        Thread.sleep(20000);
        // inside the logs one can then clearly see how the route of the second app ('app-two') gets started:
        // 2013-09-24 22:51:34,215 [main ] WARN ersistentStoreClusteredAppTest - Crashed...
        // 2013-09-24 22:51:34,215 [main ] WARN ersistentStoreClusteredAppTest - Crashed...
        // 2013-09-24 22:51:34,215 [main ] WARN ersistentStoreClusteredAppTest - Crashed...
        // 2013-09-24 22:51:49,188 [_ClusterManager] INFO LocalDataSourceJobStore - ClusterManager: detected 1 failed or restarted instances.
        // 2013-09-24 22:51:49,188 [_ClusterManager] INFO LocalDataSourceJobStore - ClusterManager: Scanning for instance "app-one"'s failed in-progress jobs.
        // 2013-09-24 22:51:49,211 [eduler_Worker-1] INFO SpringCamelContext - Route: myRoute started and consuming from: Endpoint[direct://start]
        CamelContext camel2 = app2.getBean("camelContext2", CamelContext.class);
        MockEndpoint mock2 = camel2.getEndpoint("mock:result", MockEndpoint.class);
        mock2.expectedMessageCount(1);
        mock2.expectedBodiesReceived("clustering PONGS!");
        app2.getBean("template", ProducerTemplate.class).sendBody("direct:start", "clustering");
        mock2.assertIsSatisfied();
        // and as the last step shutdown the second app as well as the database
        IOHelper.close(app2, db);
    }
}
| apache-2.0 |
GlenRSmith/elasticsearch | modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ProcessorsWhitelistExtension.java | 1173 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.ingest.common;
import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.IngestScript;
import org.elasticsearch.script.ScriptContext;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * Painless extension that exposes the processors whitelist (loaded once from
 * {@code processors_whitelist.txt}) to the {@link IngestScript} context.
 */
public class ProcessorsWhitelistExtension implements PainlessExtension {

    /** Whitelist loaded from the bundled resource file; immutable, loaded once. */
    private static final Whitelist WHITELIST =
        WhitelistLoader.loadFromResourceFiles(ProcessorsWhitelistExtension.class, "processors_whitelist.txt");

    @Override
    public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
        List<Whitelist> whitelists = Collections.singletonList(WHITELIST);
        return Collections.singletonMap(IngestScript.CONTEXT, whitelists);
    }
}
| apache-2.0 |
nagyistoce/camunda-bpm-platform | engine/src/main/java/org/camunda/bpm/engine/delegate/BaseDelegateExecution.java | 1263 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.delegate;
/**
 * Base contract of an execution handed to delegate code; exposes the
 * execution id, the triggering event name and the business key on top of the
 * variable operations inherited from {@link VariableScope}.
 *
 * @author Daniel Meyer
 * @author Roman Smirnov
 * @author Sebastian Menski
 */
public interface BaseDelegateExecution extends VariableScope {
    /** Unique id of this path of execution that can be used as a handle to provide external signals back into the engine after wait states. */
    String getId();
    /** The {@link ExecutionListener#EVENTNAME_START event name} in case this execution is passed in for an {@link ExecutionListener} */
    String getEventName();
    /** The business key for this execution. Only returns a value if the delegate execution
     * is a root execution (such as a process instance).
     */
    String getBusinessKey();
}
| apache-2.0 |
xuegongzi/rabbitframework | rabbitframework-security-pom/rabbitframework-security/src/main/java/org/apache/shiro/realm/ldap/package-info.java | 983 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Realms that acquire security data from an LDAP (Lightweight Directory Access Protocol) server
* utilizing LDAP/Naming APIs.
*/
package org.apache.shiro.realm.ldap;
| apache-2.0 |
rokn/Count_Words_2015 | testing/openjdk2/jaxws/src/share/jaxws_classes/com/sun/xml/internal/ws/wsdl/writer/document/ParamType.java | 1801 | /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.xml.internal.ws.wsdl.writer.document;
import javax.xml.namespace.QName;
import com.sun.xml.internal.txw2.TypedXmlWriter;
import com.sun.xml.internal.txw2.annotation.XmlAttribute;
import com.sun.xml.internal.ws.wsdl.writer.document.Documented;
/**
 * Typed XML writer for a WSDL param-style element, allowing its
 * {@code message} and {@code name} attributes to be written.
 *
 * @author WS Development Team
 */
public interface ParamType
    extends TypedXmlWriter, Documented
{
    /** Writes the {@code message} attribute (QName of the WSDL message); returns this writer so calls can be chained. */
    @XmlAttribute
    public com.sun.xml.internal.ws.wsdl.writer.document.ParamType message(QName value);
    /** Writes the {@code name} attribute; returns this writer so calls can be chained. */
    @XmlAttribute
    public com.sun.xml.internal.ws.wsdl.writer.document.ParamType name(String value);
}
| mit |
dmlloyd/openjdk-modules | test/lib/jdk/test/lib/hprof/model/ReferenceChain.java | 2093 | /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* The Original Code is HAT. The Initial Developer of the
* Original Code is Bill Foote, with contributions from others
* at JavaSoft/Sun.
*/
package jdk.test.lib.hprof.model;
/**
* Represents a chain of references to some target object
*
* @author Bill Foote
*/
/**
 * A singly linked chain of references leading to some target heap object.
 * Each link holds one {@link JavaHeapObject} and the rest of the chain.
 *
 * @author Bill Foote
 */
public class ReferenceChain {
    JavaHeapObject obj;  // object referred to at this link
    ReferenceChain next; // remainder of the chain, or null at the end

    public ReferenceChain(JavaHeapObject obj, ReferenceChain next) {
        this.obj = obj;
        this.next = next;
    }

    public JavaHeapObject getObj() {
        return obj;
    }

    public ReferenceChain getNext() {
        return next;
    }

    /** Returns the number of links in this chain, counting this link. */
    public int getDepth() {
        int depth = 0;
        for (ReferenceChain link = this; link != null; link = link.next) {
            depth++;
        }
        return depth;
    }
}
| gpl-2.0 |
cocosli/mondrian | testsrc/main/mondrian/test/AccessControlTest.java | 153611 | /*
// This software is subject to the terms of the Eclipse Public License v1.0
// Agreement, available at the following URL:
// http://www.eclipse.org/legal/epl-v10.html.
// You must accept the terms of that agreement to use this software.
//
// Copyright (C) 2003-2005 Julian Hyde
// Copyright (C) 2005-2013 Pentaho
// All Rights Reserved.
*/
package mondrian.test;
import mondrian.olap.*;
import mondrian.olap.Role.HierarchyAccess;
import mondrian.rolap.RolapHierarchy.LimitedRollupMember;
import junit.framework.Assert;
import org.olap4j.mdx.IdentifierNode;
import java.util.List;
/**
* <code>AccessControlTest</code> is a set of unit-tests for access-control.
* For these tests, all of the roles are of type RoleImpl.
*
* @see Role
*
* @author jhyde
* @since Feb 21, 2003
*/
public class AccessControlTest extends FoodMartTestCase {
// Role definition (schema XML fragment) named after BI-Server issue 1574:
// grants "role1" custom access to the Warehouse cube with partial rollup on
// [Store Size in SQFT] and [Store Type].
private static final String BiServer1574Role1 =
    "<Role name=\"role1\">\n"
    + " <SchemaGrant access=\"none\">\n"
    + " <CubeGrant cube=\"Warehouse\" access=\"all\">\n"
    + " <HierarchyGrant hierarchy=\"[Store Size in SQFT]\" access=\"custom\" rollupPolicy=\"partial\">\n"
    + " <MemberGrant member=\"[Store Size in SQFT].[20319]\" access=\"all\"/>\n"
    + " <MemberGrant member=\"[Store Size in SQFT].[21215]\" access=\"none\"/>\n"
    + " </HierarchyGrant>\n"
    + " <HierarchyGrant hierarchy=\"[Store Type]\" access=\"custom\" rollupPolicy=\"partial\">\n"
    + " <MemberGrant member=\"[Store Type].[Supermarket]\" access=\"all\"/>\n"
    + " </HierarchyGrant>\n"
    + " </CubeGrant>\n"
    + " </SchemaGrant>\n"
    + "</Role>";
/**
 * Creates an AccessControlTest.
 *
 * @param name test case name (JUnit 3 convention)
 */
public AccessControlTest(String name) {
    super(name);
}
/**
 * Verifies that stripping access control from a cube's schema reader yields
 * a non-null reader, and that doing so a second time also works.
 */
public void testSchemaReader() {
    final Connection connection = getTestContext().getConnection();
    final Cube salesCube = connection.getSchema().lookupCube("Sales", true);
    SchemaReader restricted = salesCube.getSchemaReader(connection.getRole());
    // Removing access control once...
    SchemaReader unrestricted = restricted.withoutAccessControl();
    assertNotNull(unrestricted);
    // ...and once more must still produce a reader.
    SchemaReader unrestrictedAgain = unrestricted.withoutAccessControl();
    assertNotNull(unrestrictedAgain);
}
/**
 * Denies access to the [Gender] dimension on a mutable clone of the current
 * role and checks that the dimension then cannot be resolved in a query.
 * The grant/makeImmutable/setRole sequence must happen in this order.
 */
public void testGrantDimensionNone() {
    final TestContext context = getTestContext().withFreshConnection();
    final Connection connection = context.getConnection();
    RoleImpl role = ((RoleImpl) connection.getRole()).makeMutableClone();
    Schema schema = connection.getSchema();
    Cube salesCube = schema.lookupCube("Sales", true);
    // todo: add Schema.lookupDimension
    final SchemaReader schemaReader = salesCube.getSchemaReader(role);
    Dimension genderDimension =
        (Dimension) schemaReader.lookupCompound(
            salesCube, Id.Segment.toList("Gender"), true,
            Category.Dimension);
    role.grant(genderDimension, Access.NONE);
    role.makeImmutable();
    connection.setRole(role);
    // With the dimension denied, referencing it must fail to resolve.
    context.assertAxisThrows(
        "[Gender].children",
        "MDX object '[Gender]' not found in cube 'Sales'");
}
/**
 * Checks measure restriction: Role1 has "all" access to the [Measures]
 * hierarchy and sees every measure; Role2 has "custom" access with a grant
 * on [Unit Sales] only, and sees just that one measure.
 */
public void testRestrictMeasures() {
    final TestContext testContext = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Role1\">\n"
        + " <SchemaGrant access=\"all\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Measures]\" access=\"all\">\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>"
        + "<Role name=\"Role2\">\n"
        + " <SchemaGrant access=\"all\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Measures]\" access=\"custom\">\n"
        + " <MemberGrant member=\"[Measures].[Unit Sales]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>");
    final TestContext role1 = testContext.withRole("Role1");
    final TestContext role2 = testContext.withRole("Role2");
    // Role1: full access, all measures visible.
    role1.assertQueryReturns(
        "SELECT {[Measures].Members} ON COLUMNS FROM [SALES]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "{[Measures].[Store Cost]}\n"
        + "{[Measures].[Store Sales]}\n"
        + "{[Measures].[Sales Count]}\n"
        + "{[Measures].[Customer Count]}\n"
        + "{[Measures].[Promotion Sales]}\n"
        + "Row #0: 266,773\n"
        + "Row #0: 225,627.23\n"
        + "Row #0: 565,238.13\n"
        + "Row #0: 86,837\n"
        + "Row #0: 5,581\n"
        + "Row #0: 151,211.21\n");
    // Role2: only the explicitly granted [Unit Sales] is visible.
    role2.assertQueryReturns(
        "SELECT {[Measures].Members} ON COLUMNS FROM [SALES]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "Row #0: 266,773\n");
}
/**
 * A MemberGrant that names a member which does not exist in the schema must
 * cause an error when the role is used, not be silently ignored.
 */
public void testRoleMemberAccessNonExistentMemberFails() {
    final TestContext testContext = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Role1\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Store].[USA].[Non Existent]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>")
        .withRole("Role1");
    testContext.assertQueryThrows(
        "select {[Store].Children} on 0 from [Sales]",
        "Member '[Store].[USA].[Non Existent]' not found");
}
/**
 * Spot-checks the access level the restricted connection's role computes
 * for individual [Store] members: explicit grants/denials, inheritance to
 * children, CUSTOM on ancestors of granted members, and NONE above the
 * hierarchy's top level.
 */
public void testRoleMemberAccess() {
    final Connection connection = getRestrictedConnection();
    // because CA has access
    assertMemberAccess(connection, Access.CUSTOM, "[Store].[USA]");
    assertMemberAccess(connection, Access.CUSTOM, "[Store].[Mexico]");
    assertMemberAccess(connection, Access.NONE, "[Store].[Mexico].[DF]");
    assertMemberAccess(
        connection, Access.NONE, "[Store].[Mexico].[DF].[Mexico City]");
    assertMemberAccess(connection, Access.NONE, "[Store].[Canada]");
    assertMemberAccess(
        connection, Access.NONE, "[Store].[Canada].[BC].[Vancouver]");
    assertMemberAccess(
        connection, Access.ALL, "[Store].[USA].[CA].[Los Angeles]");
    assertMemberAccess(
        connection, Access.NONE, "[Store].[USA].[CA].[San Diego]");
    // USA deny supercedes OR grant
    assertMemberAccess(
        connection, Access.NONE, "[Store].[USA].[OR].[Portland]");
    assertMemberAccess(
        connection, Access.NONE, "[Store].[USA].[WA].[Seattle]");
    assertMemberAccess(connection, Access.NONE, "[Store].[USA].[WA]");
    // above top level
    assertMemberAccess(connection, Access.NONE, "[Store].[All Stores]");
}
/**
 * Asserts that the connection's (restricted) role grants exactly
 * {@code expectedAccess} to the given member of the Sales cube.
 */
private void assertMemberAccess(
    final Connection connection,
    Access expectedAccess,
    String memberName)
{
    Cube salesCube = connection.getSchema().lookupCube("Sales", true);
    // Resolve the member through an unrestricted reader so lookup succeeds
    // even when the role itself denies access to the member.
    SchemaReader unrestrictedReader = salesCube.getSchemaReader(null).withLocus();
    Member member = unrestrictedReader.getMemberByUniqueName(
        Util.parseIdentifier(memberName), true);
    Access actualAccess = connection.getRole().getAccess(member);
    Assert.assertEquals(memberName, expectedAccess, actualAccess);
}
/**
 * Asserts that the connection's role grants exactly {@code expectedAccess}
 * to the named cube.
 */
private void assertCubeAccess(
    final Connection connection,
    Access expectedAccess,
    String cubeName)
{
    Cube cube = connection.getSchema().lookupCube(cubeName, true);
    Assert.assertEquals(
        cubeName, expectedAccess, connection.getRole().getAccess(cube));
}
/**
 * Asserts that the connection's role grants exactly {@code expectedAccess}
 * to the given hierarchy of the named cube.
 */
private void assertHierarchyAccess(
    final Connection connection,
    Access expectedAccess,
    String cubeName,
    String hierarchyName)
{
    final Role role = connection.getRole();
    Schema schema = connection.getSchema();
    final boolean fail = true;
    Cube cube = schema.lookupCube(cubeName, fail);
    final SchemaReader schemaReader =
        cube.getSchemaReader(null); // unrestricted
    final Hierarchy hierarchy =
        (Hierarchy) schemaReader.lookupCompound(
            cube, Util.parseIdentifier(hierarchyName), fail,
            Category.Hierarchy);
    final Access actualAccess = role.getAccess(hierarchy);
    // Fix: the failure message previously named the cube, which made it
    // impossible to tell WHICH hierarchy assertion failed; report the
    // hierarchy under test instead.
    Assert.assertEquals(hierarchyName, expectedAccess, actualAccess);
}
/**
 * Looks up the named hierarchy in the named cube (via an unrestricted
 * schema reader) and returns the access details the connection's role has
 * for it.
 */
private Role.HierarchyAccess getHierarchyAccess(
    final Connection connection,
    String cubeName,
    String hierarchyName)
{
    Cube cube = connection.getSchema().lookupCube(cubeName, true);
    // Unrestricted reader: resolve the hierarchy regardless of role grants.
    SchemaReader unrestrictedReader = cube.getSchemaReader(null);
    Hierarchy hierarchy =
        (Hierarchy) unrestrictedReader.lookupCompound(
            cube, Util.parseIdentifier(hierarchyName), true,
            Category.Hierarchy);
    return connection.getRole().getAccessDetails(hierarchy);
}
/** Granted member, denied member, and ancestor-of-granted-member visibility at level granularity. */
public void testGrantHierarchy1a() {
    // assert: can access Mexico (explicitly granted)
    // assert: can not access Canada (explicitly denied)
    // assert: can access USA (rule 3 - parent of allowed member San
    // Francisco)
    getRestrictedTestContext().assertAxisReturns(
        "[Store].level.members",
        "[Store].[Mexico]\n" + "[Store].[USA]");
}

/** Same checks as {@link #testGrantHierarchy1a()}, but via the allmembers function. */
public void testGrantHierarchy1aAllMembers() {
    // assert: can access Mexico (explicitly granted)
    // assert: can not access Canada (explicitly denied)
    // assert: can access USA (rule 3 - parent of allowed member San
    // Francisco)
    getRestrictedTestContext().assertAxisReturns(
        "[Store].level.allmembers",
        "[Store].[Mexico]\n" + "[Store].[USA]");
}

/** The default member of a restricted hierarchy is the first accessible one. */
public void testGrantHierarchy1b() {
    // can access Mexico (explicitly granted) which is the first accessible
    // one
    getRestrictedTestContext().assertAxisReturns(
        "[Store].defaultMember",
        "[Store].[Mexico]");
}

/** Default member of [Customers] under restriction is below the (inaccessible) All member. */
public void testGrantHierarchy1c() {
    // the root element is All Customers
    getRestrictedTestContext().assertAxisReturns(
        "[Customers].defaultMember",
        "[Customers].[Canada].[BC]");
}

/** Children of an implicitly accessible ancestor include only accessible members. */
public void testGrantHierarchy2() {
    // assert: can access California (parent of allowed member)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisReturns(
        "[Store].[USA].children",
        "[Store].[USA].[CA]");
    // NOTE(review): the identical assertion is repeated — presumably to
    // exercise a cached code path; confirm before removing.
    testContext.assertAxisReturns(
        "[Store].[USA].children",
        "[Store].[USA].[CA]");
    testContext.assertAxisReturns(
        "[Store].[USA].[CA].children",
        "[Store].[USA].[CA].[Los Angeles]\n"
        + "[Store].[USA].[CA].[San Francisco]");
}

/** A child of a denied member cannot be resolved at all. */
public void testGrantHierarchy3() {
    // assert: can not access Washington (child of denied member)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows("[Store].[USA].[WA]", "not found");
}

/** Wraps the default test context so it hands out the restricted connection. */
private TestContext getRestrictedTestContext() {
    return new DelegatingTestContext(getTestContext()) {
        public Connection getConnection() {
            return getRestrictedConnection();
        }
    };
}
/** An earlier deny wins over a later grant (grant-order rule). */
public void testGrantHierarchy4() {
    // assert: can not access Oregon (rule 1 - order matters)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows(
        "[Store].[USA].[OR].children", "not found");
}

/** Members above the hierarchy's granted top level are invisible; full visible-member enumeration. */
public void testGrantHierarchy5() {
    // assert: can not access All (above top level)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows("[Store].[All Stores]", "not found");
    testContext.assertAxisReturns(
        "[Store].members",
        // note:
        // no: [All Stores] -- above top level
        // no: [Canada] -- not explicitly allowed
        // yes: [Mexico] -- explicitly allowed -- and all its children
        // except [DF]
        // no: [Mexico].[DF]
        // yes: [USA] -- implicitly allowed
        // yes: [CA] -- implicitly allowed
        // no: [OR], [WA]
        // yes: [San Francisco] -- explicitly allowed
        // no: [San Diego]
        "[Store].[Mexico]\n"
        + "[Store].[Mexico].[Guerrero]\n"
        + "[Store].[Mexico].[Guerrero].[Acapulco]\n"
        + "[Store].[Mexico].[Guerrero].[Acapulco].[Store 1]\n"
        + "[Store].[Mexico].[Jalisco]\n"
        + "[Store].[Mexico].[Jalisco].[Guadalajara]\n"
        + "[Store].[Mexico].[Jalisco].[Guadalajara].[Store 5]\n"
        + "[Store].[Mexico].[Veracruz]\n"
        + "[Store].[Mexico].[Veracruz].[Orizaba]\n"
        + "[Store].[Mexico].[Veracruz].[Orizaba].[Store 10]\n"
        + "[Store].[Mexico].[Yucatan]\n"
        + "[Store].[Mexico].[Yucatan].[Merida]\n"
        + "[Store].[Mexico].[Yucatan].[Merida].[Store 8]\n"
        + "[Store].[Mexico].[Zacatecas]\n"
        + "[Store].[Mexico].[Zacatecas].[Camacho]\n"
        + "[Store].[Mexico].[Zacatecas].[Camacho].[Store 4]\n"
        + "[Store].[Mexico].[Zacatecas].[Hidalgo]\n"
        + "[Store].[Mexico].[Zacatecas].[Hidalgo].[Store 12]\n"
        + "[Store].[Mexico].[Zacatecas].[Hidalgo].[Store 18]\n"
        + "[Store].[USA]\n"
        + "[Store].[USA].[CA]\n"
        + "[Store].[USA].[CA].[Los Angeles]\n"
        + "[Store].[USA].[CA].[Los Angeles].[Store 7]\n"
        + "[Store].[USA].[CA].[San Francisco]\n"
        + "[Store].[USA].[CA].[San Francisco].[Store 14]");
}

/** Parent of a member at the granted top level is null (not the invisible All member). */
public void testGrantHierarchy6() {
    // assert: parent if at top level is null
    getRestrictedTestContext().assertAxisReturns(
        "[Customers].[USA].[CA].parent",
        "");
}

/** Members above the granted top level cannot even be resolved by name. */
public void testGrantHierarchy7() {
    // assert: members above top level do not exist
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows(
        "[Customers].[Canada].children",
        "MDX object '[Customers].[Canada]' not found in cube 'Sales'");
}

/** Members below the granted bottom level are invisible; count of visible members. */
public void testGrantHierarchy8() {
    // assert: can not access Catherine Abel in San Francisco (below bottom
    // level)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows(
        "[Customers].[USA].[CA].[San Francisco].[Catherine Abel]",
        "not found");
    testContext.assertAxisReturns(
        "[Customers].[USA].[CA].[San Francisco].children",
        "");
    Axis axis = testContext.executeAxis("[Customers].members");
    // 13 states, 109 cities
    Assert.assertEquals(122, axis.getPositions().size());
}

/** Same checks as {@link #testGrantHierarchy8()}, but via the allmembers function. */
public void testGrantHierarchy8AllMembers() {
    // assert: can not access Catherine Abel in San Francisco (below bottom
    // level)
    final TestContext testContext = getRestrictedTestContext();
    testContext.assertAxisThrows(
        "[Customers].[USA].[CA].[San Francisco].[Catherine Abel]",
        "not found");
    testContext.assertAxisReturns(
        "[Customers].[USA].[CA].[San Francisco].children",
        "");
    Axis axis = testContext.executeAxis("[Customers].allmembers");
    // 13 states, 109 cities
    Assert.assertEquals(122, axis.getPositions().size());
}
    /**
     * Tests for Mondrian BUG 1201 - Native Rollups did not handle
     * access-control with more than one member where granted access=all
     */
    public void testBugMondrian_1201_MultipleMembersInRoleAccessControl() {
        // Role1 and Role2 carry identical member grants; only the
        // rollupPolicy differs ("partial" vs "full"), so the same queries
        // are expected to roll up to different totals below.
        String test_1201_Roles =
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[Store].[USA].[WA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[OR]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico].[DF]\" access=\"none\"/>\n"
            + " <MemberGrant member=\"[Store].[Canada]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>\n"
            + "<Role name=\"Role2\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"full\">\n"
            + " <MemberGrant member=\"[Store].[USA].[WA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[OR]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico].[DF]\" access=\"none\"/>\n"
            + " <MemberGrant member=\"[Store].[Canada]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>";
        final TestContext partialRollupTestContext =
            TestContext.instance().create(
                null, null, null, null, null, test_1201_Roles)
                .withRole("Role1");
        final TestContext fullRollupTestContext =
            TestContext.instance().create(
                null, null, null, null, null, test_1201_Roles)
                .withRole("Role2");
        // Must return only 2 [USA].[CA] stores
        partialRollupTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " Filter( [Store].[USA].[CA].children,"
            + " [Measures].[Unit Sales]>0) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 2,614\n"
            + "Row #1: 187\n");
        // Must return only 2 [USA].[CA] stores
        partialRollupTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount( [Store].[USA].[CA].children, 20,"
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 2,614\n"
            + "Row #1: 187\n");
        // Partial Rollup: [USA].[CA] rolls up only up to 2.801
        partialRollupTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " Filter( [Store].[Store State].Members,"
            + " [Measures].[Unit Sales]>4000) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[OR]}\n"
            + "{[Store].[USA].[WA]}\n"
            + "Row #0: 4,617\n"
            + "Row #1: 10,319\n");
        // Full Rollup: [USA].[CA] rolls up to 6.021
        fullRollupTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " Filter( [Store].[Store State].Members,"
            + " [Measures].[Unit Sales]>4000) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA]}\n"
            + "{[Store].[USA].[OR]}\n"
            + "{[Store].[USA].[WA]}\n"
            + "Row #0: 6,021\n"
            + "Row #1: 4,617\n"
            + "Row #2: 10,319\n");
    }
    /**
     * Tests that the cache does not leak cell values across roles: a query
     * evaluated under Role1 must not provide cached results to Role2, whose
     * grants differ (Role2 lacks the [Los Angeles] member grant).
     */
    public void testBugMondrian_1201_CacheAwareOfRoleAccessControl() {
        String test_1201_Roles =
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[Store].[USA].[WA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[OR]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico].[DF]\" access=\"none\"/>\n"
            + " <MemberGrant member=\"[Store].[Canada]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>\n"
            + "<Role name=\"Role2\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[Store].[USA].[WA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[OR]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[Mexico].[DF]\" access=\"none\"/>\n"
            + " <MemberGrant member=\"[Store].[Canada]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>";
        final TestContext partialRollupTestContext1 =
            TestContext.instance().create(
                null, null, null, null, null, test_1201_Roles)
                .withRole("Role1");
        final TestContext partialRollupTestContext2 =
            TestContext.instance().create(
                null, null, null, null, null, test_1201_Roles)
                .withRole("Role2");
        // Put query into cache
        partialRollupTestContext1.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " Filter( [Store].[USA].[CA].children,"
            + " [Measures].[Unit Sales]>0) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 2,614\n"
            + "Row #1: 187\n");
        // Run same query using another role with different access controls.
        // Role2 must only see [San Francisco], not the cached [Los Angeles].
        partialRollupTestContext2.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount( [Store].[USA].[CA].children, 20,"
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 187\n");
    }
    /**
     * Tests for Mondrian BUG 1127 - Native Top Count was not taking into
     * account user roles
     */
    public void testBugMondrian1127OneSlicerOnly() {
        // Under the restricted role, TopCount over [CA].Children may only
        // see Los Angeles and San Francisco.
        final TestContext testContext = getRestrictedTestContext();
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount([Store].[USA].[CA].Children, 10,"
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 2,614\n"
            + "Row #1: 187\n");
        // Same query without access control sees all four [CA] stores.
        final TestContext unrestrictedTestContext = getTestContext();
        unrestrictedTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount([Store].[USA].[CA].Children, 10, "
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 2,614\n"
            + "Row #1: 1,879\n"
            + "Row #2: 1,341\n"
            + "Row #3: 187\n");
    }
    /**
     * As {@link #testBugMondrian1127OneSlicerOnly}, but slicing by a range
     * of months ([Q1].[2] : [Q1].[3]) rather than a single member.
     */
    public void testBugMondrian1127MultipleSlicers() {
        final TestContext testContext = getRestrictedTestContext();
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount([Store].[USA].[CA].Children, 10,"
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2] : [Time].[1997].[Q1].[3])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "{[Time].[1997].[Q1].[3]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 4,497\n"
            + "Row #1: 337\n");
        // Same query without access control sees all four [CA] stores.
        final TestContext unrestrictedTestContext = getTestContext();
        unrestrictedTestContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
            + " TopCount([Store].[USA].[CA].Children, 10, "
            + " [Measures].[Unit Sales]) ON ROWS \n"
            + "from [Sales] \n"
            + "where ([Time].[1997].[Q1].[2] : [Time].[1997].[Q1].[3])",
            "Axis #0:\n"
            + "{[Time].[1997].[Q1].[2]}\n"
            + "{[Time].[1997].[Q1].[3]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 4,497\n"
            + "Row #1: 4,094\n"
            + "Row #2: 2,585\n"
            + "Row #3: 337\n");
    }
    /**
     * Tests that we only aggregate over SF, LA, even when called from
     * functions.
     */
    public void testGrantHierarchy9() {
        // Analysis services doesn't allow aggregation within calculated
        // measures, so use the following query to generate the results:
        //
        // with member [Store].[SF LA] as
        // 'Aggregate({[USA].[CA].[San Francisco], [Store].[USA].[CA].[Los
        // Angeles]})'
        // select {[Measures].[Unit Sales]} on columns,
        // {[Gender].children} on rows
        // from Sales
        // where ([Marital Status].[S], [Store].[SF LA])
        final TestContext tc = new RestrictedTestContext();
        // Under the restricted role, [CA].children is only {SF, LA}, so the
        // calculated member aggregates just those two cities.
        tc.assertQueryReturns(
            "with member [Measures].[California Unit Sales] as "
            + " 'Aggregate({[Store].[USA].[CA].children}, [Measures].[Unit Sales])'\n"
            + "select {[Measures].[California Unit Sales]} on columns,\n"
            + " {[Gender].children} on rows\n"
            + "from Sales\n"
            + "where ([Marital Status].[S])",
            "Axis #0:\n"
            + "{[Marital Status].[S]}\n"
            + "Axis #1:\n"
            + "{[Measures].[California Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Gender].[F]}\n"
            + "{[Gender].[M]}\n"
            + "Row #0: 6,636\n"
            + "Row #1: 7,329\n");
    }
public void testGrantHierarchyA() {
final TestContext tc = new RestrictedTestContext();
// assert: totals for USA include missing cells
tc.assertQueryReturns(
"select {[Unit Sales]} on columns,\n"
+ "{[Store].[USA], [Store].[USA].children} on rows\n"
+ "from [Sales]",
"Axis #0:\n"
+ "{}\n"
+ "Axis #1:\n"
+ "{[Measures].[Unit Sales]}\n"
+ "Axis #2:\n"
+ "{[Store].[USA]}\n"
+ "{[Store].[USA].[CA]}\n"
+ "Row #0: 266,773\n"
+ "Row #1: 74,748\n");
}
    // NOTE(review): the leading underscore keeps JUnit 3 from picking this
    // method up as a test (test methods must start with "test"); presumably
    // disabled pending the bug it describes -- confirm before re-enabling.
    public void _testSharedObjectsInGrantMappingsBug() {
        final TestContext testContext = new TestContext() {
            public Connection getConnection() {
                boolean mustGet = true;
                Connection connection = super.getConnection();
                Schema schema = connection.getSchema();
                Cube salesCube = schema.lookupCube("Sales", mustGet);
                Cube warehouseCube = schema.lookupCube("Warehouse", mustGet);
                Hierarchy measuresInSales = salesCube.lookupHierarchy(
                    new Id.NameSegment("Measures", Id.Quoting.UNQUOTED), false);
                Hierarchy storeInWarehouse = warehouseCube.lookupHierarchy(
                    new Id.NameSegment("Store", Id.Quoting.UNQUOTED), false);
                RoleImpl role = new RoleImpl();
                role.grant(schema, Access.NONE);
                role.grant(salesCube, Access.NONE);
                // For using hierarchy Measures in #assertExprThrows
                Role.RollupPolicy rollupPolicy = Role.RollupPolicy.FULL;
                role.grant(
                    measuresInSales, Access.ALL, null, null, rollupPolicy);
                role.grant(warehouseCube, Access.NONE);
                // Grant is on the [Store] dimension of the *Warehouse* cube;
                // it must not leak into the shared [Store] in [Sales].
                role.grant(storeInWarehouse.getDimension(), Access.ALL);
                role.makeImmutable();
                connection.setRole(role);
                return connection;
            }
        };
        // Looking up default member on dimension Store in cube Sales should
        // fail.
        testContext.assertExprThrows(
            "[Store].DefaultMember",
            "'[Store]' not found in cube 'Sales'");
    }
public void testNoAccessToCube() {
final TestContext tc = new RestrictedTestContext();
tc.assertQueryThrows("select from [HR]", "MDX cube 'HR' not found");
}
    /**
     * Returns a restricted connection with the customers dimension
     * restricted as well; see {@link #getRestrictedConnection(boolean)}.
     */
    private Connection getRestrictedConnection() {
        return getRestrictedConnection(true);
    }
    /**
     * Returns a connection with limited access to the schema.
     *
     * @param restrictCustomers true to restrict access to the customers
     * dimension. This will change the defaultMember of the dimension,
     * all cell values will be null because there are no sales data
     * for Canada
     *
     * @return restricted connection
     */
    private Connection getRestrictedConnection(boolean restrictCustomers) {
        // withSchemaPool(false): build a private schema so the role grants
        // attached here cannot affect other tests' cached schemas.
        Connection connection =
            getTestContext().withSchemaPool(false).getConnection();
        RoleImpl role = new RoleImpl();
        Schema schema = connection.getSchema();
        final boolean fail = true;
        Cube salesCube = schema.lookupCube("Sales", fail);
        final SchemaReader schemaReader =
            salesCube.getSchemaReader(null).withLocus();
        Hierarchy storeHierarchy = salesCube.lookupHierarchy(
            new Id.NameSegment("Store", Id.Quoting.UNQUOTED), false);
        role.grant(schema, Access.ALL_DIMENSIONS);
        role.grant(salesCube, Access.ALL);
        // Clip the [Store] hierarchy at the [Store Country] level; member
        // grants below select which subtrees are actually visible.
        Level nationLevel =
            Util.lookupHierarchyLevel(storeHierarchy, "Store Country");
        Role.RollupPolicy rollupPolicy = Role.RollupPolicy.FULL;
        role.grant(
            storeHierarchy, Access.CUSTOM, nationLevel, null, rollupPolicy);
        // [USA]: custom on the country, full on [OR] and on two CA cities.
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier("[Store].[All Stores].[USA].[OR]"), fail),
            Access.ALL);
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier("[Store].[All Stores].[USA]"), fail),
            Access.CUSTOM);
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier(
                    "[Store].[All Stores].[USA].[CA].[San Francisco]"), fail),
            Access.ALL);
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier(
                    "[Store].[All Stores].[USA].[CA].[Los Angeles]"), fail),
            Access.ALL);
        // [Mexico] visible except for [DF]; [Canada] denied entirely.
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier(
                    "[Store].[All Stores].[Mexico]"), fail),
            Access.ALL);
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier(
                    "[Store].[All Stores].[Mexico].[DF]"), fail),
            Access.NONE);
        role.grant(
            schemaReader.getMemberByUniqueName(
                Util.parseIdentifier(
                    "[Store].[All Stores].[Canada]"), fail),
            Access.NONE);
        if (restrictCustomers) {
            // Optionally clip [Customers] to levels
            // [State Province]..[City].
            Hierarchy customersHierarchy =
                salesCube.lookupHierarchy(
                    new Id.NameSegment("Customers", Id.Quoting.UNQUOTED),
                    false);
            Level stateProvinceLevel =
                Util.lookupHierarchyLevel(customersHierarchy, "State Province");
            Level customersCityLevel =
                Util.lookupHierarchyLevel(customersHierarchy, "City");
            role.grant(
                customersHierarchy,
                Access.CUSTOM,
                stateProvinceLevel,
                customersCityLevel,
                rollupPolicy);
        }
        // No access to HR cube.
        Cube hrCube = schema.lookupCube("HR", fail);
        role.grant(hrCube, Access.NONE);
        role.makeImmutable();
        connection.setRole(role);
        return connection;
    }
    // todo: test that access to restricted measure fails
    // (will not work -- have not fixed Cube.getMeasures)
    /**
     * Test context whose connection carries the restricted role built by
     * {@link #getRestrictedConnection(boolean)}, without the customers
     * restriction.
     */
    private class RestrictedTestContext extends TestContext {
        public synchronized Connection getConnection() {
            return getRestrictedConnection(false);
        }
    }
    /**
     * Test context where the [Store] hierarchy has restricted access
     * and cell values are rolled up with 'partial' policy.
     */
    private TestContext getRollupTestContext() {
        // [USA] is granted but child [CA] is denied; with "partial" rollup
        // the [USA] totals therefore exclude [CA]'s contribution.
        return getTestContext().create(
            null, null, null, null, null,
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"custom\">\n"
            + " <DimensionGrant dimension=\"[Measures]\" access=\"all\"/>\n"
            + " <DimensionGrant dimension=\"[Gender]\" access=\"all\"/>\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[Store].[USA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>")
            .withRole("Role1");
    }
/**
* Basic test of partial rollup policy. [USA] = [OR] + [WA], not
* the usual [CA] + [OR] + [WA].
*/
public void testRollupPolicyBasic() {
getRollupTestContext().assertQueryReturns(
"select {[Store].[USA], [Store].[USA].Children} on 0\n"
+ "from [Sales]",
"Axis #0:\n"
+ "{}\n"
+ "Axis #1:\n"
+ "{[Store].[USA]}\n"
+ "{[Store].[USA].[OR]}\n"
+ "{[Store].[USA].[WA]}\n"
+ "Row #0: 192,025\n"
+ "Row #0: 67,659\n"
+ "Row #0: 124,366\n");
}
/**
* The total for [Store].[All Stores] is similarly reduced. All
* children of [All Stores] are visible, but one grandchild is not.
* Normally the total is 266,773.
*/
public void testRollupPolicyAll() {
getRollupTestContext().assertExprReturns(
"([Store].[All Stores])",
"192,025");
}
/**
* Access [Store].[All Stores] implicitly as it is the default member
* of the [Stores] hierarchy.
*/
public void testRollupPolicyAllAsDefault() {
getRollupTestContext().assertExprReturns(
"([Store])",
"192,025");
}
/**
* Access [Store].[All Stores] via the Parent relationship (to check
* that this doesn't circumvent access control).
*/
public void testRollupPolicyAllAsParent() {
getRollupTestContext().assertExprReturns(
"([Store].[USA].Parent)",
"192,025");
}
/**
* Tests that an access-controlled dimension affects results even if not
* used in the query. Unit test for
* <a href="http://jira.pentaho.com/browse/mondrian-1283">MONDRIAN-1283,
* "Mondrian doesn't restrict dimension members when dimension isn't
* included"</a>.
*/
public void testUnusedAccessControlledDimension() {
getRollupTestContext().assertQueryReturns(
"select [Gender].Children on 0 from [Sales]",
"Axis #0:\n"
+ "{}\n"
+ "Axis #1:\n"
+ "{[Gender].[F]}\n"
+ "{[Gender].[M]}\n"
+ "Row #0: 94,799\n"
+ "Row #0: 97,226\n");
getTestContext().assertQueryReturns(
"select [Gender].Children on 0 from [Sales]",
"Axis #0:\n"
+ "{}\n"
+ "Axis #1:\n"
+ "{[Gender].[F]}\n"
+ "{[Gender].[M]}\n"
+ "Row #0: 131,558\n"
+ "Row #0: 135,215\n");
}
/**
* Tests that members below bottom level are regarded as visible.
*/
public void testRollupBottomLevel() {
rollupPolicyBottom(
Role.RollupPolicy.FULL, "74,748", "36,759", "266,773");
rollupPolicyBottom(
Role.RollupPolicy.PARTIAL, "72,739", "35,775", "264,764");
rollupPolicyBottom(Role.RollupPolicy.HIDDEN, "", "", "");
}
    /**
     * Builds a role whose [Customers] grant has bottomLevel=[City] and
     * denies [Los Angeles], then checks cell values under the given policy.
     *
     * @param rollupPolicy rollup policy under test
     * @param v1 expected ([Customers].[USA].[CA]) value
     * @param v2 expected ([Customers].[USA].[CA], [Gender].[F]) value
     * @param v3 expected ([Customers].[USA]) value
     */
    private void rollupPolicyBottom(
        Role.RollupPolicy rollupPolicy,
        String v1,
        String v2,
        String v3)
    {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\""
                + rollupPolicy
                + "\" bottomLevel=\"[Customers].[City]\">\n"
                + " <MemberGrant member=\"[Customers].[USA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA].[Los Angeles]\" access=\"none\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>")
            .withRole("Role1");
        // All of the children of [San Francisco] are invisible, because [City]
        // is the bottom level, but that shouldn't affect the total.
        testContext.assertExprReturns(
            "([Customers].[USA].[CA].[San Francisco])", "88");
        testContext.assertExprThrows(
            "([Customers].[USA].[CA].[Los Angeles])",
            "MDX object '[Customers].[USA].[CA].[Los Angeles]' not found in cube 'Sales'");
        testContext.assertExprReturns("([Customers].[USA].[CA])", v1);
        testContext.assertExprReturns(
            "([Customers].[USA].[CA], [Gender].[F])", v2);
        testContext.assertExprReturns("([Customers].[USA])", v3);
        checkQuery(
            testContext,
            "select [Customers].Children on 0, "
            + "[Gender].Members on 1 from [Sales]");
    }
/**
* Calls various {@link SchemaReader} methods on the members returned in
* a result set.
*
* @param testContext Test context
* @param mdx MDX query
*/
private void checkQuery(TestContext testContext, String mdx) {
Result result = testContext.executeQuery(mdx);
final SchemaReader schemaReader =
testContext.getConnection().getSchemaReader().withLocus();
for (Axis axis : result.getAxes()) {
for (Position position : axis.getPositions()) {
for (Member member : position) {
final Member accessControlledParent =
schemaReader.getMemberParent(member);
if (member.getParentMember() == null) {
assertNull(accessControlledParent);
}
final List<Member> accessControlledChildren =
schemaReader.getMemberChildren(member);
assertNotNull(accessControlledChildren);
}
}
}
}
/**
* Tests that a bad value for the rollupPolicy attribute gives the
* appropriate error.
*/
public void testRollupPolicyNegative() {
final TestContext testContext =
TestContext.instance().create(
null, null, null, null, null,
"<Role name=\"Role1\">\n"
+ " <SchemaGrant access=\"none\">\n"
+ " <CubeGrant cube=\"Sales\" access=\"all\">\n"
+ " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"bad\" bottomLevel=\"[Customers].[City]\">\n"
+ " <MemberGrant member=\"[Customers].[USA]\" access=\"all\"/>\n"
+ " <MemberGrant member=\"[Customers].[USA].[CA].[Los Angeles]\" access=\"none\"/>\n"
+ " </HierarchyGrant>\n"
+ " </CubeGrant>\n"
+ " </SchemaGrant>\n"
+ "</Role>")
.withRole("Role1");
testContext.assertQueryThrows(
"select from [Sales]",
"Illegal rollupPolicy value 'bad'");
}
/**
* Tests where all children are visible but a grandchild is not.
*/
public void testRollupPolicyGreatGrandchildInvisible() {
rollupPolicyGreatGrandchildInvisible(
Role.RollupPolicy.FULL, "266,773", "74,748");
rollupPolicyGreatGrandchildInvisible(
Role.RollupPolicy.PARTIAL, "266,767", "74,742");
rollupPolicyGreatGrandchildInvisible(
Role.RollupPolicy.HIDDEN, "", "");
}
    /**
     * Builds a role that grants [Customers].[USA] but denies one individual
     * customer ([Gladys Evans]), and checks totals under the given policy.
     *
     * @param policy rollup policy under test
     * @param v1 expected grand total and ([Customers].[USA]) value
     * @param v2 expected ([Customers].[USA].[CA]) value
     */
    private void rollupPolicyGreatGrandchildInvisible(
        Role.RollupPolicy policy,
        String v1,
        String v2)
    {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\""
                + policy
                + "\">\n"
                + " <MemberGrant member=\"[Customers].[USA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA].[San Francisco].[Gladys Evans]\" access=\"none\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>")
            .withRole("Role1");
        testContext.assertExprReturns("[Measures].[Unit Sales]", v1);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], [Customers].[USA])",
            v1);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], [Customers].[USA].[CA])",
            v2);
    }
/**
* Tests where two hierarchies are simultaneously access-controlled.
*/
public void testRollupPolicySimultaneous() {
// note that v2 is different for full vs partial, v3 is the same
rollupPolicySimultaneous(
Role.RollupPolicy.FULL, "266,773", "74,748", "25,635");
rollupPolicySimultaneous(
Role.RollupPolicy.PARTIAL, "72,631", "72,631", "25,635");
rollupPolicySimultaneous(
Role.RollupPolicy.HIDDEN, "", "", "");
}
    /**
     * Applies the same rollup policy to grants on both the [Customers] and
     * [Store] hierarchies at once and checks cell values.
     *
     * @param policy rollup policy for both hierarchy grants
     * @param v1 expected grand total / ([Customers].[USA]) value
     * @param v2 expected value with [Customers].[USA].[CA] in context
     * @param v3 expected value adding [Store].[USA].[CA].[San Diego]
     */
    private void rollupPolicySimultaneous(
        Role.RollupPolicy policy,
        String v1,
        String v2,
        String v3)
    {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\""
                + policy
                + "\">\n"
                + " <MemberGrant member=\"[Customers].[USA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA].[San Francisco].[Gladys Evans]\" access=\"none\"/>\n"
                + " </HierarchyGrant>\n"
                + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\""
                + policy
                + "\">\n"
                + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco].[Store 14]\" access=\"none\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>")
            .withRole("Role1");
        testContext.assertExprReturns("[Measures].[Unit Sales]", v1);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], [Customers].[USA])",
            v1);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], [Customers].[USA].[CA])",
            v2);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], "
            + "[Customers].[USA].[CA], [Store].[USA].[CA])",
            v2);
        testContext.assertExprReturns(
            "([Measures].[Unit Sales], "
            + "[Customers].[USA].[CA], "
            + "[Store].[USA].[CA].[San Diego])",
            v3);
    }
    // todo: performance test where 1 of 1000 children is not visible
    /**
     * Tests a connection opened with a comma-separated union of roles:
     * access is the union of Role1 (partial rollup, custom grants) and
     * Role2 (hidden rollup). Also checks that unknown role names in the
     * union are rejected.
     */
    public void testUnionRole() {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"Partial\">\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA].[San Francisco].[Gladys Evans]\" access=\"none\"/>\n"
                + " </HierarchyGrant>\n"
                + " <HierarchyGrant hierarchy=\"[Promotion Media]\" access=\"all\"/>\n"
                + " <HierarchyGrant hierarchy=\"[Marital Status]\" access=\"none\"/>\n"
                + " <HierarchyGrant hierarchy=\"[Gender]\" access=\"none\"/>\n"
                + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"Partial\" topLevel=\"[Store].[Store State]\"/>\n"
                + " </CubeGrant>\n"
                + " <CubeGrant cube=\"Warehouse\" access=\"all\"/>\n"
                + " </SchemaGrant>\n"
                + "</Role>\n"
                + "<Role name=\"Role2\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"none\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"Hidden\">\n"
                + " <MemberGrant member=\"[Customers].[USA]\" access=\"all\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"none\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[OR]\" access=\"none\"/>\n"
                + " <MemberGrant member=\"[Customers].[USA].[OR].[Portland]\" access=\"all\"/>\n"
                + " </HierarchyGrant>\n"
                + " <HierarchyGrant hierarchy=\"[Store]\" access=\"all\" rollupPolicy=\"Hidden\"/>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>\n");
        Connection connection;
        // An unknown role anywhere in the union must be rejected.
        try {
            connection = testContext.withRole("Role3,Role2").getConnection();
            fail("expected exception, got " + connection);
        } catch (RuntimeException e) {
            final String message = e.getMessage();
            assertTrue(message, message.indexOf("Role 'Role3' not found") >= 0);
        }
        try {
            connection = testContext.withRole("Role1,Role3").getConnection();
            fail("expected exception, got " + connection);
        } catch (RuntimeException e) {
            final String message = e.getMessage();
            assertTrue(message, message.indexOf("Role 'Role3' not found") >= 0);
        }
        connection = testContext.withRole("Role1,Role2").getConnection();
        // Cube access:
        // Both can see [Sales]
        // Role1 only see [Warehouse]
        // Neither can see [Warehouse and Sales]
        assertCubeAccess(connection, Access.ALL, "Sales");
        assertCubeAccess(connection, Access.ALL, "Warehouse");
        assertCubeAccess(connection, Access.NONE, "Warehouse and Sales");
        // Hierarchy access:
        // Both can see [Customers] with Custom access
        // Both can see [Store], Role1 with Custom access, Role2 with All access
        // Role1 can see [Promotion Media], Role2 cannot
        // Neither can see [Marital Status]
        assertHierarchyAccess(
            connection, Access.CUSTOM, "Sales", "[Customers]");
        assertHierarchyAccess(
            connection, Access.ALL, "Sales", "[Store]");
        assertHierarchyAccess(
            connection, Access.ALL, "Sales", "[Promotion Media]");
        assertHierarchyAccess(
            connection, Access.NONE, "Sales", "[Marital Status]");
        // Rollup policy is the greater of Role1's partial and Role2's hidden
        final Role.HierarchyAccess hierarchyAccess =
            getHierarchyAccess(connection, "Sales", "[Store]");
        assertEquals(
            Role.RollupPolicy.PARTIAL,
            hierarchyAccess.getRollupPolicy());
        // One of the roles is restricting the levels, so we
        // expect only the levels from 2 to 4 to be available.
        assertEquals(2, hierarchyAccess.getTopLevelDepth());
        assertEquals(4, hierarchyAccess.getBottomLevelDepth());
        // Member access:
        // both can see [USA]
        assertMemberAccess(connection, Access.CUSTOM, "[Customers].[USA]");
        // Role1 can see [CA], Role2 cannot
        assertMemberAccess(connection, Access.CUSTOM, "[Customers].[USA].[CA]");
        // Role1 cannot see [USA].[OR].[Portland], Role2 can
        assertMemberAccess(
            connection, Access.ALL, "[Customers].[USA].[OR].[Portland]");
        // Role1 cannot see [USA].[OR], Role2 can see it by virtue of [Portland]
        assertMemberAccess(
            connection, Access.CUSTOM, "[Customers].[USA].[OR]");
        // Neither can see Beaverton
        assertMemberAccess(
            connection, Access.NONE, "[Customers].[USA].[OR].[Beaverton]");
        // Rollup policy
        String mdx = "select Hierarchize(\n"
            + "{[Customers].[USA].Children,\n"
            + " [Customers].[USA].[OR].Children}) on 0\n"
            + "from [Sales]";
        testContext.assertQueryReturns(
            mdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Customers].[USA].[CA]}\n"
            + "{[Customers].[USA].[OR]}\n"
            + "{[Customers].[USA].[OR].[Albany]}\n"
            + "{[Customers].[USA].[OR].[Beaverton]}\n"
            + "{[Customers].[USA].[OR].[Corvallis]}\n"
            + "{[Customers].[USA].[OR].[Lake Oswego]}\n"
            + "{[Customers].[USA].[OR].[Lebanon]}\n"
            + "{[Customers].[USA].[OR].[Milwaukie]}\n"
            + "{[Customers].[USA].[OR].[Oregon City]}\n"
            + "{[Customers].[USA].[OR].[Portland]}\n"
            + "{[Customers].[USA].[OR].[Salem]}\n"
            + "{[Customers].[USA].[OR].[W. Linn]}\n"
            + "{[Customers].[USA].[OR].[Woodburn]}\n"
            + "{[Customers].[USA].[WA]}\n"
            + "Row #0: 74,748\n"
            + "Row #0: 67,659\n"
            + "Row #0: 6,806\n"
            + "Row #0: 4,558\n"
            + "Row #0: 9,539\n"
            + "Row #0: 4,910\n"
            + "Row #0: 9,596\n"
            + "Row #0: 5,145\n"
            + "Row #0: 3,708\n"
            + "Row #0: 3,583\n"
            + "Row #0: 7,678\n"
            + "Row #0: 4,175\n"
            + "Row #0: 7,961\n"
            + "Row #0: 124,366\n");
        testContext.withRole("Role1").assertQueryThrows(
            mdx,
            "MDX object '[Customers].[USA].[OR]' not found in cube 'Sales'");
        testContext.withRole("Role2").assertQueryThrows(
            mdx,
            "MDX cube 'Sales' not found");
        // Compared to above:
        // a. cities in Oregon are missing besides Portland
        // b. total for Oregon = total for Portland
        testContext.withRole("Role1,Role2").assertQueryReturns(
            mdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Customers].[USA].[CA]}\n"
            + "{[Customers].[USA].[OR]}\n"
            + "{[Customers].[USA].[OR].[Portland]}\n"
            + "{[Customers].[USA].[WA]}\n"
            + "Row #0: 74,742\n"
            + "Row #0: 3,583\n"
            + "Row #0: 3,583\n"
            + "Row #0: 124,366\n");
        checkQuery(testContext.withRole("Role1,Role2"), mdx);
    }
    /**
     * This is a test for
     * <a href="http://jira.pentaho.com/browse/MONDRIAN-1384">MONDRIAN-1384</a>
     */
    public void testUnionRoleHasInaccessibleDescendants() throws Exception {
        // Role1 grants nothing; all access comes from Role2, which grants
        // only [USA].[OR]. The union must still report that [USA] has
        // inaccessible descendants.
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " </SchemaGrant>\n"
                + "</Role>\n"
                + "<Role name=\"Role2\">\n"
                + " <SchemaGrant access=\"all\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"partial\">\n"
                + " <MemberGrant member=\"[Customers].[USA].[OR]\" access=\"all\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>\n");
        final Connection connection =
            testContext.withRole("Role1,Role2").getConnection();
        final Cube cube =
            connection.getSchema()
                .lookupCube("Sales", true);
        final HierarchyAccess accessDetails =
            connection.getRole().getAccessDetails(
                cube.lookupHierarchy(
                    new Id.NameSegment("Customers", Id.Quoting.UNQUOTED),
                    false));
        final SchemaReader scr =
            cube.getSchemaReader(null).withLocus();
        assertEquals(
            true,
            accessDetails.hasInaccessibleDescendants(
                scr.getMemberByUniqueName(
                    Util.parseIdentifier("[Customers].[USA]"),
                    true)));
    }
    /**
     * This is a test for
     * <a href="http://jira.pentaho.com/browse/MONDRIAN-1168">MONDRIAN-1168</a>
     * Union of roles would sometimes return levels which should be restricted
     * by ACL.
     */
    public void testRoleUnionWithLevelRestrictions() throws Exception {
        // Role1: schema access "all", but the [Customers] hierarchy is pinned
        // to a single level (topLevel == bottomLevel == [State Province]) with
        // a member grant on [USA].[CA]. Role2 grants nothing. The union must
        // not widen visibility beyond Role1's level restrictions.
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"all\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"Partial\" topLevel=\"[Customers].[State Province]\" bottomLevel=\"[Customers].[State Province]\">\n"
                + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"all\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>\n"
                + "<Role name=\"Role2\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " </SchemaGrant>\n"
                + "</Role>\n").withRole("Role1,Role2");
        // Only the granted [CA] member of the accessible level is visible.
        testContext.assertQueryReturns(
            "select {[Customers].[State Province].Members} on columns from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Customers].[USA].[CA]}\n"
            + "Row #0: 74,748\n");
        // [Country] is above topLevel, so its member list must be empty.
        testContext.assertQueryReturns(
            "select {[Customers].[Country].Members} on columns from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n");
        // Now inspect the metadata exposed through the schema reader under
        // the unioned role.
        SchemaReader reader =
            testContext.getConnection().getSchemaReader().withLocus();
        Cube cube = null;
        for (Cube c : reader.getCubes()) {
            if (c.getName().equals("Sales")) {
                cube = c;
            }
        }
        assertNotNull(cube);
        reader =
            cube.getSchemaReader(testContext.getConnection().getRole());
        final List<Dimension> dimensions =
            reader.getCubeDimensions(cube);
        Dimension dimension = null;
        for (Dimension dim : dimensions) {
            if (dim.getName().equals("Customers")) {
                dimension = dim;
            }
        }
        assertNotNull(dimension);
        Hierarchy hierarchy =
            reader.getDimensionHierarchies(dimension).get(0);
        assertNotNull(hierarchy);
        final List<Level> levels =
            reader.getHierarchyLevels(hierarchy);
        // Do some tests
        // Exactly one level is exposed, and both top and bottom depths
        // resolve to the [State Province] level's depth of 2.
        assertEquals(1, levels.size());
        assertEquals(
            2,
            testContext.getConnection()
                .getRole().getAccessDetails(hierarchy)
                .getBottomLevelDepth());
        assertEquals(
            2,
            testContext.getConnection()
                .getRole().getAccessDetails(hierarchy)
                .getTopLevelDepth());
    }
    /**
     * Test to verify that non empty crossjoins enforce role access.
     * Testcase for bug <a href="http://jira.pentaho.com/browse/MONDRIAN-369">
     * MONDRIAN-369, "Non Empty Crossjoin fails to enforce role access"</a>.
     */
    public void testNonEmptyAccess() {
        // Role1 can see only [Product].[Drink] inside the Sales cube.
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"Role1\">\n"
                + " <SchemaGrant access=\"none\">\n"
                + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
                + " <HierarchyGrant hierarchy=\"[Product]\" access=\"custom\">\n"
                + " <MemberGrant member=\"[Product].[Drink]\" access=\"all\"/>\n"
                + " </HierarchyGrant>\n"
                + " </CubeGrant>\n"
                + " </SchemaGrant>\n"
                + "</Role>")
            .withRole("Role1");
        // regular crossjoin returns the correct list of product children
        final String expected =
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Gender].[All Gender], [Product].[Drink]}\n"
            + "Row #0: 24,597\n";
        final String mdx =
            "select {[Measures].[Unit Sales]} ON COLUMNS, "
            + " Crossjoin({[Gender].[All Gender]}, "
            + "[Product].Children) ON ROWS "
            + "from [Sales]";
        testContext.assertQueryReturns(mdx, expected);
        checkQuery(testContext, mdx);
        // with bug MONDRIAN-397, non empty crossjoin did not return the correct
        // list
        final String mdx2 =
            "select {[Measures].[Unit Sales]} ON COLUMNS, "
            + "NON EMPTY Crossjoin({[Gender].[All Gender]}, "
            + "[Product].[All Products].Children) ON ROWS "
            + "from [Sales]";
        testContext.assertQueryReturns(mdx2, expected);
        checkQuery(testContext, mdx2);
    }
    /**
     * Verifies that {@code <Level>.Members} honors member grants: with access
     * limited to [Product].[Drink], both a regular crossjoin and (per bug
     * MONDRIAN-397) a NON EMPTY crossjoin must return only the granted
     * product family.
     */
    public void testNonEmptyAccessLevelMembers() {
        final TestContext testContext = TestContext.instance().create(
            null,
            null,
            null,
            null,
            null,
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Product]\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Product].[Drink]\" access=\"all\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>").withRole("Role1");
        // <Level>.members inside regular crossjoin returns the correct list of
        // product members
        final String expected =
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Gender].[All Gender], [Product].[Drink]}\n"
            + "Row #0: 24,597\n";
        final String mdx =
            "select {[Measures].[Unit Sales]} ON COLUMNS, "
            + " Crossjoin({[Gender].[All Gender]}, "
            + "[Product].[Product Family].Members) ON ROWS "
            + "from [Sales]";
        testContext.assertQueryReturns(mdx, expected);
        checkQuery(testContext, mdx);
        // with bug MONDRIAN-397, <Level>.members inside non empty crossjoin did
        // not return the correct list
        final String mdx2 =
            "select {[Measures].[Unit Sales]} ON COLUMNS, "
            + "NON EMPTY Crossjoin({[Gender].[All Gender]}, "
            + "[Product].[Product Family].Members) ON ROWS "
            + "from [Sales]";
        testContext.assertQueryReturns(mdx2, expected);
        checkQuery(testContext, mdx2);
    }
    /**
     * Testcase for bug <a href="http://jira.pentaho.com/browse/MONDRIAN-406">
     * MONDRIAN-406, "Rollup policy doesn't work for members
     * that are implicitly visible"</a>.
     *
     * <p>Runs the same query under the PARTIAL, FULL and HIDDEN rollup
     * policies (via {@link #goodmanContext}) and checks the per-policy
     * expected totals.
     */
    public void testGoodman() {
        final String query = "select {[Measures].[Unit Sales]} ON COLUMNS,\n"
            + "Hierarchize(Union(Union(Union({[Store].[All Stores]},"
            + " [Store].[All Stores].Children),"
            + " [Store].[All Stores].[USA].Children),"
            + " [Store].[All Stores].[USA].[CA].Children)) ON ROWS\n"
            + "from [Sales]\n"
            + "where [Time].[1997]";
        // Note that total for [Store].[All Stores] and [Store].[USA] is sum
        // of visible children [Store].[CA] and [Store].[OR].[Portland].
        final TestContext testContext =
            goodmanContext(Role.RollupPolicy.PARTIAL);
        testContext.assertQueryReturns(
            query,
            "Axis #0:\n"
            + "{[Time].[1997]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[All Stores]}\n"
            + "{[Store].[USA]}\n"
            + "{[Store].[USA].[CA]}\n"
            + "{[Store].[USA].[CA].[Alameda]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "{[Store].[USA].[OR]}\n"
            + "Row #0: 100,827\n"
            + "Row #1: 100,827\n"
            + "Row #2: 74,748\n"
            + "Row #3: \n"
            + "Row #4: 21,333\n"
            + "Row #5: 25,663\n"
            + "Row #6: 25,635\n"
            + "Row #7: 2,117\n"
            + "Row #8: 26,079\n");
        // FULL rollup: ancestor totals are the true cube totals, regardless
        // of which children are visible.
        goodmanContext(Role.RollupPolicy.FULL).assertQueryReturns(
            query,
            "Axis #0:\n"
            + "{[Time].[1997]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[All Stores]}\n"
            + "{[Store].[USA]}\n"
            + "{[Store].[USA].[CA]}\n"
            + "{[Store].[USA].[CA].[Alameda]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "{[Store].[USA].[OR]}\n"
            + "Row #0: 266,773\n"
            + "Row #1: 266,773\n"
            + "Row #2: 74,748\n"
            + "Row #3: \n"
            + "Row #4: 21,333\n"
            + "Row #5: 25,663\n"
            + "Row #6: 25,635\n"
            + "Row #7: 2,117\n"
            + "Row #8: 67,659\n");
        // HIDDEN rollup: any total that would include invisible children is
        // blanked out.
        goodmanContext(Role.RollupPolicy.HIDDEN).assertQueryReturns(
            query,
            "Axis #0:\n"
            + "{[Time].[1997]}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[All Stores]}\n"
            + "{[Store].[USA]}\n"
            + "{[Store].[USA].[CA]}\n"
            + "{[Store].[USA].[CA].[Alameda]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[Los Angeles]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "{[Store].[USA].[OR]}\n"
            + "Row #0: \n"
            + "Row #1: \n"
            + "Row #2: 74,748\n"
            + "Row #3: \n"
            + "Row #4: 21,333\n"
            + "Row #5: 25,663\n"
            + "Row #6: 25,635\n"
            + "Row #7: 2,117\n"
            + "Row #8: \n");
        checkQuery(testContext, query);
    }
private static TestContext goodmanContext(final Role.RollupPolicy policy) {
return
TestContext.instance().create(
null, null, null, null, null,
"<Role name=\"California manager\">\n"
+ " <SchemaGrant access=\"none\">\n"
+ " <CubeGrant cube=\"Sales\" access=\"all\">\n"
+ " <HierarchyGrant hierarchy=\"[Store]\" rollupPolicy=\""
+ policy.name().toLowerCase()
+ "\" access=\"custom\">\n"
+ " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>\n"
+ " <MemberGrant member=\"[Store].[USA].[OR].[Portland]\" access=\"all\"/>\n"
+ " </HierarchyGrant>"
+ " </CubeGrant>\n"
+ " </SchemaGrant>\n"
+ "</Role>")
.withRole("California manager");
}
/**
* Test case for bug <a href="http://jira.pentaho.com/browse/MONDRIAN-402">
* MONDRIAN-402, "Bug in RolapCubeHierarchy.hashCode() ?"</a>.
* Access-control elements for hierarchies with
* same name in different cubes could not be distinguished.
*/
public void testBugMondrian402() {
final TestContext testContext =
TestContext.instance().create(
null, null, null, null, null,
"<Role name=\"California manager\">\n"
+ " <SchemaGrant access=\"none\">\n"
+ " <CubeGrant cube=\"Sales\" access=\"all\">\n"
+ " <HierarchyGrant hierarchy=\"[Store]\" access=\"none\" />\n"
+ " </CubeGrant>\n"
+ " <CubeGrant cube=\"Sales Ragged\" access=\"all\">\n"
+ " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" />\n"
+ " </CubeGrant>\n"
+ " </SchemaGrant>\n"
+ "</Role>")
.withRole("California manager");
assertHierarchyAccess(
testContext.getConnection(), Access.NONE, "Sales", "Store");
assertHierarchyAccess(
testContext.getConnection(),
Access.CUSTOM,
"Sales Ragged",
"Store");
}
    /**
     * Tests the partial rollup policy on the parent-child [Employees]
     * hierarchy (HR cube): the totals reported for the [Store] and
     * [Employees] roots must reflect only the granted members.
     */
    public void testPartialRollupParentChildHierarchy() {
        final TestContext testContext = TestContext.instance().create(
            null, null, null, null, null,
            "<Role name=\"Buggy Role\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"HR\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Employees]\" access=\"custom\"\n"
            + " rollupPolicy=\"partial\">\n"
            + " <MemberGrant\n"
            + " member=\"[Employees].[All Employees].[Sheri Nowmer].[Darren Stanz]\"\n"
            + " access=\"all\"/>\n"
            + " </HierarchyGrant>\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\"\n"
            + " rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[Store].[All Stores].[USA].[CA]\" access=\"all\"/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>")
            .withRole("Buggy Role");
        // Rollup over the regular [Store] hierarchy.
        final String mdx = "select\n"
            + "  {[Measures].[Number of Employees]} on columns,\n"
            + "  {[Store]} on rows\n"
            + "from HR";
        testContext.assertQueryReturns(
            mdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Number of Employees]}\n"
            + "Axis #2:\n"
            + "{[Store].[All Stores]}\n"
            + "Row #0: 1\n");
        checkQuery(testContext, mdx);
        // Rollup over the parent-child [Employees] hierarchy.
        final String mdx2 = "select\n"
            + "  {[Measures].[Number of Employees]} on columns,\n"
            + "  {[Employees]} on rows\n"
            + "from HR";
        testContext.assertQueryReturns(
            mdx2,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Number of Employees]}\n"
            + "Axis #2:\n"
            + "{[Employees].[All Employees]}\n"
            + "Row #0: 1\n");
        checkQuery(testContext, mdx2);
    }
    /**
     * Tests a programmatic (user-defined) role over the parent-child
     * [Employees] hierarchy: visibility is scoped per employee and checked at
     * three depths — a top-level employee, a level-2 employee and a leaf
     * employee. The connection's original role is restored in the finally
     * block so later tests are unaffected.
     */
    public void testParentChildUserDefinedRole()
    {
        TestContext testContext = getTestContext().withCube("HR");
        final Connection connection = testContext.getConnection();
        final Role savedRole = connection.getRole();
        try {
            // Run queries as top-level employee.
            connection.setRole(
                new PeopleRole(
                    savedRole, connection.getSchema(), "Sheri Nowmer"));
            testContext.assertExprReturns(
                "[Employees].Members.Count",
                "1,156");
            // Level 2 employee
            connection.setRole(
                new PeopleRole(
                    savedRole, connection.getSchema(), "Derrick Whelply"));
            testContext.assertExprReturns(
                "[Employees].Members.Count",
                "605");
            testContext.assertAxisReturns(
                "Head([Employees].Members, 4),"
                + "Tail([Employees].Members, 2)",
                "[Employees].[All Employees]\n"
                + "[Employees].[Sheri Nowmer]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Beverly Baker]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Ed Young].[Gregory Whiting].[Merrill Steel]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Ed Young].[Gregory Whiting].[Melissa Marple]");
            // Leaf employee
            connection.setRole(
                new PeopleRole(
                    savedRole, connection.getSchema(), "Ann Weyerhaeuser"));
            testContext.assertExprReturns(
                "[Employees].Members.Count",
                "7");
            testContext.assertAxisReturns(
                "[Employees].Members",
                "[Employees].[All Employees]\n"
                + "[Employees].[Sheri Nowmer]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey].[Shanay Steelman]\n"
                + "[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey].[Shanay Steelman].[Ann Weyerhaeuser]");
        } finally {
            connection.setRole(savedRole);
        }
    }
/**
* Test case for
* <a href="http://jira.pentaho.com/browse/BISERVER-1574">BISERVER-1574,
* "Cube role rollupPolicy='partial' failure"</a>. The problem was a
* NullPointerException in
* {@link SchemaReader#getMemberParent(mondrian.olap.Member)} when called
* on a members returned in a result set. JPivot calls that method but
* Mondrian normally does not.
*/
public void testBugBiserver1574() {
final TestContext testContext =
TestContext.instance().create(
null, null, null, null, null, BiServer1574Role1)
.withRole("role1");
final String mdx =
"select {([Measures].[Store Invoice], [Store Size in SQFT].[All Store Size in SQFTs])} ON COLUMNS,\n"
+ " {[Warehouse].[All Warehouses]} ON ROWS\n"
+ "from [Warehouse]";
checkQuery(testContext, mdx);
testContext.assertQueryReturns(
mdx,
"Axis #0:\n"
+ "{}\n"
+ "Axis #1:\n"
+ "{[Measures].[Store Invoice], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
+ "Axis #2:\n"
+ "{[Warehouse].[All Warehouses]}\n"
+ "Row #0: 4,042.96\n");
}
    /**
     * Testcase for bug <a href="http://jira.pentaho.com/browse/MONDRIAN-435">
     * MONDRIAN-435, "Internal error in HierarchizeArrayComparator"</a>. Occurs
     * when apply Hierarchize function to tuples on a hierarchy with
     * partial-rollup.
     */
    public void testBugMondrian435() {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null, BiServer1574Role1)
            .withRole("role1");
        // minimal testcase
        testContext.assertQueryReturns(
            "select hierarchize("
            + "    crossjoin({[Store Size in SQFT], [Store Size in SQFT].Children}, {[Product]})"
            + ") on 0,"
            + "[Store Type].Members on 1 from [Warehouse]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[All Products]}\n"
            + "Axis #2:\n"
            + "{[Store Type].[All Store Types]}\n"
            + "{[Store Type].[Supermarket]}\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 4,042.96\n");
        // explicit tuples, not crossjoin
        testContext.assertQueryReturns(
            "select hierarchize("
            + "    { ([Store Size in SQFT], [Product]),\n"
            + "      ([Store Size in SQFT].[20319], [Product].[Food]),\n"
            + "      ([Store Size in SQFT], [Product].[Drink].[Dairy]),\n"
            + "      ([Store Size in SQFT].[20319], [Product]) }\n"
            + ") on 0,"
            + "[Store Type].Members on 1 from [Warehouse]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[Drink].[Dairy]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[Food]}\n"
            + "Axis #2:\n"
            + "{[Store Type].[All Store Types]}\n"
            + "{[Store Type].[Supermarket]}\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 82.454\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 2,696.758\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 82.454\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 2,696.758\n");
        // extended testcase; note that [Store Size in SQFT].Parent is null,
        // so disappears
        testContext.assertQueryReturns(
            "select non empty hierarchize("
            + "union("
            + "  union("
            + "    crossjoin({[Store Size in SQFT]}, {[Product]}),"
            + "    crossjoin({[Store Size in SQFT], [Store Size in SQFT].Children}, {[Product]}),"
            + "    all),"
            + "  union("
            + "    crossjoin({[Store Size in SQFT].Parent}, {[Product].[Drink]}),"
            + "    crossjoin({[Store Size in SQFT].Children}, {[Product].[Food]}),"
            + "    all),"
            + "  all)) on 0,"
            + "[Store Type].Members on 1 from [Warehouse]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[All Products]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[Food]}\n"
            + "Axis #2:\n"
            + "{[Store Type].[All Store Types]}\n"
            + "{[Store Type].[Supermarket]}\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 2,696.758\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 4,042.96\n"
            + "Row #1: 2,696.758\n");
        // Hierarchize with explicit PRE ordering over a triple crossjoin.
        testContext.assertQueryReturns(
            "select Hierarchize(\n"
            + "  CrossJoin\n("
            + "    CrossJoin(\n"
            + "      {[Product].[All Products], "
            + "       [Product].[Food],\n"
            + "       [Product].[Food].[Eggs],\n"
            + "       [Product].[Drink].[Dairy]},\n"
            + "      [Store Type].MEMBERS),\n"
            + "    [Store Size in SQFT].MEMBERS),\n"
            + "  PRE) on 0\n"
            + "from [Warehouse]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Product].[All Products], [Store Type].[All Store Types], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[All Products], [Store Type].[All Store Types], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[All Products], [Store Type].[Supermarket], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[All Products], [Store Type].[Supermarket], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Drink].[Dairy], [Store Type].[All Store Types], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Drink].[Dairy], [Store Type].[All Store Types], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Drink].[Dairy], [Store Type].[Supermarket], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Drink].[Dairy], [Store Type].[Supermarket], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Food], [Store Type].[All Store Types], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Food], [Store Type].[All Store Types], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Food], [Store Type].[Supermarket], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Food], [Store Type].[Supermarket], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Food].[Eggs], [Store Type].[All Store Types], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Food].[Eggs], [Store Type].[All Store Types], [Store Size in SQFT].[20319]}\n"
            + "{[Product].[Food].[Eggs], [Store Type].[Supermarket], [Store Size in SQFT].[All Store Size in SQFTs]}\n"
            + "{[Product].[Food].[Eggs], [Store Type].[Supermarket], [Store Size in SQFT].[20319]}\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 4,042.96\n"
            + "Row #0: 82.454\n"
            + "Row #0: 82.454\n"
            + "Row #0: 82.454\n"
            + "Row #0: 82.454\n"
            + "Row #0: 2,696.758\n"
            + "Row #0: 2,696.758\n"
            + "Row #0: 2,696.758\n"
            + "Row #0: 2,696.758\n"
            + "Row #0: \n"
            + "Row #0: \n"
            + "Row #0: \n"
            + "Row #0: \n");
    }
/**
* Testcase for bug <a href="http://jira.pentaho.com/browse/MONDRIAN-436">
* MONDRIAN-436, "SubstitutingMemberReader.getMemberBuilder gives
* UnsupportedOperationException"</a>.
*/
public void testBugMondrian436() {
propSaver.set(propSaver.properties.EnableNativeCrossJoin, true);
propSaver.set(propSaver.properties.EnableNativeFilter, true);
propSaver.set(propSaver.properties.EnableNativeNonEmpty, true);
propSaver.set(propSaver.properties.EnableNativeTopCount, true);
propSaver.set(propSaver.properties.ExpandNonNative, true);
// Run with native enabled, then with whatever properties are set for
// this test run.
checkBugMondrian436();
propSaver.reset();
checkBugMondrian436();
}
    /**
     * Core assertion for {@link #testBugMondrian436()}: runs a NON EMPTY
     * union/crossjoin query over the Warehouse cube under the partial-rollup
     * "role1" defined by the {@code BiServer1574Role1} fixture, and checks
     * the exact result set.
     */
    private void checkBugMondrian436() {
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null, BiServer1574Role1)
            .withRole("role1");
        testContext.assertQueryReturns(
            "select non empty {[Measures].[Units Ordered],\n"
            + "            [Measures].[Units Shipped]} on 0,\n"
            + "non empty hierarchize(\n"
            + "    union(\n"
            + "        crossjoin(\n"
            + "            {[Store Size in SQFT]},\n"
            + "            {[Product].[Drink],\n"
            + "             [Product].[Food],\n"
            + "             [Product].[Drink].[Dairy]}),\n"
            + "        crossjoin(\n"
            + "            {[Store Size in SQFT].[20319]},\n"
            + "            {[Product].Children}))) on 1\n"
            + "from [Warehouse]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Units Ordered]}\n"
            + "{[Measures].[Units Shipped]}\n"
            + "Axis #2:\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[Drink]}\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[Drink].[Dairy]}\n"
            + "{[Store Size in SQFT].[All Store Size in SQFTs], [Product].[Food]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[Drink]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[Food]}\n"
            + "{[Store Size in SQFT].[20319], [Product].[Non-Consumable]}\n"
            + "Row #0: 865.0\n"
            + "Row #0: 767.0\n"
            + "Row #1: 195.0\n"
            + "Row #1: 182.0\n"
            + "Row #2: 6065.0\n"
            + "Row #2: 5723.0\n"
            + "Row #3: 865.0\n"
            + "Row #3: 767.0\n"
            + "Row #4: 6065.0\n"
            + "Row #4: 5723.0\n"
            + "Row #5: 2179.0\n"
            + "Row #5: 2025.0\n");
    }
    /**
     * Tests that hierarchy-level access control works on a virtual cube.
     * See bug
     * <a href="http://jira.pentaho.com/browse/MONDRIAN-456">
     * MONDRIAN-456, "Roles and virtual cubes"</a>.
     */
    public void testVirtualCube() {
        // Grants are made against the virtual cube "Warehouse and Sales":
        // [Store] is limited to CA minus Los Angeles, [Customers] is limited
        // by top/bottom level as well as member grants, and [Gender] is
        // denied entirely.
        TestContext testContext = TestContext.instance().create(
            null, null, null, null, null,
            "<Role name=\"VCRole\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Warehouse and Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\"\n"
            + " topLevel=\"[Customers].[State Province]\" bottomLevel=\"[Customers].[City]\">\n"
            + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"all\"/>\n"
            + " <MemberGrant member=\"[Customers].[USA].[CA].[Los Angeles]\" access=\"none\"/>\n"
            + " </HierarchyGrant>\n"
            + " <HierarchyGrant hierarchy=\"[Gender]\" access=\"none\"/>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>").withRole("VCRole");
        // [Los Angeles] and its store are absent; all other CA cities appear.
        testContext.assertQueryReturns(
            "select [Store].Members on 0 from [Warehouse and Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Store].[All Stores]}\n"
            + "{[Store].[USA]}\n"
            + "{[Store].[USA].[CA]}\n"
            + "{[Store].[USA].[CA].[Alameda]}\n"
            + "{[Store].[USA].[CA].[Alameda].[HQ]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills].[Store 6]}\n"
            + "{[Store].[USA].[CA].[San Diego]}\n"
            + "{[Store].[USA].[CA].[San Diego].[Store 24]}\n"
            + "{[Store].[USA].[CA].[San Francisco]}\n"
            + "{[Store].[USA].[CA].[San Francisco].[Store 14]}\n"
            + "Row #0: 159,167.84\n"
            + "Row #0: 159,167.84\n"
            + "Row #0: 159,167.84\n"
            + "Row #0: \n"
            + "Row #0: \n"
            + "Row #0: 45,750.24\n"
            + "Row #0: 45,750.24\n"
            + "Row #0: 54,431.14\n"
            + "Row #0: 54,431.14\n"
            + "Row #0: 4,441.18\n"
            + "Row #0: 4,441.18\n");
    }
    /**
     * Tests the fix for
     * <a href="http://jira.pentaho.com/browse/BISERVER-2491">BISERVER-2491</a>:
     * with {@code rollupPolicy="partial"}, queries against members above the
     * granted member, and {@code Descendants()} down to the leaf level, used
     * to fail.
     */
    public void testBugBiserver2491() {
        final String BiServer2491Role2 =
            "<Role name=\"role2\">"
            + " <SchemaGrant access=\"none\">"
            + " <CubeGrant cube=\"Sales\" access=\"all\">"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">"
            + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>"
            + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"none\"/>"
            + " </HierarchyGrant>"
            + " </CubeGrant>"
            + " </SchemaGrant>"
            + "</Role>";
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null, BiServer2491Role2)
            .withRole("role2");
        // Query a level above the granted member: the partial total for [USA]
        // must exclude the denied [Los Angeles].
        final String firstBrokenMdx =
            "select [Measures].[Unit Sales] ON COLUMNS, {[Store].[Store Country].Members} ON ROWS from [Sales]";
        checkQuery(testContext, firstBrokenMdx);
        testContext.assertQueryReturns(
            firstBrokenMdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA]}\n"
            + "Row #0: 49,085\n");
        // Descendants() to the leaf level: only stores under the granted [CA]
        // (minus Los Angeles) appear.
        final String secondBrokenMdx =
            "select [Measures].[Unit Sales] ON COLUMNS, "
            + "Descendants([Store],[Store].[Store Name]) ON ROWS from [Sales]";
        checkQuery(testContext, secondBrokenMdx);
        testContext.assertQueryReturns(
            secondBrokenMdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Store].[USA].[CA].[Alameda].[HQ]}\n"
            + "{[Store].[USA].[CA].[Beverly Hills].[Store 6]}\n"
            + "{[Store].[USA].[CA].[San Diego].[Store 24]}\n"
            + "{[Store].[USA].[CA].[San Francisco].[Store 14]}\n"
            + "Row #0: \n"
            + "Row #1: 21,333\n"
            + "Row #2: 25,635\n"
            + "Row #3: 2,117\n");
    }
    /**
     * Test case for bug
     * <a href="http://jira.pentaho.com/browse/MONDRIAN-622">MONDRIAN-622,
     * "Poor performance with large union role"</a>.
     *
     * <p>Generates one role per customer city (each granting a single member
     * on three copies of the Customers dimension), unions them all into one
     * role, and executes a trivial query under that union. The commented-out
     * timing/println code below is a manual performance probe kept for
     * future investigation of this bug.
     */
    public void testBugMondrian622() {
        StringBuilder buf = new StringBuilder();
        StringBuilder buf2 = new StringBuilder();
        final String cubeName = "Sales with multiple customers";
        // Enumerate all cities to derive one role per city.
        final Result result = TestContext.instance().executeQuery(
            "select [Customers].[City].Members on 0 from [Sales]");
        for (Position position : result.getAxes()[0].getPositions()) {
            Member member = position.get(0);
            String name = member.getParentMember().getName()
                + "."
                + member.getName(); // e.g. "BC.Burnaby"
            // e.g. "[Customers].[State Province].[BC].[Burnaby]"
            String uniqueName =
                Util.replace(member.getUniqueName(), ".[All Customers]", "");
            // e.g. "[Customers2].[State Province].[BC].[Burnaby]"
            String uniqueName2 =
                Util.replace(uniqueName, "Customers", "Customers2");
            // e.g. "[Customers3].[State Province].[BC].[Burnaby]"
            String uniqueName3 =
                Util.replace(uniqueName, "Customers", "Customers3");
            // buf accumulates the per-city <Role> definitions; buf2 the
            // matching <RoleUsage> entries for the union role.
            buf.append(
                " <Role name=\"" + name + "\"> \n"
                + " <SchemaGrant access=\"none\"> \n"
                + " <CubeGrant access=\"all\" cube=\"" + cubeName
                + "\"> \n"
                + " <HierarchyGrant access=\"custom\" hierarchy=\"[Customers]\" rollupPolicy=\"partial\"> \n"
                + " <MemberGrant access=\"all\" member=\""
                + uniqueName + "\"/> \n"
                + " </HierarchyGrant> \n"
                + " <HierarchyGrant access=\"custom\" hierarchy=\"[Customers2]\" rollupPolicy=\"partial\"> \n"
                + " <MemberGrant access=\"all\" member=\""
                + uniqueName2 + "\"/> \n"
                + " </HierarchyGrant> \n"
                + " <HierarchyGrant access=\"custom\" hierarchy=\"[Customers3]\" rollupPolicy=\"partial\"> \n"
                + " <MemberGrant access=\"all\" member=\""
                + uniqueName3 + "\"/> \n"
                + " </HierarchyGrant> \n"
                + " </CubeGrant> \n"
                + " </SchemaGrant> \n"
                + " </Role> \n");
            buf2.append("  <RoleUsage roleName=\"" + name + "\"/>\n");
        }
        // Build a schema with a custom cube carrying three usages of the
        // Customers dimension, plus the generated roles and the union role.
        final TestContext testContext = TestContext.instance().create(
            " <Dimension name=\"Customers\"> \n"
            + " <Hierarchy hasAll=\"true\" primaryKey=\"customer_id\"> \n"
            + " <Table name=\"customer\"/> \n"
            + " <Level name=\"Country\" column=\"country\" uniqueMembers=\"true\"/> \n"
            + " <Level name=\"State Province\" column=\"state_province\" uniqueMembers=\"true\"/> \n"
            + " <Level name=\"City\" column=\"city\" uniqueMembers=\"false\"/> \n"
            + " <Level name=\"Name\" column=\"customer_id\" type=\"Numeric\" uniqueMembers=\"true\"/> \n"
            + " </Hierarchy> \n"
            + " </Dimension> ",
            " <Cube name=\"" + cubeName + "\"> \n"
            + " <Table name=\"sales_fact_1997\"/> \n"
            + " <DimensionUsage name=\"Time\" source=\"Time\" foreignKey=\"time_id\"/> \n"
            + " <DimensionUsage name=\"Product\" source=\"Product\" foreignKey=\"product_id\"/> \n"
            + " <DimensionUsage name=\"Customers\" source=\"Customers\" foreignKey=\"customer_id\"/> \n"
            + " <DimensionUsage name=\"Customers2\" source=\"Customers\" foreignKey=\"customer_id\"/> \n"
            + " <DimensionUsage name=\"Customers3\" source=\"Customers\" foreignKey=\"customer_id\"/> \n"
            + " <Measure name=\"Unit Sales\" column=\"unit_sales\" aggregator=\"sum\" formatString=\"Standard\"/> \n"
            + " </Cube> \n",
            null, null, null,
            buf.toString()
            + " <Role name=\"Test\"> \n"
            + " <Union>\n"
            + buf2.toString()
            + " </Union>\n"
            + " </Role>\n");
        final long t0 = System.currentTimeMillis();
        final TestContext testContext1 = testContext.withRole("Test");
        testContext1.executeQuery("select from [" + cubeName + "]");
        final long t1 = System.currentTimeMillis();
        // System.out.println("Elapsed=" + (t1 - t0) + " millis");
        // System.out.println(
        //     "RoleImpl.accessCount=" + RoleImpl.accessCallCount);
        // testContext1.executeQuery(
        //     "select from [Sales with multiple customers]");
        // final long t2 = System.currentTimeMillis();
        // System.out.println("Elapsed=" + (t2 - t1) + " millis");
        // System.out.println("RoleImpl.accessCount=" + RoleImpl.accessCallCount);
    }
    /**
     * Test case for bug
     * <a href="http://jira.pentaho.com/browse/MONDRIAN-694">MONDRIAN-694,
     * "Incorrect handling of child/parent relationship with hierarchy
     * grants"</a>.
     */
    public void testBugMondrian694() {
        // Parent-child [Employees] hierarchy: the all-member itself is denied
        // and only three leaf employees are granted, with partial rollup.
        final TestContext testContext =
            TestContext.instance().create(
                null, null, null, null, null,
                "<Role name=\"REG1\"> \n"
                + " <SchemaGrant access=\"none\"> \n"
                + " <CubeGrant cube=\"HR\" access=\"all\"> \n"
                + " <HierarchyGrant hierarchy=\"Employees\" access=\"custom\" rollupPolicy=\"partial\"> \n"
                + " <MemberGrant member=\"[Employees].[All Employees]\" access=\"none\"/>\n"
                + " <MemberGrant member=\"[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey].[Shanay Steelman].[Steven Betsekas]\" access=\"all\"/> \n"
                + " <MemberGrant member=\"[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey].[Shanay Steelman].[Arvid Ziegler]\" access=\"all\"/> \n"
                + " <MemberGrant member=\"[Employees].[Sheri Nowmer].[Derrick Whelply].[Laurie Borges].[Cody Goldey].[Shanay Steelman].[Ann Weyerhaeuser]\" access=\"all\"/> \n"
                + " </HierarchyGrant> \n"
                + " </CubeGrant> \n"
                + " </SchemaGrant> \n"
                + "</Role>")
            .withRole("REG1");
        // With bug MONDRIAN-694 returns 874.80, should return 79.20.
        // Test case is minimal: doesn't happen without the Crossjoin, or
        // without the NON EMPTY, or with [Employees] as opposed to
        // [Employees].[All Employees], or with [Department].[All Departments].
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Org Salary]} ON COLUMNS,\n"
            + "NON EMPTY Crossjoin({[Department].[14]}, {[Employees].[All Employees]}) ON ROWS\n"
            + "from [HR]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Org Salary]}\n"
            + "Axis #2:\n"
            + "{[Department].[14], [Employees].[All Employees]}\n"
            + "Row #0: $97.20\n");
        // This query gave the right answer, even with MONDRIAN-694.
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Org Salary]} ON COLUMNS, \n"
            + "NON EMPTY Hierarchize(Crossjoin({[Department].[14]}, {[Employees].[All Employees], [Employees].Children})) ON ROWS \n"
            + "from [HR] ",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Org Salary]}\n"
            + "Axis #2:\n"
            + "{[Department].[14], [Employees].[All Employees]}\n"
            + "{[Department].[14], [Employees].[Sheri Nowmer]}\n"
            + "Row #0: $97.20\n"
            + "Row #1: $97.20\n");
        // Original test case, not quite minimal. With MONDRIAN-694, returns
        // $874.80 for [All Employees].
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Org Salary]} ON COLUMNS, \n"
            + "NON EMPTY Hierarchize(Union(Crossjoin({[Department].[All Departments].[14]}, {[Employees].[All Employees]}), Crossjoin({[Department].[All Departments].[14]}, [Employees].[All Employees].Children))) ON ROWS \n"
            + "from [HR] ",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Org Salary]}\n"
            + "Axis #2:\n"
            + "{[Department].[14], [Employees].[All Employees]}\n"
            + "{[Department].[14], [Employees].[Sheri Nowmer]}\n"
            + "Row #0: $97.20\n"
            + "Row #1: $97.20\n");
        // Same members with the crossjoin arguments swapped.
        testContext.assertQueryReturns(
            "select NON EMPTY {[Measures].[Org Salary]} ON COLUMNS, \n"
            + "NON EMPTY Crossjoin(Hierarchize(Union({[Employees].[All Employees]}, [Employees].[All Employees].Children)), {[Department].[14]}) ON ROWS \n"
            + "from [HR] ",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Org Salary]}\n"
            + "Axis #2:\n"
            + "{[Employees].[All Employees], [Department].[14]}\n"
            + "{[Employees].[Sheri Nowmer], [Department].[14]}\n"
            + "Row #0: $97.20\n"
            + "Row #1: $97.20\n");
    }
/**
* Test case for bug
* <a href="http://jira.pentaho.com/browse/MONDRIAN-722">MONDRIAN-722, "If
* ignoreInvalidMembers=true, should ignore grants with invalid
* members"</a>.
*/
public void testBugMondrian722() {
    // With IgnoreInvalidMembers=true, grants naming members that do not
    // exist in the schema ([XX], [Yyy Yyyyyyy], [Zzz Zzzz]) must simply be
    // skipped instead of failing schema load.
    propSaver.set(
        MondrianProperties.instance().IgnoreInvalidMembers,
        true);
    TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"CTO\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\">\n"
        + " <MemberGrant member=\"[Customers].[USA].[XX]\" access=\"none\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA].[XX].[Yyy Yyyyyyy]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA]\" access=\"none\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"none\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA].[CA].[Zzz Zzzz]\" access=\"none\"/>\n"
        + " <MemberGrant member=\"[Customers].[USA].[CA].[San Francisco]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " <HierarchyGrant hierarchy=\"[Gender]\" access=\"none\"/>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>")
        .withRole("CTO")
        // Only the valid CA grants take effect: CA itself plus the two
        // cities explicitly granted "all".
        .assertQueryReturns(
            "select [Measures] on 0,\n"
            + " Hierarchize(\n"
            + " {[Customers].[USA].Children,\n"
            + " [Customers].[USA].[CA].Children}) on 1\n"
            + "from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Customers].[USA].[CA]}\n"
            + "{[Customers].[USA].[CA].[Los Angeles]}\n"
            + "{[Customers].[USA].[CA].[San Francisco]}\n"
            + "Row #0: 74,748\n"
            + "Row #1: 2,009\n"
            + "Row #2: 88\n");
}
/**
* Test case for bug
* <a href="http://jira.pentaho.com/browse/MONDRIAN-746">MONDRIAN-746,
* "Report returns stack trace when turning on subtotals on a hierarchy with
* top level hidden"</a>.
*/
public void testCalcMemberLevel() {
    // Run the check once without access control, then again under a role
    // whose topLevel hides the top of the [Store] hierarchy; the reported
    // level of a calculated member must be the same in both cases.
    checkCalcMemberLevel(getTestContext());
    checkCalcMemberLevel(
        TestContext.instance().create(
            null, null, null, null, null,
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"Partial\" topLevel=\"[Store].[Store State]\">\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>\n")
        .withRole("Role1"));
}
/**
* Test for bug MONDRIAN-568. Grants on OLAP elements are validated
* by name, thus granting implicit access to all cubes which have
* a dimension with the same name.
*/
public void testBugMondrian568() {
    // Role1 restricts [Measures] in cube "Sales"; Role2 has full access to
    // the unrelated cube "Sales Ragged". Because grants are validated by
    // name, Role2 must not leak access back into "Sales" when the roles
    // are combined.
    final TestContext testContext =
        TestContext.instance().create(
            null, null, null, null, null,
            "<Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"none\">\n"
            + " <HierarchyGrant hierarchy=\"[Measures]\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Measures].[Unit Sales]\" access=\"all\"/>\n"
            + " </HierarchyGrant>"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>\n"
            + "<Role name=\"Role2\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales Ragged\" access=\"all\"/>\n"
            + " </SchemaGrant>\n"
            + "</Role>");
    final TestContext testContextRole1 =
        testContext
            .withRole("Role1")
            .withCube("Sales");
    final TestContext testContextRole12 =
        testContext
            .withRole("Role1,Role2")
            .withCube("Sales");
    // [Store Cost] was never granted, so it stays inaccessible both for
    // Role1 alone and for the union of Role1 and Role2.
    assertMemberAccess(
        testContextRole1.getConnection(),
        Access.NONE,
        "[Measures].[Store Cost]");
    assertMemberAccess(
        testContextRole12.getConnection(),
        Access.NONE,
        "[Measures].[Store Cost]");
}
/**
 * Asserts that a calculated member defined as a child of a Store City
 * parent reports the same level ("Store City") as its real siblings
 * (regression check for MONDRIAN-746).
 *
 * @param testContext context (with or without access control) to query
 */
private void checkCalcMemberLevel(TestContext testContext) {
    final Result result = testContext.executeQuery(
        "with member [Store].[USA].[CA].[Foo] as\n"
        + " 1\n"
        + "select {[Measures].[Unit Sales]} on columns,\n"
        + "{[Store].[USA].[CA],\n"
        + " [Store].[USA].[CA].[Los Angeles],\n"
        + " [Store].[USA].[CA].[Foo]} on rows\n"
        + "from [Sales]");
    final List<Position> positions = result.getAxes()[1].getPositions();
    // Expected (member name, level name) per row, in axis order; the calc
    // member [Foo] must sit at the same level as [Los Angeles].
    final String[][] expected = {
        {"CA", "Store State"},
        {"Los Angeles", "Store City"},
        {"Foo", "Store City"},
    };
    for (int i = 0; i < expected.length; i++) {
        final Member member = positions.get(i).get(0);
        assertEquals(expected[i][0], member.getName());
        assertEquals(expected[i][1], member.getLevel().getName());
    }
}
/**
* Testcase for bug
* <a href="http://jira.pentaho.com/browse/MONDRIAN-935">MONDRIAN-935,
* "no results when some level members in a member grant have no data"</a>.
*/
public void testBugMondrian935() {
    // Member grants include [Customers].[USA].[CA].[Los Angeles], a level
    // member for which some siblings have no data; the query must still
    // return rows instead of an empty result.
    final TestContext testContext =
        TestContext.instance().create(
            null, null, null, null, null,
            "<Role name='Role1'>\n"
            + " <SchemaGrant access='none'>\n"
            + " <CubeGrant cube='Sales' access='all'>\n"
            + " <HierarchyGrant hierarchy='[Store Type]' access='custom' rollupPolicy='partial'>\n"
            + " <MemberGrant member='[Store Type].[All Store Types]' access='none'/>\n"
            + " <MemberGrant member='[Store Type].[Supermarket]' access='all'/>\n"
            + " </HierarchyGrant>\n"
            + " <HierarchyGrant hierarchy='[Customers]' access='custom' rollupPolicy='partial' >\n"
            + " <MemberGrant member='[Customers].[All Customers]' access='none'/>\n"
            + " <MemberGrant member='[Customers].[USA].[WA]' access='all'/>\n"
            + " <MemberGrant member='[Customers].[USA].[CA]' access='none'/>\n"
            + " <MemberGrant member='[Customers].[USA].[CA].[Los Angeles]' access='all'/>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role>\n");
    testContext.withRole("Role1").assertQueryReturns(
        "select [Measures] on 0,\n"
        + "[Customers].[USA].Children * [Store Type].Children on 1\n"
        + "from [Sales]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "Axis #2:\n"
        // CA rolls up only the granted [Los Angeles] (partial policy);
        // WA is granted wholesale.
        + "{[Customers].[USA].[CA], [Store Type].[Supermarket]}\n"
        + "{[Customers].[USA].[WA], [Store Type].[Supermarket]}\n"
        + "Row #0: 1,118\n"
        + "Row #1: 73,178\n");
}
/**
 * Verifies DimensionGrant semantics: a granted dimension is queryable,
 * an ungranted or denied dimension is not, and a "custom" grant on
 * [Measures] without member grants denies all measures.
 */
public void testDimensionGrant() throws Exception {
    final TestContext context = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Role1\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"custom\">\n"
        + " <DimensionGrant dimension=\"[Measures]\" access=\"all\" />\n"
        + " <DimensionGrant dimension=\"[Education Level]\" access=\"all\" />\n"
        + " <DimensionGrant dimension=\"[Gender]\" access=\"all\" />\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>\n"
        + "<Role name=\"Role2\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"custom\">\n"
        + " <DimensionGrant dimension=\"[Measures]\" access=\"all\" />\n"
        + " <DimensionGrant dimension=\"[Education Level]\" access=\"all\" />\n"
        + " <DimensionGrant dimension=\"[Customers]\" access=\"none\" />\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>\n"
        + "<Role name=\"Role3\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"custom\">\n"
        + " <DimensionGrant dimension=\"[Education Level]\" access=\"all\" />\n"
        + " <DimensionGrant dimension=\"[Measures]\" access=\"custom\" />\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>\n");
    // Granted dimension: all its members are visible.
    context.withRole("Role1").assertAxisReturns(
        "[Education Level].Members",
        "[Education Level].[All Education Levels]\n"
        + "[Education Level].[Bachelors Degree]\n"
        + "[Education Level].[Graduate Degree]\n"
        + "[Education Level].[High School Degree]\n"
        + "[Education Level].[Partial College]\n"
        + "[Education Level].[Partial High School]");
    // Ungranted (Role1) or explicitly denied (Role2) dimension: the
    // member reference cannot even be resolved, so parsing fails.
    context.withRole("Role1").assertAxisThrows(
        "[Customers].Members",
        "Mondrian Error:Failed to parse query 'select {[Customers].Members} on columns from Sales'");
    context.withRole("Role2").assertAxisThrows(
        "[Customers].Members",
        "Mondrian Error:Failed to parse query 'select {[Customers].Members} on columns from Sales'");
    context.withRole("Role1").assertQueryReturns(
        "select {[Education Level].Members} on columns, {[Measures].[Unit Sales]} on rows from Sales",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Education Level].[All Education Levels]}\n"
        + "{[Education Level].[Bachelors Degree]}\n"
        + "{[Education Level].[Graduate Degree]}\n"
        + "{[Education Level].[High School Degree]}\n"
        + "{[Education Level].[Partial College]}\n"
        + "{[Education Level].[Partial High School]}\n"
        + "Axis #2:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "Row #0: 266,773\n"
        + "Row #0: 68,839\n"
        + "Row #0: 15,570\n"
        + "Row #0: 78,664\n"
        + "Row #0: 24,545\n"
        + "Row #0: 79,155\n");
    // Role3 grants [Measures] with access="custom" but no member grants,
    // so no measure is accessible and the query fails.
    context.withRole("Role3").assertQueryThrows(
        "select {[Education Level].Members} on columns, {[Measures].[Unit Sales]} on rows from Sales",
        "Mondrian Error:Failed to parse query 'select {[Education Level].Members} on columns, {[Measures].[Unit Sales]} on rows from Sales'");
}
// ~ Inner classes =========================================================
/**
 * Role restricting the HR cube to a single sales representative's subtree
 * of the [Employees] hierarchy. The grant calls in
 * {@link #defineGrantsForUser} are order-dependent (later grants refine
 * earlier ones), so the sequence must not be reordered.
 */
public static class PeopleRole extends DelegatingRole {
    // Name fragment of the representative whose members are granted.
    private final String repName;
    public PeopleRole(Role role, Schema schema, String repName) {
        // Clone so the grants below do not mutate the caller's role.
        super(((RoleImpl)role).makeMutableClone());
        this.repName = repName;
        defineGrantsForUser(schema);
    }
    private void defineGrantsForUser(Schema schema) {
        RoleImpl role = (RoleImpl)this.role;
        // Deny everything, then selectively re-grant the HR cube.
        role.grant(schema, Access.NONE);
        Cube cube = schema.lookupCube("HR", true);
        role.grant(cube, Access.ALL);
        Hierarchy hierarchy = cube.lookupHierarchy(
            new Id.NameSegment("Employees"), false);
        Level[] levels = hierarchy.getLevels();
        // levels[1] is the first level below the all level.
        Level topLevel = levels[1];
        role.grant(hierarchy, Access.CUSTOM, null, null, RollupPolicy.FULL);
        role.grant(hierarchy.getAllMember(), Access.NONE);
        boolean foundMember = false;
        List <Member> members =
            schema.getSchemaReader().withLocus()
                .getLevelMembers(topLevel, true);
        // Grant every top-level member whose unique name contains the
        // representative's name segment.
        for (Member member : members) {
            if (member.getUniqueName().contains("[" + repName + "]")) {
                foundMember = true;
                role.grant(member, Access.ALL);
            }
        }
        // Guard against a typo'd repName silently granting nothing.
        assertTrue(foundMember);
    }
}
/**
* This is a test for MONDRIAN-1030. When the top level of a hierarchy
* is not accessible and a partial rollup policy is used, the results would
* be returned as those of the first member of those accessible only.
*
* <p>i.e.: If a union of roles gives access to two sibling root members
* and the level to which they belong is not included in a query, the
* returned cell data would be that of the first sibling and would exclude
* those of the second.
*
* <p>This is because the RolapEvaluator cannot represent default members
* as multiple members (only a single member is the default member) and
* because the default member is not the 'all member', it adds a constraint
* to the SQL for the first member only.
*
* <p>Currently, Mondrian disguises the root member in the evaluator as a
* RestrictedMemberReader.MultiCardinalityDefaultMember. Later,
* RolapHierarchy.LimitedRollupSubstitutingMemberReader will recognize it
* and use the correct rollup policy on the parent member to generate
* correct SQL.
*/
public void testMondrian1030() throws Exception {
    // mdx1 includes the [Customers] hierarchy on an axis; mdx2 omits it,
    // which is the case that exposed MONDRIAN-1030 (rollup of two sibling
    // default members granted by different roles).
    final String mdx1 =
        "With\n"
        + "Set [*NATIVE_CJ_SET] as 'NonEmptyCrossJoin([*BASE_MEMBERS_Customers],[*BASE_MEMBERS_Product])'\n"
        + "Set [*SORTED_ROW_AXIS] as 'Order([*CJ_ROW_AXIS],[Customers].CurrentMember.OrderKey,BASC,[Education Level].CurrentMember.OrderKey,BASC)'\n"
        + "Set [*BASE_MEMBERS_Customers] as '[Customers].[City].Members'\n"
        + "Set [*BASE_MEMBERS_Product] as '[Education Level].Members'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Set [*CJ_ROW_AXIS] as 'Generate([*NATIVE_CJ_SET], {([Customers].currentMember,[Education Level].currentMember)})'\n"
        + "Set [*CJ_COL_AXIS] as '[*NATIVE_CJ_SET]'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = '#,###', SOLVE_ORDER=400\n"
        + "Select\n"
        + "[*BASE_MEMBERS_Measures] on columns,\n"
        + "Non Empty [*SORTED_ROW_AXIS] on rows\n"
        + "From [Sales] \n";
    final String mdx2 =
        "With\n"
        + "Set [*BASE_MEMBERS_Product] as '[Education Level].Members'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = '#,###', SOLVE_ORDER=400\n"
        + "Select\n"
        + "[*BASE_MEMBERS_Measures] on columns,\n"
        + "Non Empty [*BASE_MEMBERS_Product] on rows\n"
        + "From [Sales] \n";
    // Role1 grants only [Coronado], Role2 only [Burbank]; both restrict
    // [Customers] to the City level with partial rollup.
    final TestContext context =
        getTestContext().create(
            null, null, null, null, null,
            " <Role name=\"Role1\">\n"
            + " <SchemaGrant access=\"all\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" topLevel=\"[Customers].[City]\" bottomLevel=\"[Customers].[City]\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[City].[Coronado]\" access=\"all\">\n"
            + " </MemberGrant>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + " </Role>\n"
            + " <Role name=\"Role2\">\n"
            + " <SchemaGrant access=\"all\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" topLevel=\"[Customers].[City]\" bottomLevel=\"[Customers].[City]\" rollupPolicy=\"partial\">\n"
            + " <MemberGrant member=\"[City].[Burbank]\" access=\"all\">\n"
            + " </MemberGrant>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + " </Role>\n");
    // Control tests
    context.withRole("Role1").assertQueryReturns(
        mdx1,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[All Education Levels]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Bachelors Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Graduate Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[High School Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Partial College]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Partial High School]}\n"
        + "Row #0: 2,391\n"
        + "Row #1: 559\n"
        + "Row #2: 205\n"
        + "Row #3: 551\n"
        + "Row #4: 253\n"
        + "Row #5: 823\n");
    context.withRole("Role2").assertQueryReturns(
        mdx1,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[All Education Levels]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Bachelors Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Graduate Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[High School Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Partial College]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Partial High School]}\n"
        + "Row #0: 3,086\n"
        + "Row #1: 914\n"
        + "Row #2: 126\n"
        + "Row #3: 1,029\n"
        + "Row #4: 286\n"
        + "Row #5: 731\n");
    context.withRole("Role1,Role2").assertQueryReturns(
        mdx1,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[All Education Levels]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Bachelors Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Graduate Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[High School Degree]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Partial College]}\n"
        + "{[Customers].[USA].[CA].[Burbank], [Education Level].[Partial High School]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[All Education Levels]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Bachelors Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Graduate Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[High School Degree]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Partial College]}\n"
        + "{[Customers].[USA].[CA].[Coronado], [Education Level].[Partial High School]}\n"
        + "Row #0: 3,086\n"
        + "Row #1: 914\n"
        + "Row #2: 126\n"
        + "Row #3: 1,029\n"
        + "Row #4: 286\n"
        + "Row #5: 731\n"
        + "Row #6: 2,391\n"
        + "Row #7: 559\n"
        + "Row #8: 205\n"
        + "Row #9: 551\n"
        + "Row #10: 253\n"
        + "Row #11: 823\n");
    // Actual tests: [Customers] absent from the query. The union role's
    // totals below must be the SUM of both cities, not just the first.
    context.withRole("Role1").assertQueryReturns(
        mdx2,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Education Level].[All Education Levels]}\n"
        + "{[Education Level].[Bachelors Degree]}\n"
        + "{[Education Level].[Graduate Degree]}\n"
        + "{[Education Level].[High School Degree]}\n"
        + "{[Education Level].[Partial College]}\n"
        + "{[Education Level].[Partial High School]}\n"
        + "Row #0: 2,391\n"
        + "Row #1: 559\n"
        + "Row #2: 205\n"
        + "Row #3: 551\n"
        + "Row #4: 253\n"
        + "Row #5: 823\n");
    context.withRole("Role2").assertQueryReturns(
        mdx2,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Education Level].[All Education Levels]}\n"
        + "{[Education Level].[Bachelors Degree]}\n"
        + "{[Education Level].[Graduate Degree]}\n"
        + "{[Education Level].[High School Degree]}\n"
        + "{[Education Level].[Partial College]}\n"
        + "{[Education Level].[Partial High School]}\n"
        + "Row #0: 3,086\n"
        + "Row #1: 914\n"
        + "Row #2: 126\n"
        + "Row #3: 1,029\n"
        + "Row #4: 286\n"
        + "Row #5: 731\n");
    context.withRole("Role1,Role2").assertQueryReturns(
        mdx2,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Education Level].[All Education Levels]}\n"
        + "{[Education Level].[Bachelors Degree]}\n"
        + "{[Education Level].[Graduate Degree]}\n"
        + "{[Education Level].[High School Degree]}\n"
        + "{[Education Level].[Partial College]}\n"
        + "{[Education Level].[Partial High School]}\n"
        + "Row #0: 5,477\n"
        + "Row #1: 1,473\n"
        + "Row #2: 331\n"
        + "Row #3: 1,580\n"
        + "Row #4: 539\n"
        + "Row #5: 1,554\n");
}
/**
* This is a test for
* <a href="http://jira.pentaho.com/browse/MONDRIAN-1030">MONDRIAN-1030</a>
* When a query is based on a level higher than one in the same hierarchy
* which has access controls, it would only constrain at the current level
* if the rollup policy of partial is used.
*
* <p>Example. A query on USA where only Los-Angeles is accessible would
* return the values for California instead of only LA.
*/
public void testBugMondrian1030_2() {
    // Only [Los Angeles] is granted; querying its ancestor [USA] with
    // partial rollup must return LA's value (2,009), not all of CA.
    TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Bacon\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Customers]\" access=\"custom\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Customers].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>")
        .withRole("Bacon")
        .assertQueryReturns(
            "select {[Measures].[Unit Sales]} on 0,\n"
            + " {[Customers].[USA]} on 1\n"
            + "from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Axis #2:\n"
            + "{[Customers].[USA]}\n"
            + "Row #0: 2,009\n");
}
/**
* Test for
* <a href="http://jira.pentaho.com/browse/MONDRIAN-1091">MONDRIAN-1091</a>
* The RoleImpl would try to search for member grants by object identity
* rather than unique name. When using the partial rollup policy, the
* members are wrapped, so identity checks would fail.
*/
public void testMondrian1091() throws Exception {
    // Partial rollup wraps members, so grant lookups must compare by
    // unique name, not object identity.
    final TestContext testContext = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Role1\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>")
        .withRole("Role1");
    testContext.assertQueryReturns(
        "select {[Store].Members} on columns from [Sales]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Store].[All Stores]}\n"
        + "{[Store].[USA]}\n"
        + "{[Store].[USA].[CA]}\n"
        + "{[Store].[USA].[CA].[Alameda]}\n"
        + "{[Store].[USA].[CA].[Alameda].[HQ]}\n"
        + "{[Store].[USA].[CA].[Beverly Hills]}\n"
        + "{[Store].[USA].[CA].[Beverly Hills].[Store 6]}\n"
        + "{[Store].[USA].[CA].[Los Angeles]}\n"
        + "{[Store].[USA].[CA].[Los Angeles].[Store 7]}\n"
        + "{[Store].[USA].[CA].[San Diego]}\n"
        + "{[Store].[USA].[CA].[San Diego].[Store 24]}\n"
        + "{[Store].[USA].[CA].[San Francisco]}\n"
        + "{[Store].[USA].[CA].[San Francisco].[Store 14]}\n"
        + "Row #0: 74,748\n"
        + "Row #0: 74,748\n"
        + "Row #0: 74,748\n"
        + "Row #0: \n"
        + "Row #0: \n"
        + "Row #0: 21,333\n"
        + "Row #0: 21,333\n"
        + "Row #0: 25,663\n"
        + "Row #0: 25,663\n"
        + "Row #0: 25,635\n"
        + "Row #0: 25,635\n"
        + "Row #0: 2,117\n"
        + "Row #0: 2,117\n");
    // Same schema through olap4j metadata: the (wrapped) all member must
    // still resolve and be the hierarchy's single root.
    org.olap4j.metadata.Cube cube =
        testContext.getOlap4jConnection()
            .getOlapSchema().getCubes().get("Sales");
    org.olap4j.metadata.Member allMember =
        cube.lookupMember(
            IdentifierNode.parseIdentifier("[Store].[All Stores]")
                .getSegmentList());
    assertNotNull(allMember);
    assertEquals(1, allMember.getHierarchy().getRootMembers().size());
    assertEquals(
        "[Store].[All Stores]",
        allMember.getHierarchy().getRootMembers().get(0).getUniqueName());
}
/**
* Unit test for
* <a href="http://jira.pentaho.com/browse/mondrian-1259">MONDRIAN-1259,
* "Mondrian security: access leaks from one user to another"</a>.
*
* <p>Enhancements made to the SmartRestrictedMemberReader were causing
* security leaks between roles and potential class cast exceptions.
*/
public void testMondrian1259() throws Exception {
    // Run the same query under two roles with disjoint [Store] grants;
    // each must see only its own state, proving no cross-role cache leak.
    final String mdx =
        "select non empty {[Store].Members} on columns from [Sales]";
    final TestContext testContext = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"Role1\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>"
        + "<Role name=\"Role2\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Store].[USA].[OR]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>");
    testContext.withRole("Role1").assertQueryReturns(
        mdx,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Store].[All Stores]}\n"
        + "{[Store].[USA]}\n"
        + "{[Store].[USA].[CA]}\n"
        + "{[Store].[USA].[CA].[Beverly Hills]}\n"
        + "{[Store].[USA].[CA].[Beverly Hills].[Store 6]}\n"
        + "{[Store].[USA].[CA].[Los Angeles]}\n"
        + "{[Store].[USA].[CA].[Los Angeles].[Store 7]}\n"
        + "{[Store].[USA].[CA].[San Diego]}\n"
        + "{[Store].[USA].[CA].[San Diego].[Store 24]}\n"
        + "{[Store].[USA].[CA].[San Francisco]}\n"
        + "{[Store].[USA].[CA].[San Francisco].[Store 14]}\n"
        + "Row #0: 74,748\n"
        + "Row #0: 74,748\n"
        + "Row #0: 74,748\n"
        + "Row #0: 21,333\n"
        + "Row #0: 21,333\n"
        + "Row #0: 25,663\n"
        + "Row #0: 25,663\n"
        + "Row #0: 25,635\n"
        + "Row #0: 25,635\n"
        + "Row #0: 2,117\n"
        + "Row #0: 2,117\n");
    // Role2, evaluated after Role1 on the same schema, must not inherit
    // any of Role1's CA visibility.
    testContext.withRole("Role2").assertQueryReturns(
        mdx,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Store].[All Stores]}\n"
        + "{[Store].[USA]}\n"
        + "{[Store].[USA].[OR]}\n"
        + "{[Store].[USA].[OR].[Portland]}\n"
        + "{[Store].[USA].[OR].[Portland].[Store 11]}\n"
        + "{[Store].[USA].[OR].[Salem]}\n"
        + "{[Store].[USA].[OR].[Salem].[Store 13]}\n"
        + "Row #0: 67,659\n"
        + "Row #0: 67,659\n"
        + "Row #0: 67,659\n"
        + "Row #0: 26,079\n"
        + "Row #0: 26,079\n"
        + "Row #0: 41,580\n"
        + "Row #0: 41,580\n");
}
/**
 * Verifies that access control on [Store] and [Customers] (partial
 * rollup, CA only) is applied consistently: a plain total, and the same
 * total through a WITH-set query that never mentions either controlled
 * hierarchy, must both show the restricted value.
 */
public void testMondrian1295() throws Exception {
    final String mdx =
        "With\n"
        + "Set [*NATIVE_CJ_SET] as 'NonEmptyCrossJoin([*BASE_MEMBERS_Time],[*BASE_MEMBERS_Product])'\n"
        + "Set [*SORTED_ROW_AXIS] as 'Order([*CJ_ROW_AXIS],Ancestor([Time].CurrentMember, [Time].[Year]).OrderKey,BASC,Ancestor([Time].CurrentMember, [Time].[Quarter]).OrderKey,BASC,[Time].CurrentMember.OrderKey,BASC,[Product].CurrentMember.OrderKey,BASC)'\n"
        + "Set [*BASE_MEMBERS_Product] as '{[Product].[All Products]}'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Set [*CJ_ROW_AXIS] as 'Generate([*NATIVE_CJ_SET], {([Time].currentMember,[Product].currentMember)})'\n"
        + "Set [*BASE_MEMBERS_Time] as '[Time].[Year].Members'\n"
        + "Set [*CJ_COL_AXIS] as '[*NATIVE_CJ_SET]'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = 'Standard', SOLVE_ORDER=400\n"
        + "Select\n"
        + "[*BASE_MEMBERS_Measures] on columns,\n"
        + "Non Empty [*SORTED_ROW_AXIS] on rows\n"
        + "From [Sales]\n";
    final TestContext context =
        getTestContext().create(
            null, null, null, null, null,
            "<Role name=\"Admin\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Store]\" rollupPolicy=\"partial\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Store].[USA].[CA]\" access=\"all\">\n"
            + " </MemberGrant>\n"
            + " </HierarchyGrant>\n"
            + " <HierarchyGrant hierarchy=\"[Customers]\" rollupPolicy=\"partial\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Customers].[USA].[CA]\" access=\"all\">\n"
            + " </MemberGrant>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + "</Role> \n");
    // Control: unrestricted total is 266,773; restricted total is 74,748.
    context
        .assertQueryReturns(
            "select {[Measures].[Unit Sales]} on columns from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Row #0: 266,773\n");
    context.withRole("Admin")
        .assertQueryReturns(
            "select {[Measures].[Unit Sales]} on columns from [Sales]",
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[Unit Sales]}\n"
            + "Row #0: 74,748\n");
    // Test: the complex WITH-set query must see the same restricted total.
    context.withRole("Admin")
        .assertQueryReturns(
            mdx,
            "Axis #0:\n"
            + "{}\n"
            + "Axis #1:\n"
            + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
            + "Axis #2:\n"
            + "{[Time].[1997], [Product].[All Products]}\n"
            + "Row #0: 74,748\n");
}
/**
 * Verifies slicer aggregation under access control (MONDRIAN-936):
 * a compound slicer over granted members returns the same value
 * regardless of member ordering, and matches an equivalent calculated
 * aggregate member.
 */
public void testMondrian936() throws Exception {
    final TestContext testContext = TestContext.instance().create(
        null, null, null, null, null,
        "<Role name=\"test\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store]\" access=\"custom\"\n"
        + " topLevel=\"[Store].[Store Country]\" rollupPolicy=\"partial\">\n"
        + " <MemberGrant member=\"[Store].[All Stores]\" access=\"none\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[CA].[Los Angeles]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[CA].[Alameda]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[CA].[Beverly Hills]\"\n"
        + "access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[CA].[San Francisco]\"\n"
        + "access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[CA].[San Diego]\" access=\"all\"/>\n"
        + "\n"
        + " <MemberGrant member=\"[Store].[USA].[OR].[Portland]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store].[USA].[OR].[Salem]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + "</Role>");
    testContext.withRole("test").assertQueryReturns(
        "select {[Measures].[Unit Sales]} on columns, "
        + " {[Product].[Food].[Baked Goods].[Bread]} on rows "
        + " from [Sales] "
        + " where { [Store].[USA].[OR], [Store].[USA].[CA]} ", "Axis #0:\n"
        + "{[Store].[USA].[OR]}\n"
        + "{[Store].[USA].[CA]}\n"
        + "Axis #1:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "Axis #2:\n"
        + "{[Product].[Food].[Baked Goods].[Bread]}\n"
        + "Row #0: 4,163\n");
    // changing ordering of members in the slicer should not change
    // result
    testContext.withRole("test").assertQueryReturns(
        "select {[Measures].[Unit Sales]} on columns, "
        + " {[Product].[Food].[Baked Goods].[Bread]} on rows "
        + " from [Sales] "
        + " where { [Store].[USA].[CA], [Store].[USA].[OR]} ", "Axis #0:\n"
        + "{[Store].[USA].[CA]}\n"
        + "{[Store].[USA].[OR]}\n"
        + "Axis #1:\n"
        + "{[Measures].[Unit Sales]}\n"
        + "Axis #2:\n"
        + "{[Product].[Food].[Baked Goods].[Bread]}\n"
        + "Row #0: 4,163\n");
    Result result = testContext.withRole("test").executeQuery(
        "with member store.aggCaliforniaOregon as "
        + "'aggregate({ [Store].[USA].[CA], [Store].[USA].[OR]})'"
        + " select store.aggCaliforniaOregon on 0 from sales");
    String valueAggMember = result
        .getCell(new int[] {0}).getFormattedValue();
    result = testContext.withRole("test").executeQuery(
        " select from sales where "
        + "{ [Store].[USA].[CA], [Store].[USA].[OR]}");
    String valueSlicerAgg = result
        .getCell(new int[] {}).getFormattedValue();
    // aggregating CA & OR in a calc member should produce same result
    // as aggregating in the slicer.
    // Use assertEquals (not assertTrue on equals) so a mismatch reports
    // both values and a null formatted value cannot throw NPE.
    assertEquals(valueSlicerAgg, valueAggMember);
}
/**
 * Verifies (MONDRIAN-1434) that a measure grant scoped to one cube does
 * not prevent queries against other cubes of the same schema, in either
 * direction. Success is simply the absence of an exception.
 */
public void testMondrian1434() {
    // Schema 1: measures restricted only in [Warehouse and Sales];
    // querying [Sales] must still work.
    final String warehouseRestrictedRole =
        "<Role name=\"dev\">"
        + " <SchemaGrant access=\"all\">"
        + " <CubeGrant cube=\"Sales\" access=\"all\">"
        + " </CubeGrant>"
        + " <CubeGrant cube=\"HR\" access=\"all\">"
        + " </CubeGrant>"
        + " <CubeGrant cube=\"Warehouse and Sales\" access=\"all\">"
        + " <HierarchyGrant hierarchy=\"Measures\" access=\"custom\">"
        + " <MemberGrant member=\"[Measures].[Warehouse Sales]\" access=\"all\">"
        + " </MemberGrant>"
        + " </HierarchyGrant>"
        + " </CubeGrant>"
        + " </SchemaGrant>"
        + "</Role>";
    final TestContext warehouseRestrictedContext = TestContext.instance()
        .create(null, null, null, null, null, warehouseRestrictedRole)
        .withRole("dev");
    warehouseRestrictedContext.executeQuery(
        " select from [Sales] where {[Measures].[Unit Sales]}");
    // Schema 2: the mirror case — measures restricted only in [Sales];
    // querying [Warehouse and Sales] must still work.
    final String salesRestrictedRole =
        "<Role name=\"dev\">"
        + " <SchemaGrant access=\"all\">"
        + " <CubeGrant cube=\"Sales\" access=\"all\">"
        + " <HierarchyGrant hierarchy=\"Measures\" access=\"custom\">"
        + " <MemberGrant member=\"[Measures].[Unit Sales]\" access=\"all\">"
        + " </MemberGrant>"
        + " </HierarchyGrant>"
        + " </CubeGrant>"
        + " <CubeGrant cube=\"HR\" access=\"all\">"
        + " </CubeGrant>"
        + " <CubeGrant cube=\"Warehouse and Sales\" access=\"all\">"
        + " </CubeGrant>"
        + " </SchemaGrant>"
        + "</Role>";
    final TestContext salesRestrictedContext = TestContext.instance()
        .create(null, null, null, null, null, salesRestrictedRole)
        .withRole("dev");
    salesRestrictedContext.executeQuery(
        " select from [Warehouse and Sales] where {[Measures].[Store Sales]}");
}
/**
* Fix for
* <a href="http://jira.pentaho.com/browse/MONDRIAN-1486">MONDRIAN-1486</a>
*
* When NECJ was used, a call to RolapNativeCrossJoin.createEvaluator
* would swap the {@link LimitedRollupMember} for the regular all member
* of the hierarchy, effectively removing security constraints.
*/
public void testMondrian1486() throws Exception {
    // NECJ-heavy query over [Gender]; only [Gender].[F] is granted, so
    // only the F rows may appear in the result.
    final String mdx =
        "With\n"
        + "Set [*NATIVE_CJ_SET] as 'NonEmptyCrossJoin([*BASE_MEMBERS_Gender],[*BASE_MEMBERS_Marital Status])'\n"
        + "Set [*SORTED_ROW_AXIS] as 'Order([*CJ_ROW_AXIS],[Gender].CurrentMember.OrderKey,BASC,[Marital Status].CurrentMember.OrderKey,BASC)'\n"
        + "Set [*BASE_MEMBERS_Gender] as '[Gender].[Gender].Members'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Set [*CJ_ROW_AXIS] as 'Generate([*NATIVE_CJ_SET], {([Gender].currentMember,[Marital Status].currentMember)})'\n"
        + "Set [*BASE_MEMBERS_Marital Status] as '[Marital Status].[Marital Status].Members'\n"
        + "Set [*CJ_COL_AXIS] as '[*NATIVE_CJ_SET]'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = 'Standard', SOLVE_ORDER=400\n"
        + "Select\n"
        + "[*BASE_MEMBERS_Measures] on columns,\n"
        + "Non Empty [*SORTED_ROW_AXIS] on rows\n"
        + "From [Sales]\n";
    final TestContext context =
        TestContext.instance().create(
            null, null, null, null, null,
            "<Role name=\"Admin\">\n"
            + " <SchemaGrant access=\"none\">\n"
            + " <CubeGrant cube=\"Sales\" access=\"all\">\n"
            + " <HierarchyGrant hierarchy=\"[Gender]\" rollupPolicy=\"partial\" access=\"custom\">\n"
            + " <MemberGrant member=\"[Gender].[F]\" access=\"all\">\n"
            + " </MemberGrant>\n"
            + " </HierarchyGrant>\n"
            + " </CubeGrant>\n"
            + " </SchemaGrant>\n"
            + " </Role>\n").withRole("Admin");
    context.assertQueryReturns(
        mdx,
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Axis #2:\n"
        + "{[Gender].[F], [Marital Status].[M]}\n"
        + "{[Gender].[F], [Marital Status].[S]}\n"
        + "Row #0: 65,336\n"
        + "Row #1: 66,222\n");
}
/**
 * Verifies that role-restricted results are identical across all rollup
 * policies, default-member settings and hasAll variants when native
 * evaluation (crossjoin / filter / non-empty / topcount) is enabled.
 * Connected with MONDRIAN-1568, which showed divergent results.
 */
public void testRollupPolicyWithNative() {
    // Verifies limited role-restricted results using
    // all variations of rollup policy
    // Also verifies consistent results with a non-all default member.
    // connected with MONDRIAN-1568
    propSaver.set(propSaver.properties.EnableNativeCrossJoin, true);
    propSaver.set(propSaver.properties.EnableNativeFilter, true);
    propSaver.set(propSaver.properties.EnableNativeNonEmpty, true);
    propSaver.set(propSaver.properties.EnableNativeTopCount, true);
    propSaver.set(propSaver.properties.ExpandNonNative, true);
    // %s placeholders: hasAll flag and (optional) defaultMember attribute.
    String dimension =
        "<Dimension name=\"Store2\">\n"
        + " <Hierarchy hasAll=\"%s\" primaryKey=\"store_id\" %s >\n"
        + " <Table name=\"store\"/>\n"
        + " <Level name=\"Store Country\" column=\"store_country\" uniqueMembers=\"true\"/>\n"
        + " <Level name=\"Store State\" column=\"store_state\" uniqueMembers=\"true\"/>\n"
        + " </Hierarchy>\n"
        + "</Dimension>\n";
    String cube =
        "<Cube name=\"TinySales\">\n"
        + " <Table name=\"sales_fact_1997\"/>\n"
        + " <DimensionUsage name=\"Product\" source=\"Product\" foreignKey=\"product_id\"/>\n"
        + " <DimensionUsage name=\"Store2\" source=\"Store2\" foreignKey=\"store_id\"/>\n"
        + " <Measure name=\"Unit Sales\" column=\"unit_sales\" aggregator=\"sum\"/>\n"
        + "</Cube>";
    // %s placeholder: the rollupPolicy under test.
    final String roleDefs =
        "<Role name=\"test\">\n"
        + " <SchemaGrant access=\"none\">\n"
        + " <CubeGrant cube=\"TinySales\" access=\"all\">\n"
        + " <HierarchyGrant hierarchy=\"[Store2]\" access=\"custom\"\n"
        + " rollupPolicy=\"%s\">\n"
        + " <MemberGrant member=\"[Store2].[USA].[CA]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store2].[USA].[OR]\" access=\"all\"/>\n"
        + " <MemberGrant member=\"[Store2].[Canada]\" access=\"all\"/>\n"
        + " </HierarchyGrant>\n"
        + " </CubeGrant>\n"
        + " </SchemaGrant>\n"
        + " </Role> ";
    String nonAllDefaultMem = "defaultMember=\"[Store2].[USA].[CA]\"";
    // Exercise the full cartesian product of policy x default member x hasAll.
    for (Role.RollupPolicy policy : Role.RollupPolicy.values()) {
        for (String defaultMember : new String[]{nonAllDefaultMem, "" }) {
            for (boolean hasAll : new Boolean[]{true, false}) {
                // Results in this test should be the same regardless
                // of rollupPolicy, default member, and whether there
                // is an all member, since the rollup is not included
                // in the test queries and context is explicitly set
                // for [Store2].
                // MONDRIAN-1568 showed different results with different
                // rollup policies and different default members
                final TestContext testContext2 = getTestContext().create(
                    // swap in hasAll and defaultMember
                    String.format(dimension, hasAll, defaultMember),
                    cube, null, null, null,
                    // swap in policy
                    String.format(roleDefs, policy)).withRole("test");
                // RolapNativeCrossjoin
                testContext2.assertQueryReturns(
                    String.format(
                        "Failure testing RolapNativeCrossJoin with "
                        + " rollupPolicy=%s, "
                        + "defaultMember=%s, hasAll=%s",
                        policy, defaultMember, hasAll),
                    "select NonEmptyCrossJoin([Store2].[Store State].MEMBERS,"
                    + "[Product].[Product Family].MEMBERS) on 0 from tinysales",
                    "Axis #0:\n"
                    + "{}\n"
                    + "Axis #1:\n"
                    + "{[Store2].[USA].[CA], [Product].[Drink]}\n"
                    + "{[Store2].[USA].[CA], [Product].[Food]}\n"
                    + "{[Store2].[USA].[CA], [Product].[Non-Consumable]}\n"
                    + "{[Store2].[USA].[OR], [Product].[Drink]}\n"
                    + "{[Store2].[USA].[OR], [Product].[Food]}\n"
                    + "{[Store2].[USA].[OR], [Product].[Non-Consumable]}\n"
                    + "Row #0: 7,102\n"
                    + "Row #0: 53,656\n"
                    + "Row #0: 13,990\n"
                    + "Row #0: 6,106\n"
                    + "Row #0: 48,537\n"
                    + "Row #0: 13,016\n");
                // RolapNativeFilter
                testContext2.assertQueryReturns(
                    String.format(
                        "Failure testing RolapNativeFilter with "
                        + "rollupPolicy=%s, "
                        + "defaultMember=%s, hasAll=%s",
                        policy, defaultMember, hasAll),
                    "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
                    + " Filter( [Store2].[USA].children,"
                    + " [Measures].[Unit Sales]>0) ON ROWS \n"
                    + "from [TinySales] \n",
                    "Axis #0:\n"
                    + "{}\n"
                    + "Axis #1:\n"
                    + "{[Measures].[Unit Sales]}\n"
                    + "Axis #2:\n"
                    + "{[Store2].[USA].[CA]}\n"
                    + "{[Store2].[USA].[OR]}\n"
                    + "Row #0: 74,748\n"
                    + "Row #1: 67,659\n");
                // RolapNativeTopCount
                testContext2.assertQueryReturns(
                    String.format(
                        "Failure testing RolapNativeTopCount with "
                        + " rollupPolicy=%s, "
                        + "defaultMember=%s, hasAll=%s",
                        policy, defaultMember, hasAll),
                    "select NON EMPTY {[Measures].[Unit Sales]} ON COLUMNS, \n"
                    + " TopCount( [Store2].[USA].children,"
                    + " 2) ON ROWS \n"
                    + "from [TinySales] \n",
                    "Axis #0:\n"
                    + "{}\n"
                    + "Axis #1:\n"
                    + "{[Measures].[Unit Sales]}\n"
                    + "Axis #2:\n"
                    + "{[Store2].[USA].[CA]}\n"
                    + "{[Store2].[USA].[OR]}\n"
                    + "Row #0: 74,748\n"
                    + "Row #1: 67,659\n");
            }
        }
    }
    // Restore the native-evaluation properties changed at the top.
    propSaver.reset();
}
/**
 * Tests that ValidMeasure() still returns results on a virtual cube when the
 * role denies access to all of the underlying base cubes (MONDRIAN-1616).
 */
public void testValidMeasureWithRestrictedCubes() {
    //http://jira.pentaho.com/browse/MONDRIAN-1616
    // Role grants the schema but denies every base cube of the
    // [Warehouse and Sales] virtual cube.
    final String roleDefs =
        "<Role name=\"noBaseCubes\">\n"
        + " <SchemaGrant access=\"all\">\n"
        + " <CubeGrant cube=\"Sales\" access=\"none\" />\n"
        + " <CubeGrant cube=\"Sales Ragged\" access=\"none\" />\n"
        + " <CubeGrant cube=\"Sales 2\" access=\"none\" />\n"
        + " <CubeGrant cube=\"Warehouse\" access=\"none\" />\n"
        + " </SchemaGrant>\n"
        + "</Role> ";
    final TestContext testContext = getTestContext().create(
        null, null, null, null, null, roleDefs).withRole("noBaseCubes");
    // ValidMeasure over a measure that lives in a restricted base cube.
    testContext.assertQueryReturns(
        "with member measures.vm as 'validmeasure(measures.[unit sales])' "
        + "select measures.vm on 0 from [warehouse and sales]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[vm]}\n"
        + "Row #0: 266,773\n");
    // Same, combined with a non-measure member on the axis.
    testContext.assertQueryReturns(
        "with member measures.vm as 'validmeasure(measures.[warehouse cost])' "
        + "select measures.vm * {gender.f} on 0 from [warehouse and sales]",
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Measures].[vm], [Gender].[F]}\n"
        + "Row #0: 89,043.253\n");
}
}
// End AccessControlTest.java
| epl-1.0 |
FauxFaux/jdk9-jdk | src/java.desktop/share/classes/java/awt/print/Printable.java | 6437 | /*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.awt.print;
import java.awt.Graphics;
/**
* The {@code Printable} interface is implemented
* by the {@code print} methods of the current
* page painter, which is called by the printing
* system to render a page. When building a
* {@link Pageable}, pairs of {@link PageFormat}
* instances and instances that implement
* this interface are used to describe each page. The
* instance implementing {@code Printable} is called to
* print the page's graphics.
* <p>
 * <p>
 * A {@code Printable(..)} may be set on a {@code PrinterJob}.
 * When the client subsequently initiates printing by calling
 * {@code PrinterJob.print(..)} control is handed to the printing system
 * until all pages have been printed.
* It does this by calling {@code Printable.print(..)} until
* all pages in the document have been printed.
* In using the {@code Printable} interface the printing
* commits to image the contents of a page whenever
* requested by the printing system.
* <p>
* The parameters to {@code Printable.print(..)} include a
* {@code PageFormat} which describes the printable area of
* the page, needed for calculating the contents that will fit the
* page, and the page index, which specifies the zero-based print
* stream index of the requested page.
* <p>
* For correct printing behaviour, the following points should be
* observed:
* <ul>
* <li> The printing system may request a page index more than once.
* On each occasion equal PageFormat parameters will be supplied.
*
* <li>The printing system will call {@code Printable.print(..)}
* with page indexes which increase monotonically, although as noted above,
* the {@code Printable} should expect multiple calls for a page index
* and that page indexes may be skipped, when page ranges are specified
* by the client, or by a user through a print dialog.
*
* <li>If multiple collated copies of a document are requested, and the
* printer cannot natively support this, then the document may be imaged
* multiple times. Printing will start each copy from the lowest print
* stream page index page.
*
* <li>With the exception of re-imaging an entire document for multiple
* collated copies, the increasing page index order means that when
* page N is requested if a client needs to calculate page break position,
* it may safely discard any state related to pages < N, and make current
* that for page N. "State" usually is just the calculated position in the
* document that corresponds to the start of the page.
*
* <li>When called by the printing system the {@code Printable} must
* inspect and honour the supplied PageFormat parameter as well as the
* page index. The format of the page to be drawn is specified by the
* supplied PageFormat. The size, orientation and imageable area of the page
* is therefore already determined and rendering must be within this
* imageable area.
* This is key to correct printing behaviour, and it has the
* implication that the client has the responsibility of tracking
* what content belongs on the specified page.
*
* <li>When the {@code Printable} is obtained from a client-supplied
* {@code Pageable} then the client may provide different PageFormats
* for each page index. Calculations of page breaks must account for this.
* </ul>
* @see java.awt.print.Pageable
* @see java.awt.print.PageFormat
* @see java.awt.print.PrinterJob
*/
public interface Printable {

    /**
     * Returned from {@link #print(Graphics, PageFormat, int)}
     * to signify that the requested page was rendered.
     */
    int PAGE_EXISTS = 0;

    /**
     * Returned from {@code print} to signify that the
     * {@code pageIndex} is too large and that the requested page
     * does not exist.
     */
    int NO_SUCH_PAGE = 1;

    /**
     * Prints the page at the specified index into the specified
     * {@link Graphics} context in the specified
     * format. A {@code PrinterJob} calls the
     * {@code Printable} interface to request that a page be
     * rendered into the context specified by
     * {@code graphics}. The format of the page to be drawn is
     * specified by {@code pageFormat}. The zero based index
     * of the requested page is specified by {@code pageIndex}.
     * If the requested page does not exist then this method returns
     * NO_SUCH_PAGE; otherwise PAGE_EXISTS is returned.
     * The {@code Graphics} class or subclass implements the
     * {@link PrinterGraphics} interface to provide additional
     * information. If the {@code Printable} object
     * aborts the print job then it throws a {@link PrinterException}.
     * @param graphics the context into which the page is drawn
     * @param pageFormat the size and orientation of the page being drawn
     * @param pageIndex the zero based index of the page to be drawn
     * @return PAGE_EXISTS if the page is rendered successfully
     * or NO_SUCH_PAGE if {@code pageIndex} specifies a
     * non-existent page.
     * @throws java.awt.print.PrinterException
     * thrown when the print job is terminated.
     */
    int print(Graphics graphics, PageFormat pageFormat, int pageIndex)
        throws PrinterException;
}
| gpl-2.0 |
manuranga/wso2-axis2 | modules/adb/src/org/apache/axis2/databinding/utils/reader/WrappingXMLStreamReader.java | 5712 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.axis2.databinding.utils.reader;
import javax.xml.namespace.NamespaceContext;
import javax.xml.namespace.QName;
import javax.xml.stream.Location;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
/**
 * Adapts a plain StAX {@link XMLStreamReader} to the {@link ADBXMLStreamReader}
 * contract by forwarding every parser operation to a wrapped instance. The
 * ADB-specific operations ({@link #addNamespaceContext(NamespaceContext)} and
 * {@link #init()}) are deliberate no-ops, because the wrapped parser already
 * carries its own namespace context and needs no extra initialisation.
 */
public class WrappingXMLStreamReader implements ADBXMLStreamReader {

    /** The underlying StAX parser; every call below is forwarded to it. */
    private XMLStreamReader delegate;

    public WrappingXMLStreamReader(XMLStreamReader delegate) {
        this.delegate = delegate;
    }

    /** The wrapped stream is done once the parser reports no further events. */
    public boolean isDone() {
        try {
            return !hasNext();
        } catch (XMLStreamException e) {
            throw new RuntimeException(e);
        }
    }

    public Object getProperty(String name) throws IllegalArgumentException {
        return delegate.getProperty(name);
    }

    public int next() throws XMLStreamException {
        return delegate.next();
    }

    /** Deliberate no-op: no event-type validation is performed on this wrapper. */
    public void require(int type, String namespaceURI, String localName) throws XMLStreamException {
    }

    public String getElementText() throws XMLStreamException {
        return delegate.getElementText();
    }

    public int nextTag() throws XMLStreamException {
        return delegate.nextTag();
    }

    public boolean hasNext() throws XMLStreamException {
        return delegate.hasNext();
    }

    public void close() throws XMLStreamException {
        delegate.close();
    }

    public String getNamespaceURI(String prefix) {
        return delegate.getNamespaceURI(prefix);
    }

    public boolean isStartElement() {
        return delegate.isStartElement();
    }

    public boolean isEndElement() {
        return delegate.isEndElement();
    }

    public boolean isCharacters() {
        return delegate.isCharacters();
    }

    public boolean isWhiteSpace() {
        return delegate.isWhiteSpace();
    }

    public String getAttributeValue(String namespaceURI, String localName) {
        return delegate.getAttributeValue(namespaceURI, localName);
    }

    public int getAttributeCount() {
        return delegate.getAttributeCount();
    }

    public QName getAttributeName(int index) {
        return delegate.getAttributeName(index);
    }

    public String getAttributeNamespace(int index) {
        return delegate.getAttributeNamespace(index);
    }

    public String getAttributeLocalName(int index) {
        return delegate.getAttributeLocalName(index);
    }

    public String getAttributePrefix(int index) {
        return delegate.getAttributePrefix(index);
    }

    public String getAttributeType(int index) {
        return delegate.getAttributeType(index);
    }

    public String getAttributeValue(int index) {
        return delegate.getAttributeValue(index);
    }

    public boolean isAttributeSpecified(int index) {
        return delegate.isAttributeSpecified(index);
    }

    public int getNamespaceCount() {
        return delegate.getNamespaceCount();
    }

    public String getNamespacePrefix(int index) {
        return delegate.getNamespacePrefix(index);
    }

    public String getNamespaceURI(int index) {
        return delegate.getNamespaceURI(index);
    }

    public NamespaceContext getNamespaceContext() {
        return delegate.getNamespaceContext();
    }

    public int getEventType() {
        return delegate.getEventType();
    }

    public String getText() {
        return delegate.getText();
    }

    public char[] getTextCharacters() {
        return delegate.getTextCharacters();
    }

    public int getTextCharacters(int sourceStart, char[] target, int targetStart, int length)
            throws XMLStreamException {
        return delegate.getTextCharacters(sourceStart, target, targetStart, length);
    }

    public int getTextStart() {
        return delegate.getTextStart();
    }

    public int getTextLength() {
        return delegate.getTextLength();
    }

    public String getEncoding() {
        return delegate.getEncoding();
    }

    public boolean hasText() {
        return delegate.hasText();
    }

    public Location getLocation() {
        return delegate.getLocation();
    }

    public QName getName() {
        return delegate.getName();
    }

    public String getLocalName() {
        return delegate.getLocalName();
    }

    public boolean hasName() {
        return delegate.hasName();
    }

    public String getNamespaceURI() {
        return delegate.getNamespaceURI();
    }

    public String getPrefix() {
        return delegate.getPrefix();
    }

    public String getVersion() {
        return delegate.getVersion();
    }

    public boolean isStandalone() {
        return delegate.isStandalone();
    }

    public boolean standaloneSet() {
        return delegate.standaloneSet();
    }

    public String getCharacterEncodingScheme() {
        return delegate.getCharacterEncodingScheme();
    }

    public String getPITarget() {
        return delegate.getPITarget();
    }

    public String getPIData() {
        return delegate.getPIData();
    }

    /** No-op: the wrapped parser already exposes its own namespace context. */
    public void addNamespaceContext(NamespaceContext nsContext) {
    }

    /** No-op: a plain parser requires no ADB-specific initialisation. */
    public void init() {
    }
}
| apache-2.0 |
YolandaMDavis/nifi | nifi-nar-bundles/nifi-aws-bundle/nifi-aws-abstract-processors/src/main/java/org/apache/nifi/processors/aws/wag/client/Validate.java | 538 | package org.apache.nifi.processors.aws.wag.client;
import com.amazonaws.util.StringUtils;
/**
 * Static precondition checks for API Gateway request parameters. Each check
 * throws {@link IllegalArgumentException} naming the offending field.
 */
public class Validate {

    /**
     * Ensures a string value is neither null nor empty.
     *
     * @param in the value to check
     * @param fieldName name used in the exception message
     * @throws IllegalArgumentException if {@code in} is null or empty
     */
    public static void notEmpty(String in, String fieldName) {
        // Inline null/empty check instead of com.amazonaws.util.StringUtils:
        // that package is SDK-internal, not a stable public API.
        if (in == null || in.isEmpty()) {
            throw new IllegalArgumentException(String.format("%s cannot be empty", fieldName));
        }
    }

    /**
     * Ensures a reference is non-null.
     *
     * @param in the reference to check
     * @param fieldName name used in the exception message
     * @throws IllegalArgumentException if {@code in} is null
     */
    public static void notNull(Object in, String fieldName) {
        if (in == null) {
            throw new IllegalArgumentException(String.format("%s cannot be null", fieldName));
        }
    }
}
| apache-2.0 |
Nimco/sling | testing/http/clients/src/main/java/org/apache/sling/testing/clients/osgi/Component.java | 1408 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.sling.testing.clients.osgi;
/**
 * Representation of an OSGi component as exposed by the web console.
 * Currently only models the component {@link Status}.
 */
public class Component {

    /**
     * Possible component states, mapped to the lower-case labels used in the
     * console output.
     */
    public enum Status {
        ACTIVE("active"),
        REGISTERED("registered"),
        UNSATISFIED("unsatisfied");

        // Console label for this state; final since an enum constant's label never changes.
        final String value;

        Status(String value) {
            this.value = value;
        }

        /**
         * Resolves a textual status label to the corresponding constant.
         *
         * @param o the label, compared case-insensitively
         * @return the matching {@link Status}, or {@code null} if none matches
         */
        public static Status value(String o) {
            for (Status s : values()) {
                if (s.value.equalsIgnoreCase(o)) {
                    return s;
                }
            }
            return null;
        }

        @Override
        public String toString() {
            return value;
        }
    }
}
| apache-2.0 |
vt09/bazel | src/test/java/com/google/devtools/build/lib/packages/TestSizeTest.java | 2682 | // Copyright 2012-2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.packages;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
 * Tests the various methods of {@link TestSize}.
 */
@RunWith(JUnit4.class)
public class TestSizeTest {

  @Test
  public void testBasicConversion() {
    // assertEquals takes (expected, actual): keep the literal constant as the
    // expected value so failure messages read correctly. The original calls
    // had the arguments inverted, unlike the other tests in this class.
    assertEquals(TestSize.SMALL, TestSize.valueOf("SMALL"));
    assertEquals(TestSize.MEDIUM, TestSize.valueOf("MEDIUM"));
    assertEquals(TestSize.LARGE, TestSize.valueOf("LARGE"));
    assertEquals(TestSize.ENORMOUS, TestSize.valueOf("ENORMOUS"));
  }

  @Test
  public void testGetDefaultTimeout() {
    assertEquals(TestTimeout.SHORT, TestSize.SMALL.getDefaultTimeout());
    assertEquals(TestTimeout.MODERATE, TestSize.MEDIUM.getDefaultTimeout());
    assertEquals(TestTimeout.LONG, TestSize.LARGE.getDefaultTimeout());
    assertEquals(TestTimeout.ETERNAL, TestSize.ENORMOUS.getDefaultTimeout());
  }

  @Test
  public void testGetDefaultShards() {
    assertEquals(2, TestSize.SMALL.getDefaultShards());
    assertEquals(10, TestSize.MEDIUM.getDefaultShards());
    assertEquals(20, TestSize.LARGE.getDefaultShards());
    assertEquals(30, TestSize.ENORMOUS.getDefaultShards());
  }

  @Test
  public void testGetTestSizeFromString() {
    // getTestSize(String) matches exact lower-case names only; anything else
    // yields null rather than an exception.
    assertNull(TestSize.getTestSize("Small"));
    assertNull(TestSize.getTestSize("Koala"));
    assertEquals(TestSize.SMALL, TestSize.getTestSize("small"));
    assertEquals(TestSize.MEDIUM, TestSize.getTestSize("medium"));
    assertEquals(TestSize.LARGE, TestSize.getTestSize("large"));
    assertEquals(TestSize.ENORMOUS, TestSize.getTestSize("enormous"));
  }

  @Test
  public void testGetTestSizeFromDefaultTimeout() {
    assertEquals(TestSize.SMALL, TestSize.getTestSize(TestTimeout.SHORT));
    assertEquals(TestSize.MEDIUM, TestSize.getTestSize(TestTimeout.MODERATE));
    assertEquals(TestSize.LARGE, TestSize.getTestSize(TestTimeout.LONG));
    assertEquals(TestSize.ENORMOUS, TestSize.getTestSize(TestTimeout.ETERNAL));
  }
}
| apache-2.0 |
FreshGrade/liquibase | liquibase-core/src/main/java/liquibase/executor/AbstractExecutor.java | 1329 | package liquibase.executor;
import liquibase.database.Database;
import liquibase.exception.DatabaseException;
import liquibase.exception.StatementNotSupportedOnDatabaseException;
import liquibase.sql.Sql;
import liquibase.sql.visitor.SqlVisitor;
import liquibase.sqlgenerator.SqlGeneratorFactory;
import liquibase.statement.SqlStatement;
import liquibase.util.StringUtils;
import java.util.List;
import java.util.Set;
/**
 * Base class for executors. Provides the shared machinery for turning a
 * {@link SqlStatement} into final SQL text: generation via
 * {@link SqlGeneratorFactory} followed by application of any
 * {@link SqlVisitor} modifications.
 */
public abstract class AbstractExecutor {

    /** Database the generated SQL is targeted at. */
    protected Database database;

    public void setDatabase(Database database) {
        this.database = database;
    }

    /**
     * Generates the SQL for the given statement and runs each piece through
     * the supplied visitors (if any).
     *
     * @param statement the statement to render
     * @param sqlVisitors visitors applied in order to each SQL string; may be null
     * @return one SQL string per generated piece; entries stay null for null pieces
     */
    protected String[] applyVisitors(SqlStatement statement, List<SqlVisitor> sqlVisitors) throws DatabaseException {
        Sql[] generated = SqlGeneratorFactory.getInstance().generateSql(statement, database);
        if (generated == null) {
            return new String[0];
        }

        String[] result = new String[generated.length];
        for (int index = 0; index < generated.length; index++) {
            Sql piece = generated[index];
            if (piece == null) {
                continue;
            }

            String sqlText = piece.toSql();
            if (sqlVisitors != null) {
                for (SqlVisitor visitor : sqlVisitors) {
                    sqlText = visitor.modifySql(sqlText, database);
                }
            }
            result[index] = sqlText;
        }
        return result;
    }
}
| apache-2.0 |
bryce-anderson/netty | transport/src/main/java/io/netty/channel/ChannelOutboundHandler.java | 4543 | /*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel;
import java.net.SocketAddress;
/**
* {@link ChannelHandler} which will get notified for IO-outbound-operations.
*/
/**
 * A {@link ChannelHandler} that is notified of outbound I/O operations
 * (bind, connect, disconnect, close, deregister, read, write, flush) as they
 * travel through the pipeline.
 */
public interface ChannelOutboundHandler extends ChannelHandler {
    /**
     * Called once a bind operation is made.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the bind operation is made
     * @param localAddress the {@link SocketAddress} to which it should bound
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) throws Exception;

    /**
     * Called once a connect operation is made.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the connect operation is made
     * @param remoteAddress the {@link SocketAddress} to which it should connect
     * @param localAddress the {@link SocketAddress} which is used as source on connect
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void connect(
        ChannelHandlerContext ctx, SocketAddress remoteAddress,
        SocketAddress localAddress, ChannelPromise promise) throws Exception;

    /**
     * Called once a disconnect operation is made.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the disconnect operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception;

    /**
     * Called once a close operation is made.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the close operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception;

    /**
     * Called once a deregister operation is made from the current registered {@link EventLoop}.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the deregister operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void deregister(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception;

    /**
     * Intercepts {@link ChannelHandlerContext#read()}.
     */
    void read(ChannelHandlerContext ctx) throws Exception;

    /**
     * Called once a write operation is made. The write operation will write the messages through the
     * {@link ChannelPipeline}. Those are then ready to be flushed to the actual {@link Channel} once
     * {@link Channel#flush()} is called
     *
     * @param ctx the {@link ChannelHandlerContext} for which the write operation is made
     * @param msg the message to write
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     * @throws Exception thrown if an error occurs
     */
    void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception;

    /**
     * Called once a flush operation is made. The flush operation will try to flush out all previous written messages
     * that are pending.
     *
     * @param ctx the {@link ChannelHandlerContext} for which the flush operation is made
     * @throws Exception thrown if an error occurs
     */
    void flush(ChannelHandlerContext ctx) throws Exception;
}
| apache-2.0 |
jekey/presto | presto-main/src/main/java/com/facebook/presto/execution/QueryQueue.java | 4172 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.execution;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import io.airlift.concurrent.AsyncSemaphore;
import org.weakref.jmx.Managed;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static com.facebook.presto.execution.SqlQueryManager.addCompletionCallback;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Bounds the number of queued and concurrently running queries. A permit is
 * reserved before a query is admitted ({@link #reserve}) and returned when the
 * query completes; {@link #enqueue} additionally bounds the queue itself and
 * hands entries to an {@link AsyncSemaphore} that starts them as concurrency
 * slots free up.
 */
public class QueryQueue
{
    private final int maxQueuedQueries;

    // Number of queries currently sitting in the queue (admitted, not yet started).
    private final AtomicInteger queryQueueSize = new AtomicInteger();

    // Permits covering both queued and running queries; initialized to
    // maxQueuedQueries + maxConcurrentQueries in the constructor.
    private final AtomicInteger queuePermits;

    // Limits how many queued executions run concurrently.
    private final AsyncSemaphore<QueueEntry> asyncSemaphore;

    QueryQueue(Executor queryExecutor, int maxQueuedQueries, int maxConcurrentQueries)
    {
        checkNotNull(queryExecutor, "queryExecutor is null");
        checkArgument(maxQueuedQueries > 0, "maxQueuedQueries must be greater than zero");
        checkArgument(maxConcurrentQueries > 0, "maxConcurrentQueries must be greater than zero");

        this.maxQueuedQueries = maxQueuedQueries;
        this.queuePermits = new AtomicInteger(maxQueuedQueries + maxConcurrentQueries);
        this.asyncSemaphore = new AsyncSemaphore<>(maxConcurrentQueries,
                queryExecutor,
                queueEntry -> {
                    QueuedExecution queuedExecution = queueEntry.dequeue();
                    if (queuedExecution != null) {
                        queuedExecution.start();
                        return queuedExecution.getCompletionFuture();
                    }
                    return Futures.immediateFuture(null);
                });
    }

    @Managed
    public int getQueueSize()
    {
        return queryQueueSize.get();
    }

    /**
     * Reserves a permit for the given query, releasing it automatically when
     * the query completes.
     *
     * @return false if the permit pool is exhausted
     */
    public boolean reserve(QueryExecution queryExecution)
    {
        // decrementAndGet() yields the count *after* taking the permit; a negative
        // value means the pool was already empty, so undo the decrement and reject.
        // BUG FIX: the previous check, getAndDecrement() < 0, tested the value
        // *before* the decrement and therefore admitted one query more than the
        // configured maxQueuedQueries + maxConcurrentQueries capacity.
        if (queuePermits.decrementAndGet() < 0) {
            queuePermits.incrementAndGet();
            return false;
        }

        // Return the permit once the query finishes, successfully or not.
        addCompletionCallback(queryExecution, queuePermits::incrementAndGet);
        return true;
    }

    /**
     * Admits the execution to the queue and submits it for eventual start.
     *
     * @return false if the queue is already at maxQueuedQueries
     */
    public boolean enqueue(QueuedExecution queuedExecution)
    {
        if (queryQueueSize.incrementAndGet() > maxQueuedQueries) {
            queryQueueSize.decrementAndGet();
            return false;
        }

        // Add a callback to dequeue the entry if it is ever completed.
        // This enables us to remove the entry sooner if is cancelled before starting,
        // and has no effect if called after starting.
        QueueEntry entry = new QueueEntry(queuedExecution, queryQueueSize::decrementAndGet);
        queuedExecution.getCompletionFuture().addListener(entry::dequeue, MoreExecutors.directExecutor());
        asyncSemaphore.submit(entry);
        return true;
    }

    private static class QueueEntry
    {
        // Holds the execution until it is dequeued exactly once; null afterwards.
        private final AtomicReference<QueuedExecution> queryExecution;
        private final Runnable onDequeue;

        private QueueEntry(QueuedExecution queuedExecution, Runnable onDequeue)
        {
            checkNotNull(queuedExecution, "queueableExecution is null");
            this.queryExecution = new AtomicReference<>(queuedExecution);
            this.onDequeue = checkNotNull(onDequeue, "onDequeue is null");
        }

        /**
         * Atomically removes and returns the execution, running the onDequeue
         * callback the first time only; subsequent calls return null.
         */
        public QueuedExecution dequeue()
        {
            QueuedExecution value = queryExecution.getAndSet(null);
            if (value != null) {
                onDequeue.run();
            }
            return value;
        }
    }
}
| apache-2.0 |
rancherio/cattle | modules/caas/common/src/main/java/io/cattle/platform/docker/constants/DockerIpAddressConstants.java | 145 | package io.cattle.platform.docker.constants;
/**
 * Constants for IP addresses managed by the Docker integration.
 */
public class DockerIpAddressConstants {

    // Kind identifier for IP addresses allocated by Docker.
    public static final String KIND_DOCKER = "docker";
}
| apache-2.0 |
iamthearm/bazel | src/main/java/com/google/devtools/build/lib/profiler/SingleStatRecorder.java | 2947 | // Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.profiler.MetricData.HistogramElement;
import com.google.devtools.build.lib.util.Preconditions;
/**
 * A stat recorder that can record time histograms, count of calls, average time, Std. Deviation
 * and max time.
 */
@ThreadSafe
public class SingleStatRecorder implements StatRecorder {

  // Total number of buckets, including the [0, 1) bucket and the open-ended top bucket.
  private final int buckets;

  // Opaque description echoed back in snapshots to identify this metric.
  private final Object description;

  // histogram[0] counts values in [0, 1); histogram[i] counts values in
  // [2^(i-1), 2^i); the last bucket absorbs everything at or above its start.
  private int[] histogram;

  // Running statistics maintained with Welford's online algorithm: sample
  // count, running mean, and sum of squared deviations from the mean (m2).
  // All reads and writes are guarded by synchronized (this).
  private int count = 0;
  private double avg = 0.0;
  private double m2 = 0.0;
  private int max = -1;

  public SingleStatRecorder(Object description, int buckets) {
    this.description = description;
    Preconditions.checkArgument(buckets > 1, "At least two buckets (one for bellow start and one"
        + "for above start) are required");
    this.buckets = buckets;
    histogram = new int[buckets];
  }

  /** Creates an immutable snapshot of the stats recorded up to now. */
  public MetricData snapshot() {
    synchronized (this) {
      ImmutableList.Builder<HistogramElement> result = ImmutableList.builder();
      // First bucket covers [0, 1); each following bucket doubles the range.
      result.add(new HistogramElement(Range.closedOpen(0, 1), histogram[0]));
      int from = 1;
      for (int i = 1; i < histogram.length - 1; i++) {
        int to = from << 1;
        result.add(new HistogramElement(Range.closedOpen(from, to), histogram[i]));
        from = to;
      }
      // Top bucket is open-ended.
      result.add(new HistogramElement(Range.atLeast(from), histogram[histogram.length - 1]));
      // Standard deviation derived from Welford's m2 (population form: m2 / count).
      return new MetricData(description, result.build(), count, avg,
          Math.sqrt(m2 / (double) count), max);
    }
  }

  @Override
  public void addStat(int duration, Object obj) {
    // Bucket index is the position of the highest set bit; durations too large
    // for the configured bucket count all land in the open-ended top bucket.
    int histogramBucket = Math.min(32 - Integer.numberOfLeadingZeros(duration), buckets - 1);
    synchronized (this) {
      count++;
      // Welford's online update of the mean and of m2.
      double delta = duration - avg;
      avg += delta / count;
      m2 += delta * (duration - avg);
      if (duration > max) {
        max = duration;
      }
      histogram[histogramBucket]++;
    }
  }

  @Override
  public boolean isEmpty() {
    return snapshot().getCount() == 0;
  }

  @Override
  public String toString() {
    return snapshot().toString();
  }
}
| apache-2.0 |
rancherio/cattle | modules/framework/src/main/java/io/cattle/platform/eventing/EventProgress.java | 156 | package io.cattle.platform.eventing;
import io.cattle.platform.eventing.model.Event;
/**
 * Callback used to report intermediate progress while an event is being processed.
 */
public interface EventProgress {

    /**
     * Reports a progress update.
     *
     * @param event the progress event; presumably carries partial status for the
     *     operation in flight — TODO confirm against implementers
     */
    void progress(Event event);
}
| apache-2.0 |
nboukhed/camel | components/camel-docker/src/test/java/org/apache/camel/component/docker/headers/PullImageCmdHeaderTest.java | 2737 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.docker.headers;
import java.util.Map;
import com.github.dockerjava.api.command.PullImageCmd;
import com.github.dockerjava.core.command.PullImageResultCallback;
import org.apache.camel.component.docker.DockerConstants;
import org.apache.camel.component.docker.DockerOperation;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
/**
 * Validates Pull Image Request headers are applied properly
 */
public class PullImageCmdHeaderTest extends BaseDockerHeaderTest<PullImageCmd> {

    @Mock
    private PullImageCmd mockObject;

    @Mock
    private PullImageResultCallback callback;

    /**
     * Sends a message with repository/tag/registry headers and verifies each value
     * is forwarded to the corresponding PullImageCmd builder method exactly once.
     */
    @Test
    public void pullImageHeaderTest() {
        String repository = "docker/empty";
        String tag = "1.0";
        String registry = "registry";

        Map<String, Object> headers = getDefaultParameters();
        headers.put(DockerConstants.DOCKER_REPOSITORY, repository);
        headers.put(DockerConstants.DOCKER_TAG, tag);
        headers.put(DockerConstants.DOCKER_REGISTRY, registry);

        template.sendBodyAndHeaders("direct:in", "", headers);

        Mockito.verify(dockerClient, Mockito.times(1)).pullImageCmd(repository);
        Mockito.verify(mockObject, Mockito.times(1)).withTag(Matchers.eq(tag));
        Mockito.verify(mockObject, Mockito.times(1)).withRegistry(Matchers.eq(registry));
    }

    @Override
    protected void setupMocks() {
        Mockito.when(dockerClient.pullImageCmd(Matchers.anyString())).thenReturn(mockObject);
        Mockito.when(mockObject.exec(Matchers.anyObject())).thenReturn(callback);
        try {
            Mockito.when(callback.awaitCompletion()).thenReturn(callback);
        } catch (InterruptedException e) {
            // Was previously swallowed with printStackTrace(); restore the interrupt
            // status and fail the setup loudly instead of continuing with a half-stubbed mock.
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while stubbing pull image callback", e);
        }
    }

    @Override
    protected DockerOperation getOperation() {
        return DockerOperation.PULL_IMAGE;
    }
}
| apache-2.0 |
irudyak/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousWithTransformerRandomOperationsTest.java | 1381 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.query.continuous;
import org.apache.ignite.cache.query.AbstractContinuousQuery;
import org.apache.ignite.cache.query.ContinuousQueryWithTransformer;
/**
 * Test to check random continuous query operation for ContinuousQueryWithTransformer
 */
public class CacheContinuousWithTransformerRandomOperationsTest extends CacheContinuousQueryRandomOperationsTest {
    /**
     * {@inheritDoc}
     *
     * Overridden so the parent's random-operation scenarios run against a
     * {@link ContinuousQueryWithTransformer} instead of a plain continuous query.
     */
    @Override protected <K, V> AbstractContinuousQuery<K, V> createQuery() {
        return new ContinuousQueryWithTransformer<>();
    }
}
| apache-2.0 |
mcgilman/nifi | nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/AbstractCommandGroup.java | 3404 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.toolkit.cli.impl.command;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.lang3.Validate;
import org.apache.nifi.toolkit.cli.api.Command;
import org.apache.nifi.toolkit.cli.api.CommandGroup;
import org.apache.nifi.toolkit.cli.api.Context;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.Collections;
import java.util.List;
/**
 * Base class for CommandGroups to extend from.
 */
public abstract class AbstractCommandGroup implements CommandGroup {

    /** Wrap width used when printing verbose usage. */
    private static final int HELP_WIDTH = 80;

    /** Horizontal rule printed between commands in verbose usage output. */
    private static final String SEPARATOR =
            "-------------------------------------------------------------------------------";

    private final String name;
    private PrintStream output;
    private List<Command> commands;

    /**
     * @param name the name of this command group; must not be blank
     */
    public AbstractCommandGroup(final String name) {
        this.name = name;
        Validate.notBlank(this.name);
    }

    @Override
    public final void initialize(final Context context) {
        Validate.notNull(context);
        this.output = context.getOutput();
        this.commands = Collections.unmodifiableList(createCommands());
        this.commands.forEach(c -> c.initialize(context));
    }

    /**
     * Sub-classes override to provide the appropriate commands for the given group.
     *
     * @return the list of commands for this group
     */
    protected abstract List<Command> createCommands();

    @Override
    public String getName() {
        return this.name;
    }

    @Override
    public List<Command> getCommands() {
        return this.commands;
    }

    @Override
    public void printUsage(final boolean verbose) {
        if (verbose) {
            // Verbose mode: one wrapped, separator-delimited section per command.
            final PrintWriter printWriter = new PrintWriter(output);
            final HelpFormatter hf = new HelpFormatter();
            hf.setWidth(HELP_WIDTH);

            commands.forEach(c -> {
                hf.printWrapped(printWriter, HELP_WIDTH, SEPARATOR);
                hf.printWrapped(printWriter, HELP_WIDTH, "COMMAND: " + getName() + " " + c.getName());
                hf.printWrapped(printWriter, HELP_WIDTH, "");
                hf.printWrapped(printWriter, HELP_WIDTH, "- " + c.getDescription());
                hf.printWrapped(printWriter, HELP_WIDTH, "");
                if (c.isReferencable()) {
                    hf.printWrapped(printWriter, HELP_WIDTH, "PRODUCES BACK-REFERENCES");
                    hf.printWrapped(printWriter, HELP_WIDTH, "");
                }
            });

            printWriter.flush();
        } else {
            // Terse mode: one "<group> <command>" line per command.
            commands.forEach(c -> output.println("\t" + getName() + " " + c.getName()));
        }
        output.flush();
    }
}
| apache-2.0 |
rokn/Count_Words_2015 | testing/openjdk2/jdk/test/javax/swing/JScrollBar/7163696/Test7163696.java | 3641 | /*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 7163696
* @summary Tests that JScrollBar scrolls to the left
* @author Sergey Malenkov
*/
import sun.awt.SunToolkit;
import java.awt.Dimension;
import java.awt.Point;
import java.awt.Robot;
import java.awt.Toolkit;
import java.awt.event.InputEvent;
import javax.swing.JFrame;
import javax.swing.JScrollBar;
import javax.swing.SwingUtilities;
import javax.swing.UIManager;
import javax.swing.UIManager.LookAndFeelInfo;
public class Test7163696 implements Runnable {

    // True when run under jtreg (test.src is set); only then do we fail hard.
    private static final boolean AUTO = null != System.getProperty("test.src", null);

    public static void main(String[] args) throws Exception {
        new Test7163696().test();
    }

    // Shared between the EDT (run()) and the main test thread; non-null while a
    // frame with a scroll bar is showing, nulled once the value check passes.
    private JScrollBar bar;

    /**
     * For every installed Look&Feel: builds a frame with a horizontal scroll bar
     * (value 50), clicks in the left quarter of the track, and expects the value
     * to page down to 40. If the bar field is still set after validation, the
     * scroll did not happen.
     */
    private void test() throws Exception {
        Robot robot = new Robot();
        SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit();
        for (LookAndFeelInfo info : UIManager.getInstalledLookAndFeels()) {
            UIManager.setLookAndFeel(info.getClassName());

            SwingUtilities.invokeAndWait(this);
            toolkit.realSync(); // after creation
            Thread.sleep(1000);

            // Click point: a quarter of the way across the bar, vertically centered —
            // inside the track left of the thumb, which should trigger a page-down.
            Point point = this.bar.getLocation();
            SwingUtilities.convertPointToScreen(point, this.bar);
            point.x += this.bar.getWidth() >> 2;
            point.y += this.bar.getHeight() >> 1;

            robot.mouseMove(point.x, point.y);
            robot.mousePress(InputEvent.BUTTON1_MASK);
            robot.mouseRelease(InputEvent.BUTTON1_MASK);

            toolkit.realSync(); // before validation
            Thread.sleep(1000);

            SwingUtilities.invokeAndWait(this);
            if (this.bar != null) {
                this.bar = null; // allows to reuse the instance
                if (AUTO) { // error reporting only for automatic testing
                    throw new Error("TEST FAILED");
                }
            }
        }
    }

    /**
     * Runs on the EDT in two phases: first invocation creates the frame and
     * scroll bar; second invocation validates the post-click value (40) and, on
     * success, disposes the frame and clears {@code bar}.
     */
    public void run() {
        if (this.bar == null) {
            this.bar = new JScrollBar(JScrollBar.HORIZONTAL, 50, 10, 0, 100);
            this.bar.setPreferredSize(new Dimension(400, 20));

            JFrame frame = new JFrame();
            frame.add(this.bar);
            frame.pack();
            frame.setVisible(true);
        }
        else if (40 != this.bar.getValue()) {
            // Leave this.bar set so test() can detect the failure.
            System.out.println("name = " + UIManager.getLookAndFeel().getName());
            System.out.println("value = " + this.bar.getValue());
        }
        else {
            SwingUtilities.getWindowAncestor(this.bar).dispose();
            this.bar = null;
        }
    }
}
| mit |
riccardobl/lombok | src/core/lombok/eclipse/handlers/HandleSynchronized.java | 7256 | /*
* Copyright (C) 2009-2014 The Project Lombok Authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package lombok.eclipse.handlers;
import static lombok.core.handlers.HandlerUtil.*;
import static lombok.eclipse.handlers.EclipseHandlerUtil.*;
import java.lang.reflect.Modifier;
import lombok.ConfigurationKeys;
import lombok.Synchronized;
import lombok.core.AnnotationValues;
import lombok.core.HandlerPriority;
import lombok.core.AST.Kind;
import lombok.eclipse.DeferUntilPostDiet;
import lombok.eclipse.EclipseAnnotationHandler;
import lombok.eclipse.EclipseNode;
import org.eclipse.jdt.internal.compiler.ast.Annotation;
import org.eclipse.jdt.internal.compiler.ast.ArrayAllocationExpression;
import org.eclipse.jdt.internal.compiler.ast.Block;
import org.eclipse.jdt.internal.compiler.ast.Expression;
import org.eclipse.jdt.internal.compiler.ast.FieldDeclaration;
import org.eclipse.jdt.internal.compiler.ast.FieldReference;
import org.eclipse.jdt.internal.compiler.ast.MethodDeclaration;
import org.eclipse.jdt.internal.compiler.ast.QualifiedNameReference;
import org.eclipse.jdt.internal.compiler.ast.QualifiedTypeReference;
import org.eclipse.jdt.internal.compiler.ast.Statement;
import org.eclipse.jdt.internal.compiler.ast.SynchronizedStatement;
import org.eclipse.jdt.internal.compiler.ast.ThisReference;
import org.eclipse.jdt.internal.compiler.lookup.TypeConstants;
import org.mangosdk.spi.ProviderFor;
/**
 * Handles the {@code lombok.Synchronized} annotation for eclipse.
 */
@ProviderFor(EclipseAnnotationHandler.class)
@DeferUntilPostDiet
@HandlerPriority(value = 1024) // 2^10; @NonNull must have run first, so that we wrap around the statements generated by it.
public class HandleSynchronized extends EclipseAnnotationHandler<Synchronized> {
	// Default lock field names when @Synchronized names no field explicitly:
	// $lock for instance methods, $LOCK for static methods.
	private static final char[] INSTANCE_LOCK_NAME = "$lock".toCharArray();
	private static final char[] STATIC_LOCK_NAME = "$LOCK".toCharArray();

	/**
	 * Pre-handle pass: generate the lock field early so it already exists when the
	 * type is otherwise processed. Errors are deliberately not reported here;
	 * handle() repeats the lookup with reportErrors = true.
	 */
	@Override public void preHandle(AnnotationValues<Synchronized> annotation, Annotation source, EclipseNode annotationNode) {
		EclipseNode methodNode = annotationNode.up();
		if (methodNode == null || methodNode.getKind() != Kind.METHOD || !(methodNode.get() instanceof MethodDeclaration)) return;
		MethodDeclaration method = (MethodDeclaration)methodNode.get();
		if (method.isAbstract()) return;

		createLockField(annotation, annotationNode, method.isStatic(), false);
	}

	/**
	 * Resolves (and if necessary generates) the lock field to synchronize on.
	 *
	 * @param isStatic whether the annotated method is static (selects $LOCK over $lock)
	 * @param reportErrors whether a missing user-named field is reported as an error
	 * @return the lock field's name, or null when a user-named field does not exist
	 */
	public char[] createLockField(AnnotationValues<Synchronized> annotation, EclipseNode annotationNode, boolean isStatic, boolean reportErrors) {
		char[] lockName = annotation.getInstance().value().toCharArray();
		Annotation source = (Annotation) annotationNode.get();
		boolean autoMake = false;
		if (lockName.length == 0) {
			autoMake = true;
			lockName = isStatic ? STATIC_LOCK_NAME : INSTANCE_LOCK_NAME;
		}

		if (fieldExists(new String(lockName), annotationNode) == MemberExistsResult.NOT_EXISTS) {
			if (!autoMake) {
				// The user explicitly named a field, so we may not auto-create it.
				if (reportErrors) annotationNode.addError(String.format("The field %s does not exist.", new String(lockName)));
				return null;
			}
			// Generate: private [static] final Object[] $lock = new Object[0];
			FieldDeclaration fieldDecl = new FieldDeclaration(lockName, 0, -1);
			setGeneratedBy(fieldDecl, source);
			fieldDecl.declarationSourceEnd = -1;

			fieldDecl.modifiers = (isStatic ? Modifier.STATIC : 0) | Modifier.FINAL | Modifier.PRIVATE;

			//We use 'new Object[0];' because unlike 'new Object();', empty arrays *ARE* serializable!
			ArrayAllocationExpression arrayAlloc = new ArrayAllocationExpression();
			setGeneratedBy(arrayAlloc, source);
			arrayAlloc.dimensions = new Expression[] { makeIntLiteral("0".toCharArray(), source) };
			arrayAlloc.type = new QualifiedTypeReference(TypeConstants.JAVA_LANG_OBJECT, new long[] { 0, 0, 0 });
			setGeneratedBy(arrayAlloc.type, source);
			fieldDecl.type = new QualifiedTypeReference(TypeConstants.JAVA_LANG_OBJECT, new long[] { 0, 0, 0 });
			setGeneratedBy(fieldDecl.type, source);
			fieldDecl.initialization = arrayAlloc;
			// TODO temporary workaround for issue 217. http://code.google.com/p/projectlombok/issues/detail?id=217
			// injectFieldSuppressWarnings(annotationNode.up().up(), fieldDecl);
			injectField(annotationNode.up().up(), fieldDecl);
		}

		return lockName;
	}

	/**
	 * Main handle pass: wraps the entire method body in
	 * {@code synchronized (lockField) { ... }}.
	 */
	@Override public void handle(AnnotationValues<Synchronized> annotation, Annotation source, EclipseNode annotationNode) {
		handleFlagUsage(annotationNode, ConfigurationKeys.SYNCHRONIZED_FLAG_USAGE, "@Synchronized");

		// Synthetic source positions derived from the annotation's start offset.
		int p1 = source.sourceStart -1;
		int p2 = source.sourceStart -2;
		long pos = (((long)p1) << 32) | p2;
		EclipseNode methodNode = annotationNode.up();
		if (methodNode == null || methodNode.getKind() != Kind.METHOD || !(methodNode.get() instanceof MethodDeclaration)) {
			annotationNode.addError("@Synchronized is legal only on methods.");
			return;
		}

		MethodDeclaration method = (MethodDeclaration)methodNode.get();
		if (method.isAbstract()) {
			annotationNode.addError("@Synchronized is legal only on concrete methods.");
			return;
		}

		char[] lockName = createLockField(annotation, annotationNode, method.isStatic(), true);
		if (lockName == null) return;
		if (method.statements == null) return;

		// Move the original statements into a block that becomes the synchronized body.
		Block block = new Block(0);
		setGeneratedBy(block, source);
		block.statements = method.statements;

		// Positions for in-method generated nodes are special
		block.sourceEnd = method.bodyEnd;
		block.sourceStart = method.bodyStart;

		Expression lockVariable;
		if (method.isStatic()) lockVariable = new QualifiedNameReference(new char[][] {
				methodNode.up().getName().toCharArray(), lockName }, new long[] { pos, pos }, p1, p2);
		else {
			// Instance method: synchronize on this.<lockName>.
			lockVariable = new FieldReference(lockName, pos);
			ThisReference thisReference = new ThisReference(p1, p2);
			setGeneratedBy(thisReference, source);
			((FieldReference)lockVariable).receiver = thisReference;
		}
		setGeneratedBy(lockVariable, source);

		method.statements = new Statement[] {
				new SynchronizedStatement(lockVariable, block, 0, 0)
		};

		// Positions for in-method generated nodes are special
		method.statements[0].sourceEnd = method.bodyEnd;
		method.statements[0].sourceStart = method.bodyStart;
		setGeneratedBy(method.statements[0], source);

		methodNode.rebuild();
	}
}
| mit |
ouit0408/sakai | msgcntr/messageforums-api/src/java/org/sakaiproject/api/app/messageforums/AnonymousMapping.java | 598 | package org.sakaiproject.api.app.messageforums;
/**
 * A user's anonymous ID must remain consistent across a site (so they can be graded consistently).
 * But the anonymous ID should be different in other sites to prevent any way to deduce identities based on mutual enrollments.
 * So, each row maps siteIds to userIds to anonIds
 * @author bbailla2
 */
public interface AnonymousMapping
{
	/** @return the ID of the site this mapping applies to */
	public String getSiteId();

	/** @param siteId the ID of the site this mapping applies to */
	public void setSiteId(String siteId);

	/** @return the real (non-anonymous) user ID */
	public String getUserId();

	/** @param userId the real (non-anonymous) user ID */
	public void setUserId(String userId);

	/** @return the anonymous ID displayed in place of the user's identity within this site */
	public String getAnonId();

	/** @param anonId the anonymous ID displayed in place of the user's identity within this site */
	public void setAnonId(String anonId);
}
| apache-2.0 |
ebegoli/drill | exec/vector/src/main/java/org/apache/drill/exec/exception/OversizedAllocationException.java | 1867 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.exception;
import org.apache.drill.common.exceptions.DrillRuntimeException;
/**
 * An exception that is used to signal that an allocation request in bytes is greater than the
 * maximum allowed by the {@link org.apache.drill.exec.memory.BufferAllocator#buffer(int) allocator}.
 *
 * <p>Operators should handle this exception to split the batch and later resume the execution on
 * the next {@code RecordBatch#next()} iteration. (Fully-qualified reference omitted here because
 * RecordBatch is not on this module's classpath.)</p>
 */
public class OversizedAllocationException extends DrillRuntimeException {
  public OversizedAllocationException() {
    super();
  }

  public OversizedAllocationException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
    super(message, cause, enableSuppression, writableStackTrace);
  }

  public OversizedAllocationException(String message, Throwable cause) {
    super(message, cause);
  }

  public OversizedAllocationException(String message) {
    super(message);
  }

  public OversizedAllocationException(Throwable cause) {
    super(cause);
  }
}
| apache-2.0 |
paplorinc/intellij-community | platform/editor-ui-api/src/com/intellij/ide/ui/ColorBlindnessMatrix.java | 4845 | package com.intellij.ide.ui;
import com.intellij.util.Matrix;
import com.intellij.util.Vector;
/**
 * Builds the RGB-space correction matrices used to simulate and daltonize the three
 * dichromatic forms of color blindness (protanopia, deuteranopia, tritanopia).
 *
 * @author Sergey.Malenkov
 */
final class ColorBlindnessMatrix {
  private static final Matrix CORRECTION = Matrix.createIdentity(3);
  private static final Matrix RGB_LMS; // a matrix to convert a RGB color to the LMS space
  private static final Matrix LMS_RGB; // a matrix to convert a LMS color to the RGB space
  private static final Vector WHITE_BLUE;
  private static final Vector WHITE_RED;

  static {
    // a matrix to convert a RGB color to the XYZ space
    Matrix RGB_XYZ = Matrix.create(3,
                                   0.4124, 0.2126, 0.0193,
                                   0.3576, 0.7152, 0.1192,
                                   0.1805, 0.0722, 0.9505);

    // a matrix to convert an XYZ color to the LMS space
    Matrix XYZ_LMS = Matrix.create(3,
                                   0.7328, -0.7036, 0.0030,
                                   0.4296, 1.6975, 0.0136,
                                   -0.1624, 0.0061, 0.9834);

    // create direct conversion from RGB to LMS and vice versa
    RGB_LMS = RGB_XYZ.multiply(XYZ_LMS);
    LMS_RGB = RGB_LMS.inverse();

    // To simulate color blindness we remove the data lost by the absence of a cone.
    // This cannot be done by just zeroing out the corresponding LMS component,
    // because it would create a color outside of the RGB gamut. Instead,
    // we project the color along the axis of the missing component
    // onto a plane within the RGB gamut:
    //  - since the projection happens along the axis of the missing component,
    //    a color blind viewer perceives the projected color the same.
    //  - We use the plane defined by 3 points in LMS space: black, white and
    //    blue and red for protanopia/deuteranopia and tritanopia respectively.
    Vector red = RGB_LMS.getRow(0); // LMS space red
    Vector blue = RGB_LMS.getRow(2); // LMS space blue
    Vector white = Vector.create(1, 1, 1).multiply(RGB_LMS); // LMS space white

    // To find the planes we solve the a*L + b*M + c*S = 0 equation
    // for the LMS values of the three known points. This equation is trivially solved,
    // and has for solution the following cross-products:
    WHITE_BLUE = cross(white, blue); // protanopia/deuteranopia
    WHITE_RED = cross(white, red); // tritanopia
  }

  /** Standard 3D cross product of two vectors. */
  private static Vector cross(Vector left, Vector right) {
    return Vector.create(
      left.get(1) * right.get(2) - left.get(2) * right.get(1),
      left.get(2) * right.get(0) - left.get(0) * right.get(2),
      left.get(0) * right.get(1) - left.get(1) * right.get(0));
  }

  /**
   * Composes simulation and correction into a single RGB-to-RGB matrix:
   * LMS_RGB * (sim + correction * (identity - sim)) * RGB_LMS, expressed below
   * with the RGB_LMS factor folded in.
   */
  private static Matrix calculate(Matrix simulation, Matrix correction) {
    // We will calculate the error between the color and the color
    // viewed by a color blind user and "spread" this error onto the healthy cones.
    // The correction matrix perform this last step and have been chosen arbitrarily.
    Matrix matrix = simulation.multiply(RGB_LMS);
    if (correction == null) correction = CORRECTION;
    return LMS_RGB.multiply(matrix.plus(correction.multiply(RGB_LMS.minus(matrix))));
  }

  // Each variant projects onto the plane through black/white and blue or red,
  // zeroing the L (protanopia), M (deuteranopia) or S (tritanopia) cone response.

  static final class Protanopia {
    private static final double V1 = -WHITE_BLUE.get(1) / WHITE_BLUE.get(0);
    private static final double V2 = -WHITE_BLUE.get(2) / WHITE_BLUE.get(0);
    private static final Matrix SIMULATION = Matrix.create(3, 0, 0, 0, V1, 1, 0, V2, 0, 1);
    private static final Matrix CORRECTION = Matrix.create(3, 1, .7, .7, 0, 1, 0, 0, 0, 1);
    static final Matrix MATRIX = calculate(CORRECTION);

    static Matrix calculate(Matrix correction) {
      return ColorBlindnessMatrix.calculate(SIMULATION, correction);
    }
  }

  static final class Deuteranopia {
    private static final double V1 = -WHITE_BLUE.get(0) / WHITE_BLUE.get(1);
    private static final double V2 = -WHITE_BLUE.get(2) / WHITE_BLUE.get(1);
    private static final Matrix SIMULATION = Matrix.create(3, 1, V1, 0, 0, 0, 0, 0, V2, 1);
    private static final Matrix CORRECTION = Matrix.create(3, 1, 0, 0, .7, 1, .7, 0, 0, 1);
    static final Matrix MATRIX = calculate(CORRECTION);

    static Matrix calculate(Matrix correction) {
      return ColorBlindnessMatrix.calculate(SIMULATION, correction);
    }
  }

  static final class Tritanopia {
    private static final double V1 = -WHITE_RED.get(0) / WHITE_RED.get(2);
    private static final double V2 = -WHITE_RED.get(1) / WHITE_RED.get(2);
    private static final Matrix SIMULATION = Matrix.create(3, 1, 0, V1, 0, 1, V2, 0, 0, 0);
    private static final Matrix CORRECTION = Matrix.create(3, 1, 0, 0, 0, 1, 0, .7, .7, 1);
    static final Matrix MATRIX = calculate(CORRECTION);

    static Matrix calculate(Matrix correction) {
      return ColorBlindnessMatrix.calculate(SIMULATION, correction);
    }
  }
}
| apache-2.0 |
yweijiang/WinObjC | deps/3rdparty/cassowary-0.60/java/ClEditConstraint.java | 932 | // $Id: ClEditConstraint.java,v 1.10 1999/04/20 00:26:28 gjb Exp $
//
// Cassowary Incremental Constraint Solver
// Original Smalltalk Implementation by Alan Borning
// This Java Implementation by Greg J. Badros, <gjb@cs.washington.edu>
// http://www.cs.washington.edu/homes/gjb
// (C) 1998, 1999 Greg J. Badros and Alan Borning
// See ../LICENSE for legal details regarding this software
//
// ClEditConstraint
//
package EDU.Washington.grad.gjb.cassowary;
public class ClEditConstraint extends ClEditOrStayConstraint
{
public ClEditConstraint(ClVariable clv,
ClStrength strength,
double weight)
{ super(clv,strength,weight); }
public ClEditConstraint(ClVariable clv,
ClStrength strength)
{ super(clv,strength); }
public ClEditConstraint(ClVariable clv)
{ super(clv); }
public boolean isEditConstraint()
{ return true; }
public String toString()
{ return "edit" + super.toString(); }
}
| mit |
steveloughran/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java | 1430 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class NavBlock extends HtmlBlock {

  /**
   * Renders a placeholder navigation &lt;div id="nav"&gt; using the Hamlet fluent
   * HTML builder: each call opens an element and __() closes the most recently
   * opened one. The "Tools" section links to the standard Hadoop servlet endpoints.
   */
  @Override protected void render(Block html) {
    html.
      div("#nav").
        h3("Heading1").
        ul().
          li("Item 1").
          li("Item 2").
          li("...").__().
        h3("Tools").
        ul().
          li().a("/conf", "Configuration").__().
          li().a("/stacks", "Thread dump").__().
          li().a("/logs", "Logs").__().
          li().a("/jmx?qry=Hadoop:*", "Metrics").__().__().__();
  }
}
| apache-2.0 |
mahaliachante/aws-sdk-java | aws-java-sdk-elasticloadbalancing/src/main/java/com/amazonaws/services/elasticloadbalancing/model/transform/CrossZoneLoadBalancingStaxUnmarshaller.java | 2477 | /*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.elasticloadbalancing.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import javax.xml.stream.events.XMLEvent;
import com.amazonaws.services.elasticloadbalancing.model.*;
import com.amazonaws.transform.Unmarshaller;
import com.amazonaws.transform.MapEntry;
import com.amazonaws.transform.StaxUnmarshallerContext;
import com.amazonaws.transform.SimpleTypeStaxUnmarshallers.*;
/**
 * Cross Zone Load Balancing StAX Unmarshaller
 */
public class CrossZoneLoadBalancingStaxUnmarshaller implements Unmarshaller<CrossZoneLoadBalancing, StaxUnmarshallerContext> {

    /**
     * Reads XML events from the context until the CrossZoneLoadBalancing element is
     * fully consumed, populating the Enabled flag along the way.
     */
    public CrossZoneLoadBalancing unmarshall(StaxUnmarshallerContext context) throws Exception {
        CrossZoneLoadBalancing crossZoneLoadBalancing = new CrossZoneLoadBalancing();
        int originalDepth = context.getCurrentDepth();
        // Child elements of this node sit one level deeper (two extra at document start).
        int targetDepth = originalDepth + 1;

        if (context.isStartOfDocument()) targetDepth += 2;

        while (true) {
            XMLEvent xmlEvent = context.nextEvent();
            if (xmlEvent.isEndDocument()) return crossZoneLoadBalancing;

            if (xmlEvent.isAttribute() || xmlEvent.isStartElement()) {
                if (context.testExpression("Enabled", targetDepth)) {
                    crossZoneLoadBalancing.setEnabled(BooleanStaxUnmarshaller.getInstance().unmarshall(context));
                    continue;
                }
            } else if (xmlEvent.isEndElement()) {
                // Climbed back above our element: done.
                if (context.getCurrentDepth() < originalDepth) {
                    return crossZoneLoadBalancing;
                }
            }
        }
    }

    private static CrossZoneLoadBalancingStaxUnmarshaller instance;

    // NOTE(review): lazy init without synchronization is not strictly thread-safe;
    // presumably tolerated because the unmarshaller is stateless, so a duplicate
    // instance under a race is harmless — this matches the generated AWS SDK pattern.
    public static CrossZoneLoadBalancingStaxUnmarshaller getInstance() {
        if (instance == null) instance = new CrossZoneLoadBalancingStaxUnmarshaller();
        return instance;
    }
}
| apache-2.0 |
GlenRSmith/elasticsearch | x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java | 460 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.repositories.blobstore.testkit;
/**
 * Thrown to signal that an in-progress blob write was deliberately aborted.
 * Carries the fixed message "write aborted".
 */
public class BlobWriteAbortedException extends RuntimeException {
    public BlobWriteAbortedException() {
        super("write aborted");
    }
}
| apache-2.0 |
dyk/liquibase | liquibase-core/src/main/java/liquibase/diff/core/StandardDiffGenerator.java | 8853 | package liquibase.diff.core;
import liquibase.CatalogAndSchema;
import liquibase.database.Database;
import liquibase.diff.*;
import liquibase.diff.compare.CompareControl;
import liquibase.diff.compare.DatabaseObjectComparatorFactory;
import liquibase.exception.DatabaseException;
import liquibase.exception.UnexpectedLiquibaseException;
import liquibase.snapshot.DatabaseSnapshot;
import liquibase.snapshot.EmptyDatabaseSnapshot;
import liquibase.snapshot.InvalidExampleException;
import liquibase.snapshot.JdbcDatabaseSnapshot;
import liquibase.structure.DatabaseObject;

import java.util.HashSet;
import java.util.Set;
/**
 * Default {@link DiffGenerator} implementation. For every compared object
 * type it matches objects between a reference snapshot and a comparison
 * snapshot, recording missing, unexpected, and changed objects on a
 * {@link DiffResult}.
 */
public class StandardDiffGenerator implements DiffGenerator {

    @Override
    public int getPriority() {
        return PRIORITY_DEFAULT;
    }

    /**
     * This is the catch-all generator: it supports any combination of
     * reference and comparison databases.
     */
    @Override
    public boolean supports(Database referenceDatabase, Database comparisonDatabase) {
        return true;
    }

    /**
     * Compares the two snapshots and returns the resulting diff. A null
     * comparison snapshot is treated as an empty database, which lets callers
     * generate a full "create everything" diff from a single snapshot.
     */
    @Override
    public DiffResult compare(DatabaseSnapshot referenceSnapshot, DatabaseSnapshot comparisonSnapshot, CompareControl compareControl) throws DatabaseException {
        if (comparisonSnapshot == null) {
            try {
                comparisonSnapshot = new EmptyDatabaseSnapshot(referenceSnapshot.getDatabase());
            } catch (InvalidExampleException e) {
                throw new UnexpectedLiquibaseException(e);
            }
        }

        DiffResult diffResult = new DiffResult(referenceSnapshot, comparisonSnapshot, compareControl);
        checkVersionInfo(referenceSnapshot, comparisonSnapshot, diffResult);

        // Work on a copy: calling retainAll() directly on the set returned by
        // getComparedTypes() would mutate the CompareControl's state and
        // silently shrink the compared types for any later compare() call.
        Set<Class<? extends DatabaseObject>> typesToCompare =
                new HashSet<Class<? extends DatabaseObject>>(compareControl.getComparedTypes());
        typesToCompare.retainAll(referenceSnapshot.getSnapshotControl().getTypesToInclude());
        typesToCompare.retainAll(comparisonSnapshot.getSnapshotControl().getTypesToInclude());

        for (Class<? extends DatabaseObject> typeToCompare : typesToCompare) {
            compareObjectType(typeToCompare, referenceSnapshot, comparisonSnapshot, diffResult);
        }

        return diffResult;
    }

    /**
     * Records product name and version differences between the two databases
     * on the diff result. Does nothing when there is no comparison database.
     */
    protected void checkVersionInfo(DatabaseSnapshot referenceSnapshot, DatabaseSnapshot comparisonSnapshot, DiffResult diffResult) throws DatabaseException {
        if (comparisonSnapshot != null && comparisonSnapshot.getDatabase() != null) {
            diffResult.setProductNameDiff(new StringDiff(referenceSnapshot.getDatabase().getDatabaseProductName(), comparisonSnapshot.getDatabase().getDatabaseProductName()));
            diffResult.setProductVersionDiff(new StringDiff(referenceSnapshot.getDatabase().getDatabaseProductVersion(), comparisonSnapshot.getDatabase().getDatabaseProductVersion()));
        }
    }

    /**
     * Compares all objects of a single type between the two snapshots:
     * reference objects absent from the comparison snapshot are "missing",
     * comparison objects absent from the reference snapshot are "unexpected",
     * and objects present in both are diffed field-by-field.
     */
    protected <T extends DatabaseObject> void compareObjectType(Class<T> type, DatabaseSnapshot referenceSnapshot, DatabaseSnapshot comparisonSnapshot, DiffResult diffResult) {

        CompareControl.SchemaComparison[] schemaComparisons = diffResult.getCompareControl().getSchemaComparisons();
        if (schemaComparisons != null) {
            // NOTE(review): schemaComparison is never used inside the loop
            // body, so each schema comparison repeats identical work —
            // confirm whether per-schema filtering was intended here.
            for (CompareControl.SchemaComparison schemaComparison : schemaComparisons) {
                // Pass 1: reference objects that are missing or changed.
                for (T referenceObject : referenceSnapshot.get(type)) {
                    T comparisonObject = comparisonSnapshot.get(referenceObject);
                    if (comparisonObject == null) {
                        diffResult.addMissingObject(referenceObject);
                    } else {
                        ObjectDifferences differences = DatabaseObjectComparatorFactory.getInstance().findDifferences(referenceObject, comparisonObject, comparisonSnapshot.getDatabase(), diffResult.getCompareControl());
                        if (differences.hasDifferences()) {
                            diffResult.addChangedObject(referenceObject, differences);
                        }
                    }
                }

                // Pass 2: comparison objects with no reference counterpart.
                for (T comparisonObject : comparisonSnapshot.get(type)) {
                    if (referenceSnapshot.get(comparisonObject) == null) {
                        diffResult.addUnexpectedObject(comparisonObject);
                    }
                }
            }
            //todo: add logic for when container is missing or unexpected also
        }
    }
}
| apache-2.0 |
nmelnick/aws-sdk-java | aws-java-sdk-cloudwatchmetrics/src/main/java/com/amazonaws/metrics/internal/cloudwatch/MetricUploaderThread.java | 3500 | /*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.metrics.internal.cloudwatch;
import java.util.concurrent.BlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest;
import com.amazonaws.util.VersionInfoUtils;
/**
 * An internal daemon thread used to upload request metrics to Amazon CloudWatch.
 * Drains a shared queue of {@link MetricDatum} (batched by a
 * {@link BlockingRequestBuilder}) and submits PutMetricData requests until
 * {@link #cancel()} is called.
 */
class MetricUploaderThread extends Thread {
    // Appended to each request's user agent so uploads are attributable to this component.
    private static final String USER_AGENT = MetricUploaderThread.class.getName() + "/" + VersionInfoUtils.getVersion();
    private static final String THREAD_NAME = "java-sdk-metric-uploader";
    // volatile: written by cancel() from another thread, polled by run().
    private volatile boolean cancelled;
    private final AmazonCloudWatchClient cloudwatchClient;
    private final Log log = LogFactory.getLog(getClass());
    // Blocking, iterator-style source of batched upload requests.
    private final BlockingRequestBuilder qIterator;
    /**
     * Creates the uploader with a CloudWatch client built from the config's
     * credentials provider, or a default client when none is configured.
     */
    MetricUploaderThread(CloudWatchMetricConfig config,
            BlockingQueue<MetricDatum> queue) {
        this(config,
            queue,
            config.getCredentialsProvider() == null
            ? new AmazonCloudWatchClient()
            : new AmazonCloudWatchClient(config.getCredentialsProvider()));
    }
    /**
     * Creates the uploader with an explicit client (also used for testing).
     * The thread is configured as a minimum-priority daemon so it never
     * keeps the JVM alive or competes with application work.
     *
     * @throws IllegalArgumentException if config or queue is null
     */
    MetricUploaderThread(CloudWatchMetricConfig config,
            BlockingQueue<MetricDatum> queue,
            AmazonCloudWatchClient client)
    {
        super(THREAD_NAME);
        if (config == null || queue == null) {
            throw new IllegalArgumentException();
        }
        this.cloudwatchClient = client;
        this.qIterator = new BlockingRequestBuilder(config, queue);
        String endpoint = config.getCloudWatchEndPoint();
        if (endpoint != null)
            cloudwatchClient.setEndpoint(endpoint);
        this.setPriority(MIN_PRIORITY);
        setDaemon(true);
    }
    /**
     * Upload loop: blocks for the next batch, uploads each request, and keeps
     * going until cancelled. Deliberately survives all upload failures.
     */
    @Override
    public void run() {
        while (!cancelled) {
            try {
                // Blocks until enough metric data has accumulated.
                Iterable<PutMetricDataRequest> requests = qIterator.nextUploadUnits();
                for (PutMetricDataRequest req: requests) {
                    appendUserAgent(req);
                    log.debug(req);
                    cloudwatchClient.putMetricData(req);
                    Thread.yield();
                }
            } catch(InterruptedException e) {
                // Interruption is expected only as part of cancellation. The
                // interrupt status is intentionally NOT restored: the loop
                // re-enters the blocking nextUploadUnits() call, and a set
                // interrupt flag would make it throw again immediately.
                // NOTE(review): presumably cancel() is paired with an
                // interrupt() by the owner — confirm against the caller.
                if (!cancelled) {
                    log.debug("Unexpected interruption ignored");
                }
            } catch(Throwable t) {
                // Never let a single failed upload kill the uploader thread.
                log.warn("Unexpected condition; soldier on", t);
                Thread.yield();
            }
        }
    }
    // Requests loop termination; takes effect after the current blocking call returns.
    void cancel() { cancelled = true; }
    public AmazonCloudWatchClient getCloudwatchClient() {
        return cloudwatchClient;
    }
    // Tags the request with this uploader's user-agent fragment.
    private void appendUserAgent(PutMetricDataRequest request) {
        request.getRequestClientOptions().appendUserAgent(USER_AGENT);
    }
}
| apache-2.0 |
idea4bsd/idea4bsd | platform/xdebugger-impl/src/com/intellij/xdebugger/impl/actions/UnmuteOnStopAction.java | 1311 | /*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.xdebugger.impl.actions;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.ToggleAction;
import com.intellij.openapi.project.DumbAware;
import com.intellij.xdebugger.impl.settings.XDebuggerSettingManagerImpl;
/**
 * Toggle action controlling whether muted breakpoints are automatically
 * un-muted when a debug session stops. The state is stored in the shared
 * debugger general settings, so it persists across sessions.
 *
 * @author egor
 */
public class UnmuteOnStopAction extends ToggleAction implements DumbAware {
  @Override
  public boolean isSelected(AnActionEvent e) {
    // Reflects the persisted setting directly; no per-action state is kept.
    return XDebuggerSettingManagerImpl.getInstanceImpl().getGeneralSettings().isUnmuteOnStop();
  }

  @Override
  public void setSelected(AnActionEvent e, boolean state) {
    XDebuggerSettingManagerImpl.getInstanceImpl().getGeneralSettings().setUnmuteOnStop(state);
  }
}
HappyYang/stetho | stetho-sample/src/main/java/com/facebook/stetho/sample/Constants.java | 424 | /*
* Copyright (c) 2014-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.facebook.stetho.sample;
/**
 * Shared constants for the Stetho sample application.
 */
public final class Constants {
  /** Log tag used by the sample's components. */
  public static final String TAG = "StethoSample";

  private Constants() {
    // Non-instantiable constants holder.
  }
}
| bsd-3-clause |
OpenCollabZA/sakai | providers/cm-authz-provider/src/java/org/sakaiproject/coursemanagement/impl/provider/CourseManagementGroupProvider.java | 10729 | /**********************************************************************************
* $URL$
* $Id$
***********************************************************************************
*
* Copyright (c) 2006, 2007, 2008 The Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************/
package org.sakaiproject.coursemanagement.impl.provider;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import lombok.extern.slf4j.Slf4j;
import org.sakaiproject.authz.api.GroupProvider;
import org.sakaiproject.coursemanagement.api.CourseManagementService;
import org.sakaiproject.coursemanagement.api.Section;
import org.sakaiproject.coursemanagement.api.exception.IdNotFoundException;
/**
 * A Sakai GroupProvider that utilizes the CourseManagementService and the
 * CmMappingService to supply authz data to Sakai. This implementation uses
 * a list of RoleResolvers, which can be used to resolve a user's role in a section
 * based on memberships in parent objects such as CourseSets.
 *
 * <p>When several resolvers (or several packed sections) yield different roles
 * for the same key, the conflict is settled by {@link #preferredRole(String, String)}
 * using the configured {@code rolePreferences} ordering.</p>
 */
@Slf4j
public class CourseManagementGroupProvider implements GroupProvider {
	// Configuration keys.
	public static final String SITE_ROLE_RESOLUTION_ORDER = "siteRoleResolutionOrder";

	// Separator used when packing multiple section EIDs into a single provider
	// id. Embedded '+' characters are escaped as '/+' (see packId/unpackId);
	// the split pattern only matches a '+' that is not preceded by '/'.
	public static String EID_SEPARATOR = "+";
	public static String QUOTED_SEPARATOR = "/+";
	public static Pattern EID_SEPARATOR_PATTERN = Pattern.compile("(?<!/)\\+");

	/** The course management service */
	CourseManagementService cmService;

	/** The role resolvers to use when looking for CM roles in the hierarchy */
	List<RoleResolver> roleResolvers;

	/** The ordered list of role preferences. Roles earlier in the list are preferred to those later in the list. */
	List<String> rolePreferences;

	/** Map to support external service configuration */
	Map<String, Object> configuration;

	// GroupProvider methods

	/**
	 * This method is no longer in use in Sakai. It should be removed from the
	 * GroupProvider interface.
	 */
	public String getRole(String id, String user) {
		log.error("\n------------------------------------------------------------------\n");
		log.error("THIS METHOD IS NEVER CALLED IN SAKAI. WHAT HAPPENED???");
		log.error("\n------------------------------------------------------------------\n");
		return null;
	}

	/**
	 * Provides a Map of user IDs to (Sakai) roles for the Course Section EIDs
	 * packed into the given AuthzGroup provider string.
	 */
	public Map<String, String> getUserRolesForGroup(String id) {
		log.debug("------------------CMGP.getUserRolesForGroup({})", id);
		Map<String, String> userRoleMap = new HashMap<>();

		String[] sectionEids = unpackId(id);
		log.debug("{} is mapped to {} sections", id, sectionEids.length);

		for (RoleResolver rr : roleResolvers) {
			for (String sectionEid : sectionEids) {
				Section section;
				try {
					section = cmService.getSection(sectionEid);
				} catch (IdNotFoundException e) {
					log.warn("Unable to find CM section {}", sectionEid);
					continue;
				}
				log.debug("Looking for roles in section {}", sectionEid);
				Map<String, String> rrUserRoleMap = rr.getUserRoles(cmService, section);
				for (Entry<String, String> entry : rrUserRoleMap.entrySet()) {
					addRoleIfPreferred(userRoleMap, entry.getKey(), entry.getValue());
				}
			}
		}
		log.debug("_____________getUserRolesForGroup={}", userRoleMap);
		return userRoleMap;
	}

	/**
	 * Provides a map of Course Section EIDs (usable as AuthzGroup provider IDs)
	 * to Sakai roles for a user, restricted to the given academic session.
	 */
	public Map<String, String> getGroupRolesForUser(String userEid, String academicSessionEid) {
		log.debug("------------------CMGP.getGroupRolesForUser({})", userEid);
		Map<String, String> groupRoleMap = new HashMap<>();
		for (RoleResolver rr : roleResolvers) {
			// note that some implementations of RoleResolver may not implement this method call signature.
			Map<String, String> rrGroupRoleMap = rr.getGroupRoles(cmService, userEid, academicSessionEid);
			log.debug("Found {} groups for {} from resolver {}", rrGroupRoleMap.size(), userEid, rr.getClass().getName());
			for (Entry<String, String> entry : rrGroupRoleMap.entrySet()) {
				addRoleIfPreferred(groupRoleMap, entry.getKey(), entry.getValue());
			}
		}
		log.debug("______________getGroupRolesForUser={}", groupRoleMap);
		return groupRoleMap;
	}

	/**
	 * Provides a map of Course Section EIDs (which can be used as AuthzGroup provider IDs)
	 * to Sakai roles for a given user, across all academic sessions.
	 */
	public Map<String, String> getGroupRolesForUser(String userEid) {
		log.debug("------------------CMGP.getGroupRolesForUser({})", userEid);
		Map<String, String> groupRoleMap = new HashMap<>();
		for (RoleResolver rr : roleResolvers) {
			Map<String, String> rrGroupRoleMap = rr.getGroupRoles(cmService, userEid);
			log.debug("Found {} groups for {} from resolver {}", rrGroupRoleMap.size(), userEid, rr.getClass().getName());
			for (Entry<String, String> entry : rrGroupRoleMap.entrySet()) {
				addRoleIfPreferred(groupRoleMap, entry.getKey(), entry.getValue());
			}
		}
		log.debug("______________getGroupRolesForUser={}", groupRoleMap);
		return groupRoleMap;
	}

	/**
	 * Shared merge rule for all role maps: ignore null candidate roles, and
	 * only add/replace an entry when the candidate is at least as preferred
	 * as the role already recorded for the key.
	 */
	private void addRoleIfPreferred(Map<String, String> roleMap, String key, String candidateRole) {
		if (candidateRole == null) {
			// The role resolver found no role for this key.
			return;
		}
		String existingRole = roleMap.get(key);
		if (existingRole == null || preferredRole(existingRole, candidateRole).equals(candidateRole)) {
			log.debug("Mapping {} to role {} (was {})", key, candidateRole, existingRole);
			roleMap.put(key, candidateRole);
		}
	}

	/**
	 * {@inheritDoc}
	 */
	public String packId(String[] ids) {
		if (ids == null || ids.length == 0) {
			return null;
		}
		if (ids.length == 1) {
			return ids[0];
		}
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < ids.length; i++) {
			// First, escape any embedded separator characters.
			String eid = ids[i].replace(EID_SEPARATOR, QUOTED_SEPARATOR);
			sb.append(eid);
			if (i < ids.length - 1) {
				sb.append(EID_SEPARATOR);
			}
		}
		return sb.toString();
	}

	/**
	 * Splits a packed provider id on unescaped separators and unescapes each
	 * resulting section EID.
	 */
	public String[] unpackId(String id) {
		if (id == null) {
			return new String[0];
		}
		String[] ids = EID_SEPARATOR_PATTERN.split(id);
		// Unescape any embedded separator characters.
		for (int i = 0; i < ids.length; i++) {
			ids[i] = ids[i].replace(QUOTED_SEPARATOR, EID_SEPARATOR);
		}
		return ids;
	}

	// Utility methods

	public void init() {
		log.info("initializing {}", this.getClass().getName());
		// Use the externally supplied configuration map, if any.
		if (configuration != null) {
			if (rolePreferences != null) {
				log.warn("Both a provider configuration object and direct role mappings have been defined. " +
						"The configuration object will take precedence.");
			}
			setRolePreferences((List<String>) configuration.get(SITE_ROLE_RESOLUTION_ORDER));
		}
	}

	public void destroy() {
		log.info("destroying {}", this.getClass().getName());
	}

	// Dependency injection

	public void setCmService(CourseManagementService cmService) {
		this.cmService = cmService;
	}

	public void setRoleResolvers(List<RoleResolver> roleResolvers) {
		this.roleResolvers = roleResolvers;
	}

	/**
	 * Returns whichever of the two roles appears earlier in the configured
	 * rolePreferences list; a role not present in the list always loses to
	 * one that is.
	 */
	public String preferredRole(String one, String other) {
		int oneIndex = rolePreferences.indexOf(one);
		int otherIndex = rolePreferences.indexOf(other);
		if (otherIndex == -1) {
			return one;
		}
		if (oneIndex == -1) {
			return other;
		}
		return oneIndex < otherIndex ? one : other;
	}

	public void setRolePreferences(List<String> rolePreferences) {
		this.rolePreferences = rolePreferences;
	}

	public void setConfiguration(Map<String, Object> configuration) {
		this.configuration = configuration;
	}

	/**
	 * A group exists when the course management service knows the section EID.
	 */
	public boolean groupExists(String groupId) {
		return cmService.isSectionDefined(groupId);
	}
}
| apache-2.0 |
mahaliachante/aws-sdk-java | src/samples/AwsFlowFramework/src/com/amazonaws/services/simpleworkflow/flow/examples/fileprocessing/SimpleStoreActivities.java | 2420 | /*
* Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simpleworkflow.flow.examples.fileprocessing;
import com.amazonaws.services.simpleworkflow.flow.annotations.Activities;
import com.amazonaws.services.simpleworkflow.flow.annotations.ActivityRegistrationOptions;
import com.amazonaws.services.simpleworkflow.flow.annotations.ExponentialRetry;
@Activities(version = "1.0")
@ActivityRegistrationOptions(defaultTaskScheduleToStartTimeoutSeconds = 60, defaultTaskStartToCloseTimeoutSeconds = 120)
public interface SimpleStoreActivities {
    /**
     * Uploads a local file to an S3 bucket, retrying with exponential backoff
     * on failure.
     *
     * @param bucketName
     *          Name of the S3 bucket to upload into
     * @param localName
     *          Name of the local file to upload
     * @param targetName
     *          Key (file name) to use in the S3 bucket after upload
     */
    @ExponentialRetry(initialRetryIntervalSeconds = 10, maximumAttempts = 10)
    public void upload(String bucketName, String localName, String targetName);
    /**
     * Downloads a file from an S3 bucket to a local file, retrying with
     * exponential backoff on failure.
     *
     * @param bucketName
     *          Name of the S3 bucket to download from
     * @param remoteName
     *          Key (file name) to download from the S3 bucket
     * @param localName
     *          Name to use for the local file after download
     * @return name of the host that performed the download, so later
     *          activities can be routed to the machine holding the file
     *          (per the original author's notes — confirm with callers)
     */
    @ExponentialRetry(initialRetryIntervalSeconds = 10, maximumAttempts = 10)
    public String download(String bucketName, String remoteName, String localName) throws Exception;
    /**
     * Deletes a file from the local temporary folder of the machine running
     * this activity.
     *
     * @param fileName
     *          Name of the file to delete
     */
    @ExponentialRetry(initialRetryIntervalSeconds=10)
    public void deleteLocalFile(String fileName);
}
| apache-2.0 |
shaotuanchen/sunflower_exp | tools/source/gcc-4.2.4/libjava/classpath/java/rmi/server/RemoteServer.java | 3499 | /* RemoteServer.java --
Copyright (c) 1996, 1997, 1998, 1999, 2004, 2006
Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package java.rmi.server;
import gnu.java.rmi.server.RMIIncomingThread;
import java.io.OutputStream;
import java.io.PrintStream;
/**
 * A common superclass for the RMI server implementations.
 */
public abstract class RemoteServer
    extends RemoteObject
{
  private static final long serialVersionUID = - 4100238210092549637L;

  /**
   * Creates a server with no remote reference; delegates to super().
   */
  protected RemoteServer()
  {
    super();
  }

  /**
   * Creates a server using the given remote reference; delegates to super(ref).
   */
  protected RemoteServer(RemoteRef ref)
  {
    super(ref);
  }

  /**
   * Get the host of the calling client. The current thread must be an instance
   * of the {@link RMIIncomingThread}.
   *
   * @return the client host address
   *
   * @throws ServerNotActiveException if the current thread is not an instance
   *           of the RMIIncomingThread.
   */
  public static String getClientHost() throws ServerNotActiveException
  {
    Thread current = Thread.currentThread();
    // Guard clause: only an RMI dispatch thread knows the client host.
    if (! (current instanceof RMIIncomingThread))
      throw new ServerNotActiveException(
        "Unknown client host - current thread not instance of 'RMIIncomingThread'");
    return ((RMIIncomingThread) current).getClientHost();
  }

  /**
   * Set the stream for logging RMI calls.
   *
   * @param out the stream to set or null to turn the logging off.
   */
  public static void setLog(OutputStream out)
  {
    // Call logging is not supported by this implementation.
    throw new Error("Not implemented");
  }

  /**
   * Get the stream for logging RMI calls.
   *
   * @return the associated stream.
   */
  public static PrintStream getLog()
  {
    // Call logging is not supported by this implementation.
    throw new Error("Not implemented");
  }
}
| bsd-3-clause |
openweave/openweave-core | third_party/android/platform-libcore/android-platform-libcore/dom/src/test/java/org/w3c/domts/level2/core/hasAttributeNS03.java | 3111 |
/*
This Java source file was generated by test-to-java.xsl
and is a derived work from the source document.
The source document contained the following notice:
Copyright (c) 2001-2004 World Wide Web Consortium,
(Massachusetts Institute of Technology, Institut National de
Recherche en Informatique et en Automatique, Keio University). All
Rights Reserved. This program is distributed under the W3C's Software
Intellectual Property License. This program is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE.
See W3C License http://www.w3.org/Consortium/Legal/ for more details.
*/
package org.w3c.domts.level2.core;
import org.w3c.dom.*;
import org.w3c.domts.DOMTestCase;
import org.w3c.domts.DOMTestDocumentBuilderFactory;
/**
 * Verifies that Element.hasAttributeNS() returns false when the element has
 * no attribute with the given local name / namespace URI and no default value
 * exists for it. The first "emp:address" element of the staffNS document is
 * probed for an absent "blank" attribute in the "http://www.nist.gov"
 * namespace.
 * @author NIST
 * @author Mary Brady
 * @see <a href="http://www.w3.org/TR/DOM-Level-2-Core/core#ID-ElHasAttrNS">http://www.w3.org/TR/DOM-Level-2-Core/core#ID-ElHasAttrNS</a>
 */
public final class hasAttributeNS03 extends DOMTestCase {

   /**
    * Constructor.
    * @param factory document factory, may not be null
    * @throws org.w3c.domts.DOMTestIncompatibleException Thrown if test is not compatible with parser configuration
    */
   public hasAttributeNS03(final DOMTestDocumentBuilderFactory factory) throws org.w3c.domts.DOMTestIncompatibleException {
      super(factory);
      // Preload the staffNS fixture for the configured content type.
      preload(getContentType(), "staffNS", false);
   }

   /**
    * Runs the test case.
    * @throws Throwable Any uncaught exception causes test to fail
    */
   public void runTest() throws Throwable {
      Document doc = (Document) load("staffNS", false);
      NodeList addressList = doc.getElementsByTagName("emp:address");
      Element address = (Element) addressList.item(0);
      assertNotNull("empAddrNotNull", address);
      // The attribute is neither specified nor defaulted, so this must be false.
      boolean state = address.hasAttributeNS("http://www.nist.gov", "blank");
      assertFalse("throw_False", state);
   }

   /**
    * Gets URI that identifies the test.
    * @return uri identifier of test
    */
   public String getTargetURI() {
      return "http://www.w3.org/2001/DOM-Test-Suite/level2/core/hasAttributeNS03";
   }

   /**
    * Runs this test from the command line.
    * @param args command line arguments
    */
   public static void main(final String[] args) {
      DOMTestCase.doMain(hasAttributeNS03.class, args);
   }
}
| apache-2.0 |
qwerty4030/elasticsearch | server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java | 4551 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.fetch.subphase.highlight;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo;
import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.mapper.FieldMapper;
import java.util.Comparator;
import java.util.List;
/**
 * Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implementations.
 */
public final class FragmentBuilderHelper {
    private FragmentBuilderHelper() {
        // no instance
    }
    /**
     * Fixes problems with broken analysis chains if positions and offsets are messed up that can lead to
     * {@link StringIndexOutOfBoundsException} in the {@link FastVectorHighlighter}.
     *
     * @param mapper   mapper of the highlighted field; its index analyzer is inspected for broken token filters
     * @param values   indexed field values; the first value's name must match the mapper's field name (asserted)
     * @param fragInfo fragment info to fix, must not be null
     * @return {@code fragInfo} unchanged when no broken analysis is detected, otherwise a copy whose
     *         sub-infos are re-sorted by start offset and whose start offset is clamped accordingly
     */
    public static WeightedFragInfo fixWeightedFragInfo(FieldMapper mapper, Field[] values, WeightedFragInfo fragInfo) {
        assert fragInfo != null : "FragInfo must not be null";
        assert mapper.fieldType().name().equals(values[0].name()) : "Expected FieldMapper for field " + values[0].name();
        if (!fragInfo.getSubInfos().isEmpty() && containsBrokenAnalysis(mapper.fieldType().indexAnalyzer())) {
            /* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
             * which can potentially mess up the offsets. To prevent a SAIIOBException we need to re-sort
             * the fragments based on their offsets rather than using solely the positions as it is done in
             * the FastVectorHighlighter. Yet, this is really a lucene problem and should be fixed in lucene rather
             * than in this hack... aka. "we are working on it!" */
            final List<SubInfo> subInfos = fragInfo.getSubInfos();
            // Order sub-infos by the start offset of their first term. Integer.compare replaces the
            // former hand-rolled comparison helper (same result, standard-library idiom).
            CollectionUtil.introSort(subInfos, new Comparator<SubInfo>() {
                @Override
                public int compare(SubInfo o1, SubInfo o2) {
                    return Integer.compare(o1.getTermsOffsets().get(0).getStartOffset(),
                                           o2.getTermsOffsets().get(0).getStartOffset());
                }
            });
            // The fragment must start no later than its (re-sorted) earliest term offset.
            return new WeightedFragInfo(Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(),
                fragInfo.getStartOffset()), fragInfo.getEndOffset(), subInfos, fragInfo.getTotalBoost());
        } else {
            return fragInfo;
        }
    }
    /** Returns true when the (possibly wrapped) analyzer contains a token filter known to break the FVH. */
    private static boolean containsBrokenAnalysis(Analyzer analyzer) {
        // TODO maybe we need a getter on NamedAnalyzer that tells if this uses broken analysis
        if (analyzer instanceof NamedAnalyzer) {
            analyzer = ((NamedAnalyzer) analyzer).analyzer();
        }
        if (analyzer instanceof CustomAnalyzer) {
            final CustomAnalyzer a = (CustomAnalyzer) analyzer;
            for (TokenFilterFactory tokenFilterFactory : a.tokenFilters()) {
                if (tokenFilterFactory.breaksFastVectorHighlighter()) {
                    return true;
                }
            }
        }
        return false;
    }
}
| apache-2.0 |
asedunov/intellij-community | java/java-tests/testData/inspection/java8MapApi/beforeComputeIfAbsentSingleLineParens.java | 378 | // "Replace with 'computeIfAbsent' method call" "true"
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class Main {
  // IDE inspection fixture ("before" state): the get / null-check / put pattern below is
  // the target of the "Replace with 'computeIfAbsent'" quick fix; <caret> marks the caret position.
  public void testMap(Map<String, List<String>> map, String key, String value) {
    List<String> list = map.get(key);
    if(list == nul<caret>l) {
      map.put(key, (list = new ArrayList<>()));
    }
    list.add(value);
  }
}
siosio/intellij-community | java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/streamApiMigration/anyMatch/afterNestedAnyMatch.java | 275 | // "Replace with toArray" "true"
import java.util.*;
public class Main {
    // IDE inspection fixture ("after" state): expected output of the "Replace with toArray"
    // stream-API migration quick fix applied to a nested anyMatch loop.
    public Integer[] testNestedAnyMatch(List<List<String>> data) {
        return data.stream().filter(list -> list.stream().anyMatch(str -> !str.isEmpty())).map(List::size).toArray(Integer[]::new);
    }
}
barthel/maven | maven-compat/src/main/java/org/apache/maven/project/interpolation/BuildTimestampValueSource.java | 1769 | package org.apache.maven.project.interpolation;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.text.SimpleDateFormat;
import java.util.Date;
import org.codehaus.plexus.interpolation.AbstractValueSource;
/**
 * Value source that resolves the {@code build.timestamp} / {@code maven.build.timestamp}
 * expressions to the build start time, rendered once with a caller-supplied
 * {@link SimpleDateFormat} pattern.
 */
@Deprecated
public class BuildTimestampValueSource
    extends AbstractValueSource
{
    /** Build start time; when null no timestamp is ever produced. */
    private final Date startTime;

    /** SimpleDateFormat pattern used to render the timestamp. */
    private final String format;

    /** Timestamp string, formatted lazily and cached after the first request. */
    private String formattedDate;

    public BuildTimestampValueSource( Date startTime, String format )
    {
        super( false );
        this.startTime = startTime;
        this.format = format;
    }

    public Object getValue( String expression )
    {
        boolean isTimestampExpression =
            "build.timestamp".equals( expression ) || "maven.build.timestamp".equals( expression );

        if ( !isTimestampExpression )
        {
            return null;
        }

        // Format on first request only; later calls reuse the cached string.
        if ( formattedDate == null && startTime != null )
        {
            formattedDate = new SimpleDateFormat( format ).format( startTime );
        }

        return formattedDate;
    }
}
| apache-2.0 |
MikaNieminen/gocd | server/test/unit/com/thoughtworks/go/server/ui/StageSummaryModelTest.java | 6332 | /*************************GO-LICENSE-START*********************************
* Copyright 2014 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*************************GO-LICENSE-END***********************************/
package com.thoughtworks.go.server.ui;
import com.thoughtworks.go.domain.JobInstance;
import com.thoughtworks.go.domain.JobInstances;
import com.thoughtworks.go.domain.JobResult;
import com.thoughtworks.go.domain.Stage;
import com.thoughtworks.go.domain.StageState;
import com.thoughtworks.go.domain.Stages;
import com.thoughtworks.go.helper.JobInstanceMother;
import com.thoughtworks.go.helper.StageMother;
import static com.thoughtworks.go.helper.StageMother.completedFailedStageInstance;
import static com.thoughtworks.go.helper.StageMother.custom;
import static com.thoughtworks.go.helper.StageMother.passedStageInstance;
import com.thoughtworks.go.server.domain.JobDurationStrategy;
import static org.hamcrest.core.Is.is;
import org.joda.time.Duration;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/**
 * Unit tests for {@link StageSummaryModel}: duration rendering, run counting,
 * job partitioning by result, and identifier passthrough.
 */
public class StageSummaryModelTest {
    // Strategy that reports zero expected duration for every job; keeps duration math deterministic.
    private static final JobDurationStrategy JOB_DURATION_STRATEGY = JobDurationStrategy.ALWAYS_ZERO;
    // A stage that is only scheduled has no duration yet and must render as "In Progress".
    @Test public void shouldReturnInProgressWhenTheDurationIs0() throws Exception {
        Stage stage = StageMother.scheduledStage("pipeline-name", 1, "stage", 1, "job");
        StageSummaryModel stageSummaryModel = new StageSummaryModel(stage, new Stages(), JOB_DURATION_STRATEGY, null);
        assertThat(stageSummaryModel.getDuration(), is("In Progress"));
    }
    // A completed (failed) stage renders a concrete hh:mm:ss duration (zero here).
    @Test public void shouldReturn0ForAFailedStage0() throws Exception {
        Stage stage = completedFailedStageInstance("pipeline-name", "stage", "job");
        StageSummaryModel stageSummaryModel = new StageSummaryModel(stage, new Stages(), JOB_DURATION_STRATEGY, null);
        assertThat(stageSummaryModel.getDuration(), is("00:00:00"));
    }
    // Total runs comes from the supplied Stages collection; per-run state is looked up by counter.
    @Test public void shouldReturnTotalRuns() throws Exception {
        Stage failed = completedFailedStageInstance("pipeline-name", "stage", "job");
        failed.setCounter(1);
        Stage passed = custom("stage");
        passed.setCounter(2);
        StageSummaryModel stageSummaryModel = new StageSummaryModel(failed, new Stages(failed, passed), JOB_DURATION_STRATEGY, null);
        assertThat(stageSummaryModel.getTotalRuns(),is(2));
        assertThat(stageSummaryModel.getStateForRun(1),is(StageState.Failed));
        assertThat(stageSummaryModel.getStateForRun(2),is(StageState.Passed));
    }
    // Jobs are partitioned into passed / non-passed / in-progress; note the expected
    // name ordering within each partition ("athird" before "bsecond", "fifth" before "first").
    @Test
    public void shouldReturnJobsForAGivenResult() throws Exception {
        JobInstance first = JobInstanceMother.completed("first", JobResult.Failed);
        JobInstance second = JobInstanceMother.completed("bsecond", JobResult.Passed);
        JobInstance third = JobInstanceMother.completed("athird", JobResult.Passed);
        JobInstance fourth = JobInstanceMother.building("fourth");
        JobInstance fifth = JobInstanceMother.completed("fifth", JobResult.Cancelled);
        Stage stage = StageMother.custom("pipeline", "stage", new JobInstances(first, second, third, fourth, fifth));
        StageSummaryModel model = new StageSummaryModel(stage, new Stages(stage), JOB_DURATION_STRATEGY, null);
        assertThat(model.passedJobs().size(), is(2));
        assertThat(model.passedJobs().get(0).getName(), is(third.getName()));
        assertThat(model.passedJobs().get(1).getName(), is(second.getName()));
        assertThat(model.nonPassedJobs().size(), is(2));
        assertThat(model.nonPassedJobs().get(0).getName(), is(fifth.getName()));
        assertThat(model.nonPassedJobs().get(1).getName(), is(first.getName()));
        assertThat(model.inProgressJobs().size(), is(1));
        assertThat(model.inProgressJobs().get(0).getName(), is(fourth.getName()));
    }
    // NOTE(review): method name typo "Retrive" — candidate for a rename in a dedicated change.
    @Test
    public void shouldRetriveShowElapsedTime() throws Exception {
        JobInstance first = JobInstanceMother.completed("first", JobResult.Failed);
        Stage stage = StageMother.custom("pipeline", "stage", new JobInstances(first));
        StageSummaryModel model = new StageSummaryModel(stage, new Stages(stage), JOB_DURATION_STRATEGY, null);
        assertThat(model.nonPassedJobs().get(0).getElapsedTime(), is(first.getElapsedTime()));
    }
    // With a constant expected duration of 1000s and 120s elapsed, completion is 12%.
    @Test
    public void shouldRetrivePercentCompleteOnJobs() throws Exception {
        JobInstance first = JobInstanceMother.completed("first", JobResult.Failed);
        Stage stage = StageMother.custom("pipeline", "stage", new JobInstances(first));
        StageSummaryModel model = new StageSummaryModel(stage, new Stages(stage), new JobDurationStrategy.ConstantJobDuration(1000 * 1000), null);
        assertThat(model.nonPassedJobs().get(0).getElapsedTime(), is(new Duration(120 * 1000)));
        assertThat(model.nonPassedJobs().get(0).getPercentComplete(), is(12));
    }
    @Test
    public void shouldExplainWhetherJobIsComplete() throws Exception {
        JobInstance first = JobInstanceMother.completed("first", JobResult.Failed);
        Stage stage = StageMother.custom("pipeline", "stage", new JobInstances(first));
        StageSummaryModel model = new StageSummaryModel(stage, new Stages(stage), JOB_DURATION_STRATEGY, null);
        assertThat(model.nonPassedJobs().get(0).isCompleted(), is(true));
    }
    // The pipeline counter is taken from the StageIdentifier handed to the model.
    @Test
    public void shouldGetPipelineCounter() throws Exception {
        JobInstance first = JobInstanceMother.completed("first", JobResult.Failed);
        Stage stage = StageMother.custom("pipeline", "stage", new JobInstances(first));
        StageSummaryModel model = new StageSummaryModel(stage, new Stages(stage), JOB_DURATION_STRATEGY, stage.getIdentifier());
        assertThat(model.getPipelineCounter(), is(stage.getIdentifier().getPipelineCounter()));
    }
}
| apache-2.0 |
qorio/maestro | zookeeper/src/java/test/org/apache/zookeeper/test/LETest.java | 5249 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zookeeper.test;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Random;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.zookeeper.PortAssignment;
import org.apache.zookeeper.ZKTestCase;
import org.apache.zookeeper.server.quorum.LeaderElection;
import org.apache.zookeeper.server.quorum.QuorumPeer;
import org.apache.zookeeper.server.quorum.Vote;
import org.apache.zookeeper.server.quorum.QuorumPeer.QuorumServer;
import org.junit.Assert;
import org.junit.Test;
/**
 * Stress test for the legacy {@link LeaderElection} algorithm: spins up 30 quorum peers,
 * has each run leader election in its own thread, forces the first elected leader to "die"
 * once (see {@code leaderDies}), and finally checks that all surviving threads converged
 * on the same leader id — tolerating exactly one stale vote from the dead leader.
 */
public class LETest extends ZKTestCase {
    private static final Logger LOG = LoggerFactory.getLogger(LETest.class);
    // Shared across LEThreads; volatile so each thread sees the others' published values.
    volatile Vote votes[];
    // One-shot flag: the first thread to win the election consumes it and simulates a leader death.
    volatile boolean leaderDies;
    // -1 = no leader yet, -2 = leader died, otherwise the elected thread's id.
    volatile long leader = -1;
    Random rand = new Random();
    // One election participant; publishes its final vote into votes[i].
    class LEThread extends Thread {
        LeaderElection le;
        int i;
        QuorumPeer peer;
        LEThread(LeaderElection le, QuorumPeer peer, int i) {
            this.le = le;
            this.i = i;
            this.peer = peer;
        }
        public void run() {
            try {
                Vote v = null;
                while(true) {
                    v = le.lookForLeader();
                    votes[i] = v;
                    if (v.getId() == i) {
                        // This thread believes it is the leader.
                        synchronized(LETest.this) {
                            if (leaderDies) {
                                // First leader only: stop participating to force a re-election.
                                leaderDies = false;
                                peer.stopLeaderElection();
                                LOG.info("Leader " + i + " dying");
                                leader = -2;
                            } else {
                                leader = i;
                            }
                            // Wake followers blocked below waiting for an outcome.
                            LETest.this.notifyAll();
                        }
                        break;
                    }
                    synchronized(LETest.this) {
                        if (leader == -1) {
                            // No outcome announced yet; wait for a leader (or a death) notification.
                            LETest.this.wait();
                        }
                        if (leader == v.getId()) {
                            // Our vote matches the announced leader; we are done.
                            break;
                        }
                    }
                    // Leader died or vote mismatched: back off randomly and vote for ourselves again.
                    Thread.sleep(rand.nextInt(1000));
                    peer.setCurrentVote(new Vote(peer.getId(), 0));
                }
                LOG.info("Thread " + i + " votes " + v);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
    @Test
    public void testLE() throws Exception {
        int count = 30;
        HashMap<Long,QuorumServer> peers = new HashMap<Long,QuorumServer>(count);
        ArrayList<LEThread> threads = new ArrayList<LEThread>(count);
        File tmpdir[] = new File[count];
        int port[] = new int[count];
        votes = new Vote[count];
        // Build the quorum view: one local server per participant on a unique port.
        for(int i = 0; i < count; i++) {
            peers.put(Long.valueOf(i),
                    new QuorumServer(i,
                            new InetSocketAddress("127.0.0.1",
                                    PortAssignment.unique())));
            tmpdir[i] = ClientBase.createTmpDir();
            port[i] = PortAssignment.unique();
        }
        LeaderElection le[] = new LeaderElection[count];
        leaderDies = true;
        // Since the first leader is killed on purpose, exactly one disagreeing vote is tolerated.
        boolean allowOneBadLeader = leaderDies;
        for(int i = 0; i < le.length; i++) {
            QuorumPeer peer = new QuorumPeer(peers, tmpdir[i], tmpdir[i],
                    port[i], 0, i, 1000, 2, 2);
            peer.startLeaderElection();
            le[i] = new LeaderElection(peer);
            LEThread thread = new LEThread(le[i], peer, i);
            thread.start();
            threads.add(thread);
        }
        // Every participant must finish within 15s or the test fails.
        for(int i = 0; i < threads.size(); i++) {
            threads.get(i).join(15000);
            if (threads.get(i).isAlive()) {
                Assert.fail("Threads didn't join");
            }
        }
        // All published votes must agree on one id, allowing the dead leader's self-vote once.
        long id = votes[0].getId();
        for(int i = 1; i < votes.length; i++) {
            if (votes[i] == null) {
                Assert.fail("Thread " + i + " had a null vote");
            }
            if (votes[i].getId() != id) {
                if (allowOneBadLeader && votes[i].getId() == i) {
                    allowOneBadLeader = false;
                } else {
                    Assert.fail("Thread " + i + " got " + votes[i].getId() + " expected " + id);
                }
            }
        }
    }
}
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/cacheinvalidation/src/java/com/google/ipc/invalidation/util/TextBuilder.java | 5947 | /*
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.ipc.invalidation.util;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
/**
 * A {@link TextBuilder} lets classes efficiently accumulate their string representations for
 * later human consumption (debugging, logging). It wraps a {@link StringBuilder} together with
 * a formatter so that both append- and format-style calls are available. Every append method
 * returns this builder, so calls can be chained.
 */
public class TextBuilder {
  private final StringBuilder builder;
  private final UtilFormatter formatter;

  /**
   * Writes the name and value of every member field declared on {@code clazz} for the given
   * {@code object} into {@code builder}. Use sparingly:
   * <ol>
   * <li>It is expensive; frequently-logged types should implement
   * {@link InternalBase#toCompactString} directly.</li>
   * <li>A cycle in the object graph may overflow the stack.</li>
   * <li>Custom formatters registered for protos are bypassed.</li>
   * </ol>
   */
  public static void outputFieldsToBuilder(TextBuilder builder, Object object, Class<?> clazz) {
    Preconditions.checkArgument(clazz.isAssignableFrom(object.getClass()));
    // Walk the declared fields, rendering each via toCompactString when available,
    // otherwise via its plain toString.
    for (Field member : clazz.getDeclaredFields()) {
      try {
        int modifiers = member.getModifiers();
        // Static final fields are constants and uninteresting here.
        if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) {
          continue;
        }
        member.setAccessible(true);
        builder.append(member.getName() + " = ");
        Object contents = member.get(object);
        if (contents instanceof InternalBase) {
          ((InternalBase) contents).toCompactString(builder);
        } else {
          builder.append(contents);
        }
        builder.append(", ");
      } catch (IllegalArgumentException e) {
        e.printStackTrace();
      } catch (IllegalAccessException e) {
        e.printStackTrace();
      }
    }
  }

  /** Creates an empty TextBuilder ready to receive string representations. */
  public TextBuilder() {
    builder = new StringBuilder();
    formatter = new UtilFormatter(builder);
  }

  /** Appends the character {@code c} and returns this builder. */
  public TextBuilder append(char c) {
    builder.append(c);
    return this;
  }

  /** Appends the decimal representation of {@code i} and returns this builder. */
  public TextBuilder append(int i) {
    builder.append(i);
    return this;
  }

  /**
   * Appends {@code object}, routing through {@link InternalBase#toCompactString} when the
   * object supports it and falling back to {@code toString} otherwise.
   */
  public TextBuilder append(Object object) {
    if (object instanceof InternalBase) {
      return append((InternalBase) object);
    }
    builder.append(object);
    return this;
  }

  /** Appends the compact-string form of {@code object}, or the text "null". */
  public TextBuilder append(InternalBase object) {
    if (object != null) {
      object.toCompactString(this);
      return this;
    }
    return append("null");
  }

  /**
   * Appends the comma-separated compact-string forms of {@code objects}; a null iterable
   * appends nothing.
   */
  public TextBuilder append(Iterable<? extends InternalBase> objects) {
    if (objects == null) {
      return this;
    }
    // Separator-prefix idiom: empty before the first element, ", " before each later one.
    String separator = "";
    for (InternalBase object : objects) {
      builder.append(separator);
      separator = ", ";
      append(object);
    }
    return this;
  }

  /** Appends the {@link Bytes} compact form of {@code bytes}, or the text "null". */
  public TextBuilder append(byte[] bytes) {
    if (bytes != null) {
      Bytes.toCompactString(this, bytes);
      return this;
    }
    return append("null");
  }

  /** Appends the decimal representation of {@code l} and returns this builder. */
  public TextBuilder append(long l) {
    builder.append(l);
    return this;
  }

  /** Appends "true" or "false" for {@code b} and returns this builder. */
  public TextBuilder append(boolean b) {
    builder.append(b);
    return this;
  }

  /** Appends the string {@code s} and returns this builder. */
  public TextBuilder append(String s) {
    builder.append(s);
    return this;
  }

  /**
   * Appends a formatted string built from {@code format} (as in {@link java.util.Formatter})
   * and {@code args}.
   */
  public TextBuilder appendFormat(String format, Object... args) {
    formatter.format(format, args);
    return this;
  }

  @Override
  public String toString() {
    return builder.toString();
  }
}
| mit |
md-5/jdk10 | test/jdk/com/sun/jdi/InvokeVarArgs.java | 3350 | /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8075331
* @summary Verify that we can call varargs methods
* @run build TestScaffold VMConnection TargetAdapter TargetListener
* @run compile -g InvokeVarArgs.java
* @run driver InvokeVarArgs
*/
import com.sun.jdi.*;
import com.sun.jdi.event.*;
import java.util.Arrays;
interface MyInterface {
}

class SomeClass implements MyInterface {
}

// Debuggee for the InvokeVarArgs JDI driver below: the driver looks up the fields
// "array"/"array2" and the method "size([Ljava/lang/Object;)I" by name, so those
// signatures must not change.
class InvokeVarArgsTarg {
    public static void main(String args[]) {
        new InvokeVarArgsTarg().run();
    }

    SomeClass someClass1 = new SomeClass();
    SomeClass someClass2 = new SomeClass();
    MyInterface[] array = new MyInterface[]{someClass1, someClass2};
    SomeClass[] array2 = new SomeClass[]{someClass1, someClass2};

    public void run() {
        // Each array is passed directly as the varargs array, so size(...) sees 2 elements.
        String line1 = "size(array) : " + size(array);
        String line2 = "size(array2) : " + size(array2);
        System.out.println(line1);
        System.out.println(line2);
    }

    // Counts the arguments received through the varargs array.
    int size(Object... value) {
        int count = value.length;
        return count;
    }
}
/**
 * JDI driver: breaks at InvokeVarArgsTarg.run(), then uses ObjectReference.invokeMethod
 * to call the varargs method size(Object...) passing each array value directly, and
 * verifies the array is treated as the varargs array itself (length 2), not wrapped.
 */
public class InvokeVarArgs extends TestScaffold {
    public static void main(String args[]) throws Exception {
        new InvokeVarArgs(args).startTests();
    }
    InvokeVarArgs(String args[]) throws Exception {
        super(args);
    }
    protected void runTests() throws Exception {
        // Run the debuggee to the entry of run() and grab its `this` object.
        BreakpointEvent bpe = startTo("InvokeVarArgsTarg", "run", "()V");
        StackFrame frame = bpe.thread().frame(0);
        ObjectReference targetObj = frame.thisObject();
        ReferenceType targetType = (ReferenceType) targetObj.type();
        // Mirror the two array fields and the varargs method size([Ljava/lang/Object;)I.
        Value arrayVal = targetObj.getValue(targetType.fieldByName("array"));
        Value array2Val = targetObj.getValue(targetType.fieldByName("array2"));
        Method sizeMethod = targetType.methodsByName("size", "([Ljava/lang/Object;)I").get(0);
        // Invoke size(array) remotely; both arrays hold 2 elements, so 2 is expected.
        IntegerValue size = (IntegerValue) targetObj.invokeMethod(bpe.thread(), sizeMethod, Arrays.asList(new Value[]{arrayVal}), 0);
        if (size.value() != 2) {
            throw new Exception("size(array) should be 2, but was " + size.value());
        }
        size = (IntegerValue) targetObj.invokeMethod(bpe.thread(), sizeMethod, Arrays.asList(new Value[]{array2Val}), 0);
        if (size.value() != 2) {
            throw new Exception("size(array2) should be 2, but was " + size.value());
        }
        // Let the debuggee run to completion.
        listenUntilVMDisconnect();
    }
}
| gpl-2.0 |
jwren/intellij-community | java/java-impl/src/com/intellij/codeInsight/hint/LambdaDeclarationRangeHandler.java | 1140 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.codeInsight.hint;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiLambdaExpression;
import org.jetbrains.annotations.NotNull;
/**
 * Declaration-range handler for lambda expressions: the range reported as the
 * "declaration" is the span of the lambda's parameter list.
 */
public class LambdaDeclarationRangeHandler implements DeclarationRangeHandler {
  @Override
  @NotNull
  public TextRange getDeclarationRange(@NotNull final PsiElement container) {
    // This handler is only registered for lambda expressions, so the cast is safe here.
    return ((PsiLambdaExpression)container).getParameterList().getTextRange();
  }
}
| apache-2.0 |
Munazza/AndEngine | src/org/andengine/entity/modifier/IEntityModifier.java | 1631 | package org.andengine.entity.modifier;
import org.andengine.entity.IEntity;
import org.andengine.util.IMatcher;
import org.andengine.util.modifier.IModifier;
/**
* (c) 2010 Nicolas Gramlich
* (c) 2011 Zynga Inc.
*
* @author Nicolas Gramlich
* @since 11:17:50 - 19.03.2010
*/
public interface IEntityModifier extends IModifier<IEntity> {
	// ===========================================================
	// Constants
	// ===========================================================
	// ===========================================================
	// Methods
	// ===========================================================
	/**
	 * Creates a deep copy of this modifier so it can be applied independently of the original.
	 *
	 * @throws DeepCopyNotSupportedException when this modifier cannot be deep-copied.
	 */
	@Override
	public IEntityModifier deepCopy() throws DeepCopyNotSupportedException;
	// ===========================================================
	// Inner and Anonymous Classes
	// ===========================================================
	/** Listener for {@link IEntityModifier} lifecycle callbacks on an {@link IEntity} (see {@link IModifierListener}). */
	public static interface IEntityModifierListener extends IModifierListener<IEntity>{
		// ===========================================================
		// Constants
		// ===========================================================
		// ===========================================================
		// Methods
		// ===========================================================
	}
	/** Matcher over entity modifiers (see {@link IMatcher}). */
	public interface IEntityModifierMatcher extends IMatcher<IModifier<IEntity>> {
		// ===========================================================
		// Constants
		// ===========================================================
		// ===========================================================
		// Methods
		// ===========================================================
	}
}
| apache-2.0 |
chinmaymhatre91/floodlight | src/test/java/net/floodlightcontroller/packet/SPUDTest.java | 4117 | package net.floodlightcontroller.packet;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import org.junit.Test;
/**
 * Serialization and deserialization tests for the {@link SPUD} packet class.
 *
 * Across the serialize tests only the final header byte varies: 0x40 when the command
 * is OPEN, 0x00 when it is DATA, with 0x20 (ADEC) and 0x10 (PDEC) OR-ed in as flag
 * bits. The leading bytes — the fixed 0xd8 0x00 0x00 0xd8 prefix followed by the
 * 8-byte tube id 0xb64017880a510107 — are identical in every packet.
 *
 * @author Jacob Chappell (jacob.chappell@uky.edu)
 */
public class SPUDTest {
    // OPEN command, no flags: final header byte is 0x40.
    @Test
    public void testSerializeCommandOpen() {
        byte[] expected = new byte[] {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8,
            (byte) 0xb6, 0x40, 0x17, (byte) 0x88,
            0x0a, 0x51, 0x01, 0x07, 0x40
        };
        SPUD packet = (new SPUD())
                .setTubeID(0xb64017880a510107L)
                .setCommand(SPUD.COMMAND_OPEN)
                .setADEC(false)
                .setPDEC(false)
                .setReserved((byte) 0);
        byte[] actual = packet.serialize();
        assertTrue(Arrays.equals(expected, actual));
    }
    // DATA command with an empty payload and no flags: final header byte is 0x00.
    @Test
    public void testSerializeCommandDataEmpty() {
        byte[] expected = new byte[] {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8,
            (byte) 0xb6, 0x40, 0x17, (byte) 0x88,
            0x0a, 0x51, 0x01, 0x07, 0x00
        };
        SPUD packet = (new SPUD())
                .setTubeID(0xb64017880a510107L)
                .setCommand(SPUD.COMMAND_DATA)
                .setADEC(false)
                .setPDEC(false)
                .setReserved((byte) 0);
        byte[] actual = packet.serialize();
        assertTrue(Arrays.equals(expected, actual));
    }
    // DATA + ADEC only: the 0x20 flag bit appears in the final header byte.
    @Test
    public void testSerializeCommandDataEmptyWithADEC() {
        byte[] expected = new byte[] {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8,
            (byte) 0xb6, 0x40, 0x17, (byte) 0x88,
            0x0a, 0x51, 0x01, 0x07, 0x20
        };
        SPUD packet = (new SPUD())
                .setTubeID(0xb64017880a510107L)
                .setCommand(SPUD.COMMAND_DATA)
                .setADEC(true)
                .setPDEC(false)
                .setReserved((byte) 0);
        byte[] actual = packet.serialize();
        assertTrue(Arrays.equals(expected, actual));
    }
    // DATA + PDEC only: the 0x10 flag bit appears in the final header byte.
    @Test
    public void testSerializeCommandDataEmptyWithPDEC() {
        byte[] expected = new byte[] {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8,
            (byte) 0xb6, 0x40, 0x17, (byte) 0x88,
            0x0a, 0x51, 0x01, 0x07, 0x10
        };
        SPUD packet = (new SPUD())
                .setTubeID(0xb64017880a510107L)
                .setCommand(SPUD.COMMAND_DATA)
                .setADEC(false)
                .setPDEC(true)
                .setReserved((byte) 0);
        byte[] actual = packet.serialize();
        assertTrue(Arrays.equals(expected, actual));
    }
    // DATA + both flags: 0x20 | 0x10 = 0x30 in the final header byte.
    @Test
    public void testSerializeCommandDataEmptyWithBoth() {
        byte[] expected = new byte[] {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8,
            (byte) 0xb6, 0x40, 0x17, (byte) 0x88,
            0x0a, 0x51, 0x01, 0x07, 0x30
        };
        SPUD packet = (new SPUD())
                .setTubeID(0xb64017880a510107L)
                .setCommand(SPUD.COMMAND_DATA)
                .setADEC(true)
                .setPDEC(true)
                .setReserved((byte) 0);
        byte[] actual = packet.serialize();
        assertTrue(Arrays.equals(expected, actual));
    }
    // Round trip: deserializing a captured packet (header + payload bytes) and
    // re-serializing it must reproduce the original bytes exactly.
    @Test
    public void testDeserialize() throws PacketParsingException {
        byte[] spudPacket = {
            (byte) 0xd8, 0x00, 0x00, (byte) 0xd8, (byte) 0xb6,
            0x40, 0x17, (byte) 0x88, 0x0a, 0x51, 0x01, 0x07,
            0x00, (byte) 0xa1, 0x00, (byte) 0xa2, 0x68, 0x75,
            0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x65,
            0x4a, 0x61, 0x63, 0x6f, 0x62, 0x67, 0x6d, 0x65,
            0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x68, 0x61,
            0x73, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x65, 0x64,
            0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6f, 0x6f,
            0x6d
        };
        SPUD packet = new SPUD();
        packet.deserialize(spudPacket, 0, spudPacket.length);
        byte[] packetSerialized = packet.serialize();
        assertTrue(Arrays.equals(spudPacket, packetSerialized));
    }
}
| apache-2.0 |
rokn/Count_Words_2015 | testing/openjdk2/jdk/src/share/classes/sun/security/util/AuthResources_zh_CN.java | 7603 | /*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.security.util;
/**
* <p> This class represents the <code>ResourceBundle</code>
* for the following packages:
*
* <ol>
* <li> com.sun.security.auth
* <li> com.sun.security.auth.login
* </ol>
*
*/
public class AuthResources_zh_CN extends java.util.ListResourceBundle {
private static final Object[][] contents = {
// NT principals
{"invalid.null.input.value", "\u65E0\u6548\u7684\u7A7A\u8F93\u5165: {0}"},
{"NTDomainPrincipal.name", "NTDomainPrincipal: {0}"},
{"NTNumericCredential.name", "NTNumericCredential: {0}"},
{"Invalid.NTSid.value", "\u65E0\u6548\u7684 NTSid \u503C"},
{"NTSid.name", "NTSid: {0}"},
{"NTSidDomainPrincipal.name", "NTSidDomainPrincipal: {0}"},
{"NTSidGroupPrincipal.name", "NTSidGroupPrincipal: {0}"},
{"NTSidPrimaryGroupPrincipal.name", "NTSidPrimaryGroupPrincipal: {0}"},
{"NTSidUserPrincipal.name", "NTSidUserPrincipal: {0}"},
{"NTUserPrincipal.name", "NTUserPrincipal: {0}"},
// UnixPrincipals
{"UnixNumericGroupPrincipal.Primary.Group.name",
"UnixNumericGroupPrincipal [\u4E3B\u7EC4]: {0}"},
{"UnixNumericGroupPrincipal.Supplementary.Group.name",
"UnixNumericGroupPrincipal [\u8865\u5145\u7EC4]: {0}"},
{"UnixNumericUserPrincipal.name", "UnixNumericUserPrincipal: {0}"},
{"UnixPrincipal.name", "UnixPrincipal: {0}"},
// com.sun.security.auth.login.ConfigFile
{"Unable.to.properly.expand.config", "\u65E0\u6CD5\u6B63\u786E\u6269\u5C55{0}"},
{"extra.config.No.such.file.or.directory.",
"{0} (\u6CA1\u6709\u8FD9\u6837\u7684\u6587\u4EF6\u6216\u76EE\u5F55)"},
{"Configuration.Error.No.such.file.or.directory",
"\u914D\u7F6E\u9519\u8BEF:\n\t\u6CA1\u6709\u6B64\u6587\u4EF6\u6216\u76EE\u5F55"},
{"Configuration.Error.Invalid.control.flag.flag",
"\u914D\u7F6E\u9519\u8BEF: \n\t\u65E0\u6548\u7684\u63A7\u5236\u6807\u8BB0, {0}"},
{"Configuration.Error.Can.not.specify.multiple.entries.for.appName",
"\u914D\u7F6E\u9519\u8BEF:\n\t\u65E0\u6CD5\u6307\u5B9A{0}\u7684\u591A\u4E2A\u6761\u76EE"},
{"Configuration.Error.expected.expect.read.end.of.file.",
"\u914D\u7F6E\u9519\u8BEF: \n\t\u5E94\u4E3A [{0}], \u8BFB\u53D6\u7684\u662F [\u6587\u4EF6\u7ED3\u5C3E]"},
{"Configuration.Error.Line.line.expected.expect.found.value.",
"\u914D\u7F6E\u9519\u8BEF: \n\t\u884C {0}: \u5E94\u4E3A [{1}], \u627E\u5230 [{2}]"},
{"Configuration.Error.Line.line.expected.expect.",
"\u914D\u7F6E\u9519\u8BEF: \n\t\u884C {0}: \u5E94\u4E3A [{1}]"},
{"Configuration.Error.Line.line.system.property.value.expanded.to.empty.value",
"\u914D\u7F6E\u9519\u8BEF: \n\t\u884C {0}: \u7CFB\u7EDF\u5C5E\u6027 [{1}] \u6269\u5C55\u5230\u7A7A\u503C"},
// com.sun.security.auth.module.JndiLoginModule
{"username.","\u7528\u6237\u540D: "},
{"password.","\u53E3\u4EE4: "},
// com.sun.security.auth.module.KeyStoreLoginModule
{"Please.enter.keystore.information",
"\u8BF7\u8F93\u5165\u5BC6\u94A5\u5E93\u4FE1\u606F"},
{"Keystore.alias.","\u5BC6\u94A5\u5E93\u522B\u540D: "},
{"Keystore.password.","\u5BC6\u94A5\u5E93\u53E3\u4EE4: "},
{"Private.key.password.optional.",
"\u79C1\u6709\u5BC6\u94A5\u53E3\u4EE4 (\u53EF\u9009): "},
// com.sun.security.auth.module.Krb5LoginModule
{"Kerberos.username.defUsername.",
"Kerberos \u7528\u6237\u540D [{0}]: "},
{"Kerberos.password.for.username.",
"{0}\u7684 Kerberos \u53E3\u4EE4: "},
/*** EVERYTHING BELOW IS DEPRECATED ***/
// com.sun.security.auth.PolicyFile
{".error.parsing.", ": \u89E3\u6790\u65F6\u51FA\u9519 "},
{"COLON", ": "},
{".error.adding.Permission.", ": \u6DFB\u52A0\u6743\u9650\u65F6\u51FA\u9519 "},
{"SPACE", " "},
{".error.adding.Entry.", ": \u6DFB\u52A0\u6761\u76EE\u65F6\u51FA\u9519 "},
{"LPARAM", "("},
{"RPARAM", ")"},
{"attempt.to.add.a.Permission.to.a.readonly.PermissionCollection",
"\u5C1D\u8BD5\u5C06\u6743\u9650\u6DFB\u52A0\u81F3\u53EA\u8BFB\u7684 PermissionCollection"},
// com.sun.security.auth.PolicyParser
{"expected.keystore.type", "\u5E94\u4E3A\u5BC6\u94A5\u5E93\u7C7B\u578B"},
{"can.not.specify.Principal.with.a.wildcard.class.without.a.wildcard.name",
"\u6CA1\u6709\u901A\u914D\u7B26\u540D\u79F0, \u65E0\u6CD5\u4F7F\u7528\u901A\u914D\u7B26\u7C7B\u6307\u5B9A\u4E3B\u7528\u6237"},
{"expected.codeBase.or.SignedBy", "\u5E94\u4E3A codeBase \u6216 SignedBy"},
{"only.Principal.based.grant.entries.permitted",
"\u53EA\u5141\u8BB8\u57FA\u4E8E\u4E3B\u7528\u6237\u7684\u6388\u6743\u6761\u76EE"},
{"expected.permission.entry", "\u5E94\u4E3A\u6743\u9650\u6761\u76EE"},
{"number.", "\u7F16\u53F7 "},
{"expected.expect.read.end.of.file.",
"\u5E94\u4E3A{0}, \u8BFB\u53D6\u7684\u662F\u6587\u4EF6\u7ED3\u5C3E"},
{"expected.read.end.of.file", "\u5E94\u4E3A ';', \u8BFB\u53D6\u7684\u662F\u6587\u4EF6\u7ED3\u5C3E"},
{"line.", "\u884C "},
{".expected.", ": \u5E94\u4E3A '"},
{".found.", "', \u627E\u5230 '"},
{"QUOTE", "'"},
// SolarisPrincipals
{"SolarisNumericGroupPrincipal.Primary.Group.",
"SolarisNumericGroupPrincipal [\u4E3B\u7EC4]: "},
{"SolarisNumericGroupPrincipal.Supplementary.Group.",
"SolarisNumericGroupPrincipal [\u8865\u5145\u7EC4]: "},
{"SolarisNumericUserPrincipal.",
"SolarisNumericUserPrincipal: "},
{"SolarisPrincipal.", "SolarisPrincipal: "},
// provided.null.name is the NullPointerException message when a
// developer incorrectly passes a null name to the constructor of
// subclasses of java.security.Principal
{"provided.null.name", "\u63D0\u4F9B\u7684\u540D\u79F0\u4E3A\u7A7A\u503C"}
};
/**
* Returns the contents of this <code>ResourceBundle</code>.
*
* <p>
*
* @return the contents of this <code>ResourceBundle</code>.
*/
public Object[][] getContents() {
return contents;
}
}
| mit |
josh-mckenzie/cassandra | src/java/org/apache/cassandra/index/sasi/utils/MappedBuffer.java | 6743 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.index.sasi.utils;
import java.io.Closeable;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel.MapMode;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.io.util.ChannelProxy;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.io.util.RandomAccessReader;
import com.google.common.annotations.VisibleForTesting;
/**
 * A read-only, {@code long}-addressable view over a memory-mapped file.
 *
 * <p>Because a single {@link MappedByteBuffer} is limited to {@code int}
 * offsets, the file is mapped as an array of fixed-size pages (2^sizeBits
 * bytes each; every page except possibly the last is full-size). Absolute
 * positions are translated to a (page index, page offset) pair with shifts
 * and masks, so the page size must be a power of two.
 *
 * <p>Not thread-safe: position/limit cursors are per-instance; use
 * {@link #duplicate()} to obtain an independent cursor over the same pages.
 */
public class MappedBuffer implements Closeable
{
    // Mapped pages backing the file; shared between duplicates.
    private final MappedByteBuffer[] pages;

    // Cursor and read boundary, both absolute file offsets (0 <= position <= limit <= capacity).
    private long position, limit;

    // Total mapped size of the file in bytes.
    private final long capacity;

    // pageSize == 1 << sizeBits; kept together so page math stays consistent.
    private final int pageSize, sizeBits;

    /**
     * Copy constructor backing {@link #duplicate()}: shares the mapped pages
     * but gives the copy independent position/limit cursors.
     */
    private MappedBuffer(MappedBuffer other)
    {
        this.sizeBits = other.sizeBits;
        this.pageSize = other.pageSize;
        this.position = other.position;
        this.limit = other.limit;
        this.capacity = other.capacity;
        this.pages = other.pages;
    }

    public MappedBuffer(RandomAccessReader file)
    {
        this(file.getChannel(), 30); // 1G pages by default
    }

    public MappedBuffer(ChannelProxy file)
    {
        this(file, 30); // 1G pages by default
    }

    /**
     * Maps the whole channel as read-only pages of 2^numPageBits bytes.
     * The channel is always closed before returning, even if mapping fails.
     *
     * @param file        channel to map; closed by this constructor
     * @param numPageBits log2 of the page size, at most {@code Integer.SIZE - 1}
     * @throws IllegalArgumentException if the requested page size would overflow an int
     */
    @VisibleForTesting
    protected MappedBuffer(ChannelProxy file, int numPageBits)
    {
        if (numPageBits > Integer.SIZE - 1)
            throw new IllegalArgumentException("page size can't be bigger than 1G");

        sizeBits = numPageBits;
        pageSize = 1 << sizeBits;
        position = 0;
        limit = capacity = file.size();
        pages = new MappedByteBuffer[(int) (file.size() / pageSize) + 1];

        try
        {
            long offset = 0;
            for (int i = 0; i < pages.length; i++)
            {
                // the final page may be shorter than a full page
                long mappedSize = Math.min(pageSize, capacity - offset);
                pages[i] = file.map(MapMode.READ_ONLY, offset, mappedSize);
                offset += mappedSize;
            }
        }
        finally
        {
            file.close();
        }
    }

    /**
     * Compares a single-page region of this buffer against {@code other}
     * using the given comparator. The region must not cross a page boundary.
     */
    public int comparePageTo(long offset, int length, AbstractType<?> comparator, ByteBuffer other)
    {
        return comparator.compare(getPageRegion(offset, length), other);
    }

    /** @return total mapped size of the file in bytes */
    public long capacity()
    {
        return capacity;
    }

    /** @return current absolute read position */
    public long position()
    {
        return position;
    }

    /**
     * Moves the read position.
     *
     * @throws IllegalArgumentException if the new position is negative or past the limit
     */
    public MappedBuffer position(long newPosition)
    {
        if (newPosition < 0 || newPosition > limit)
            throw new IllegalArgumentException("position: " + newPosition + ", limit: " + limit);

        position = newPosition;
        return this;
    }

    /** @return current read boundary (absolute offset) */
    public long limit()
    {
        return limit;
    }

    /**
     * Sets the read boundary.
     *
     * @throws IllegalArgumentException if the new limit is before the current position or past capacity
     */
    public MappedBuffer limit(long newLimit)
    {
        if (newLimit < position || newLimit > capacity)
            throw new IllegalArgumentException("limit: " + newLimit + ", position: " + position + ", capacity: " + capacity);

        limit = newLimit;
        return this;
    }

    /** @return number of bytes between the current position and the limit */
    public long remaining()
    {
        return limit - position;
    }

    /** @return true if at least one byte remains before the limit */
    public boolean hasRemaining()
    {
        return remaining() > 0;
    }

    /** Reads the byte at the current position and advances by 1. */
    public byte get()
    {
        return get(position++);
    }

    /** Absolute read of a single byte; does not move the cursor. */
    public byte get(long pos)
    {
        return pages[getPage(pos)].get(getPageOffset(pos));
    }

    /** Reads a big-endian short at the current position and advances by 2. */
    public short getShort()
    {
        short value = getShort(position);
        position += 2;
        return value;
    }

    /** Absolute big-endian short read; does not move the cursor. */
    public short getShort(long pos)
    {
        // fast path when both bytes live on the same page
        if (isPageAligned(pos, 2))
            return pages[getPage(pos)].getShort(getPageOffset(pos));

        // slow path: assemble the value byte-by-byte across the page boundary
        int ch1 = get(pos) & 0xff;
        int ch2 = get(pos + 1) & 0xff;
        return (short) ((ch1 << 8) + ch2);
    }

    /** Reads a big-endian int at the current position and advances by 4. */
    public int getInt()
    {
        int value = getInt(position);
        position += 4;
        return value;
    }

    /** Absolute big-endian int read; does not move the cursor. */
    public int getInt(long pos)
    {
        // fast path when all four bytes live on the same page
        if (isPageAligned(pos, 4))
            return pages[getPage(pos)].getInt(getPageOffset(pos));

        // slow path: assemble the value byte-by-byte across the page boundary
        int ch1 = get(pos) & 0xff;
        int ch2 = get(pos + 1) & 0xff;
        int ch3 = get(pos + 2) & 0xff;
        int ch4 = get(pos + 3) & 0xff;
        return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4);
    }

    /** Reads a big-endian long at the current position and advances by 8. */
    public long getLong()
    {
        long value = getLong(position);
        position += 8;
        return value;
    }

    /** Absolute big-endian long read; does not move the cursor. */
    public long getLong(long pos)
    {
        // fast path if the long could be retrieved from a single page
        // that would avoid multiple expensive look-ups into page array.
        return (isPageAligned(pos, 8))
                ? pages[getPage(pos)].getLong(getPageOffset(pos))
                : ((long) (getInt(pos)) << 32) + (getInt(pos + 4) & 0xFFFFFFFFL);
    }

    /**
     * Returns a ByteBuffer slice over [position, position + length), which must
     * lie entirely within a single page.
     *
     * @throws IllegalArgumentException if the region crosses a page boundary
     */
    public ByteBuffer getPageRegion(long position, int length)
    {
        if (!isPageAligned(position, length))
            throw new IllegalArgumentException(String.format("range: %s-%s wraps more than one page", position, length));

        ByteBuffer slice = pages[getPage(position)].duplicate();

        int pageOffset = getPageOffset(position);
        slice.position(pageOffset).limit(pageOffset + length);

        return slice;
    }

    /** @return an independent cursor over the same mapped pages */
    public MappedBuffer duplicate()
    {
        return new MappedBuffer(this);
    }

    public void close()
    {
        /*
         * Try forcing the unmapping of pages using undocumented unsafe sun APIs.
         * If this fails (non Sun JVM), we'll have to wait for the GC to finalize the mapping.
         * If this works and a thread tries to access any page, hell will unleash on earth.
         */
        try
        {
            for (MappedByteBuffer segment : pages)
                FileUtils.clean(segment);
        }
        catch (Exception e)
        {
            // Cleaning is strictly best-effort; failure just delays unmapping until GC.
        }
    }

    /** Index into {@link #pages} for the given absolute position. */
    private int getPage(long position)
    {
        return (int) (position >> sizeBits);
    }

    /** Offset within its page for the given absolute position. */
    private int getPageOffset(long position)
    {
        return (int) (position & pageSize - 1);
    }

    /**
     * True iff the region [position, position + length) fits within a single page,
     * i.e. its page-local end offset does not exceed the page size.
     *
     * <p>Fix: the previous strict comparison ({@code > 0}) rejected regions ending
     * exactly on a page boundary, which made {@link #getPageRegion} throw for
     * valid exact-fit requests (e.g. a full-page region) and forced the multi-byte
     * getters onto the slow cross-page path for reads ending at a page boundary.
     */
    private boolean isPageAligned(long position, int length)
    {
        return pageSize - (getPageOffset(position) + length) >= 0;
    }
}
| apache-2.0 |