repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15
values |
|---|---|---|---|---|
smmribeiro/intellij-community | java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/redundantInstanceOf/beforeInstanceOfPattern.java | 190 | // "Replace with a null check" "true"
class Test {
void test(String s) {
Object object = s;
if(object instanceof <caret>String s1) {
System.out.println("always");
}
}
} | apache-2.0 |
apache/flink | flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/connector/datagen/table/DataGenTableSourceFactory.java | 5745 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.datagen.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.streaming.api.functions.source.datagen.DataGenerator;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.types.DataType;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.apache.flink.configuration.ConfigOptions.key;
import static org.apache.flink.table.factories.FactoryUtil.CONNECTOR;
/**
* Factory for creating configured instances of {@link DataGenTableSource} in a stream environment.
*/
@Internal
public class DataGenTableSourceFactory implements DynamicTableSourceFactory {

    /** Connector identifier under which this factory is registered. */
    public static final String IDENTIFIER = "datagen";

    @Override
    public String factoryIdentifier() {
        return IDENTIFIER;
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        // The datagen connector has no mandatory options.
        return new HashSet<>();
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        Set<ConfigOption<?>> supported = new HashSet<>();
        supported.add(DataGenConnectorOptions.ROWS_PER_SECOND);
        supported.add(DataGenConnectorOptions.NUMBER_OF_ROWS);
        // Placeholder options: the concrete per-column keys are derived from
        // the physical schema in createDynamicTableSource.
        supported.add(DataGenConnectorOptions.FIELD_KIND);
        supported.add(DataGenConnectorOptions.FIELD_MIN);
        supported.add(DataGenConnectorOptions.FIELD_MAX);
        supported.add(DataGenConnectorOptions.FIELD_MAX_PAST);
        supported.add(DataGenConnectorOptions.FIELD_LENGTH);
        supported.add(DataGenConnectorOptions.FIELD_START);
        supported.add(DataGenConnectorOptions.FIELD_END);
        return supported;
    }

    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        // Copy all table options into a Configuration for typed access.
        Configuration config = new Configuration();
        context.getCatalogTable().getOptions().forEach(config::setString);

        DataType rowType = context.getPhysicalRowDataType();
        List<String> names = DataType.getFieldNames(rowType);
        List<DataType> types = DataType.getFieldDataTypes(rowType);

        int fieldCount = DataType.getFieldCount(rowType);
        DataGenerator<?>[] generators = new DataGenerator[fieldCount];
        // Options recognized for this particular schema: the per-field kind
        // option plus whatever each generator container declares.
        Set<ConfigOption<?>> recognized = new HashSet<>();

        for (int i = 0; i < fieldCount; i++) {
            String fieldName = names.get(i);
            DataType fieldType = types.get(i);

            String kindKey =
                    DataGenConnectorOptionsUtil.FIELDS
                            + "."
                            + fieldName
                            + "."
                            + DataGenConnectorOptionsUtil.KIND;
            ConfigOption<String> kindOption =
                    key(kindKey).stringType().defaultValue(DataGenConnectorOptionsUtil.RANDOM);

            DataGeneratorContainer container =
                    createContainer(fieldName, fieldType, config.get(kindOption), config);

            generators[i] = container.getGenerator();
            recognized.add(kindOption);
            recognized.addAll(container.getOptions());
        }

        FactoryUtil.validateFactoryOptions(requiredOptions(), recognized, config);

        // Reject any option key that neither the connector nor a generator consumed.
        Set<String> consumedKeys = new HashSet<>();
        consumedKeys.add(CONNECTOR.key());
        consumedKeys.add(DataGenConnectorOptions.ROWS_PER_SECOND.key());
        consumedKeys.add(DataGenConnectorOptions.NUMBER_OF_ROWS.key());
        for (ConfigOption<?> option : recognized) {
            consumedKeys.add(option.key());
        }
        FactoryUtil.validateUnconsumedKeys(factoryIdentifier(), config.keySet(), consumedKeys);

        return new DataGenTableSource(
                generators,
                context.getObjectIdentifier().toString(),
                rowType,
                config.get(DataGenConnectorOptions.ROWS_PER_SECOND),
                config.get(DataGenConnectorOptions.NUMBER_OF_ROWS));
    }

    /**
     * Builds the generator container for a single column.
     *
     * @param name column name, used for option scoping and error reporting
     * @param type physical column type
     * @param kind generator kind, either random or sequence
     * @param options the full connector configuration
     */
    private DataGeneratorContainer createContainer(
            String name, DataType type, String kind, ReadableConfig options) {
        switch (kind) {
            case DataGenConnectorOptionsUtil.RANDOM:
                return type.getLogicalType().accept(new RandomGeneratorVisitor(name, options));
            case DataGenConnectorOptionsUtil.SEQUENCE:
                return type.getLogicalType().accept(new SequenceGeneratorVisitor(name, options));
            default:
                throw new ValidationException("Unsupported generator kind: " + kind);
        }
    }
}
| apache-2.0 |
sysalexis/mr4c | java/test/java/com/google/mr4c/sources/CompositeKeyFileMapperTest.java | 3718 | /**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.sources;
import com.google.mr4c.keys.DataKey;
import com.google.mr4c.keys.DataKeyDimension;
import com.google.mr4c.keys.DataKeyElement;
import com.google.mr4c.keys.DataKeyFactory;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.junit.*;
import static org.junit.Assert.*;
public class CompositeKeyFileMapperTest {

    // One naming pattern per delegate mapper registered on the composite.
    private String imagePattern = "image_${dim1}_${dim2}.jpg";
    private String videoPattern = "video_${dim1}_${dim2}.mpg";
    private String binPattern = "whatever_${dim1}.bin";

    // File names matching (or, for unknownFile, matching none of) the patterns.
    private String imageFile = "image_val1_val2.jpg";
    private String videoFile = "video_val1_val2.mpg";
    private String binFile = "whatever_val1.bin";
    private String unknownFile = "something_else";

    private DataKeyDimension dim1 = new DataKeyDimension("dim1");
    private DataKeyDimension dim2 = new DataKeyDimension("dim2");

    private DataKeyElement element1;
    private DataKeyElement element2;

    private DataKey twoDimKey;  // dim1 + dim2
    private DataKey oneDimKey;  // dim1 only
    private DataKey emptyKey;   // no dimensions; no delegate can handle it

    private CompositeKeyFileMapper mapper;

    @Before public void setUp() {
        element1 = new DataKeyElement("val1", dim1);
        element2 = new DataKeyElement("val2", dim2);
        twoDimKey = DataKeyFactory.newKey(element1, element2);
        oneDimKey = DataKeyFactory.newKey(element1);
        emptyKey = DataKeyFactory.newKey();
        mapper = new CompositeKeyFileMapper();
        mapper.addMapper(newPatternMapper(imagePattern, dim1, dim2));
        mapper.addMapper(newPatternMapper(videoPattern, dim1, dim2));
        mapper.addMapper(newPatternMapper(binPattern, dim1));
    }

    // Builds a single pattern-based delegate over the given dimensions.
    private DataKeyFileMapper newPatternMapper(String pattern, DataKeyDimension ... dims) {
        Set<DataKeyDimension> dimSet = new HashSet<DataKeyDimension>(Arrays.asList(dims));
        return new PatternKeyFileMapper(pattern, dimSet);
    }

    @Test public void testParse() {
        assertParsesTo(imageFile, twoDimKey);
        assertParsesTo(videoFile, twoDimKey);
        assertParsesTo(binFile, oneDimKey);
    }

    private void assertParsesTo(String fileName, DataKey expected) {
        assertEquals(expected, mapper.getKey(fileName));
    }

    @Test(expected=IllegalArgumentException.class)
    public void testParseFail() {
        mapper.getKey(unknownFile);
    }

    @Test public void testFormat() {
        assertFormatsTo(twoDimKey, imageFile);
        assertFormatsTo(oneDimKey, binFile);
    }

    private void assertFormatsTo(DataKey key, String expected) {
        assertEquals(expected, mapper.getFileName(key));
    }

    @Test(expected=IllegalArgumentException.class)
    public void testFormatFail() {
        mapper.getFileName(emptyKey);
    }

    @Test public void testMatchNameTrue() {
        assertTrue(mapper.canMapName(imageFile));
        assertTrue(mapper.canMapName(videoFile));
        assertTrue(mapper.canMapName(binFile));
    }

    @Test public void testMatchNameFalse() {
        assertFalse(mapper.canMapName(unknownFile));
    }

    @Test public void testMatchKeyTrue() {
        assertTrue(mapper.canMapKey(twoDimKey));
        assertTrue(mapper.canMapKey(oneDimKey));
    }

    @Test public void testMatchKeyFalse() {
        assertFalse(mapper.canMapKey(emptyKey));
    }
}
| apache-2.0 |
sheliu/openregistry | openregistry-webapp/src/main/java/org/openregistry/core/web/resources/config/DefaultLockExtractor.java | 1155 | /**
* Licensed to Jasig under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Jasig licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.openregistry.core.web.resources.config;
import java.security.Principal;
import java.util.Objects;
/**
* Extracts the Principal name from the Principal.
*
* @version $Revision$ $Date$
* @since 0.1
*/
public final class DefaultLockExtractor implements LockExtractor {

    /**
     * Returns the lock owner's identifier, which for this default
     * implementation is simply the principal's name.
     *
     * @param principal the authenticated principal; must not be null
     * @param lockValue ignored by this implementation
     * @return the principal's name
     * @throws NullPointerException if {@code principal} is null
     */
    public String extract(final Principal principal, final String lockValue) {
        // Fail fast with a descriptive message instead of an opaque NPE
        // from the dereference below.
        return Objects.requireNonNull(principal, "principal must not be null").getName();
    }
}
| apache-2.0 |
bclozel/spring-boot | spring-boot-project/spring-boot/src/main/java/org/springframework/boot/logging/logback/SpringBootJoranConfigurator.java | 1884 | /*
* Copyright 2012-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.logging.logback;
import ch.qos.logback.classic.joran.JoranConfigurator;
import ch.qos.logback.core.joran.action.NOPAction;
import ch.qos.logback.core.joran.spi.ElementSelector;
import ch.qos.logback.core.joran.spi.RuleStore;
import org.springframework.boot.logging.LoggingInitializationContext;
import org.springframework.core.env.Environment;
/**
* Extended version of the Logback {@link JoranConfigurator} that adds additional Spring
* Boot rules.
*
* @author Phillip Webb
*/
class SpringBootJoranConfigurator extends JoranConfigurator {

	/** Context providing access to the Spring {@link Environment}; set once, never reassigned. */
	private final LoggingInitializationContext initializationContext;

	SpringBootJoranConfigurator(LoggingInitializationContext initializationContext) {
		this.initializationContext = initializationContext;
	}

	/**
	 * Registers the Spring Boot specific rules on top of the standard Logback
	 * ones: {@code <springProperty>} (exposes Environment properties to the
	 * logback config) and {@code <springProfile>} (profile-conditional sections).
	 */
	@Override
	public void addInstanceRules(RuleStore rs) {
		super.addInstanceRules(rs);
		// Fetch the environment once and reuse it for both rules (previously
		// the second rule re-invoked getEnvironment()).
		Environment environment = this.initializationContext.getEnvironment();
		rs.addRule(new ElementSelector("configuration/springProperty"),
				new SpringPropertyAction(environment));
		rs.addRule(new ElementSelector("*/springProfile"),
				new SpringProfileAction(environment));
		// NOTE(review): NOPAction presumably leaves elements nested inside
		// <springProfile> to the profile action itself -- confirm against
		// SpringProfileAction's handling.
		rs.addRule(new ElementSelector("*/springProfile/*"), new NOPAction());
	}

}
| apache-2.0 |
iloveyou416068/CookNIOServer | netty_source_4_0_25/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java | 3515 | /*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.websocketx;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;
/**
* Web Socket continuation frame containing continuation text or binary data. This is used for
* fragmented messages where the contents of a messages is contained more than 1 frame.
*/
public class ContinuationWebSocketFrame extends WebSocketFrame {

    /**
     * Creates a new empty continuation frame.
     */
    public ContinuationWebSocketFrame() {
        this(Unpooled.buffer(0));
    }

    /**
     * Creates a new continuation frame with the specified binary data. The final fragment flag is
     * set to true.
     *
     * @param binaryData the content of the frame.
     */
    public ContinuationWebSocketFrame(ByteBuf binaryData) {
        super(binaryData);
    }

    /**
     * Creates a new continuation frame with the specified binary data
     *
     * @param finalFragment
     *            flag indicating if this frame is the final fragment
     * @param rsv
     *            reserved bits used for protocol extensions
     * @param binaryData
     *            the content of the frame.
     */
    public ContinuationWebSocketFrame(boolean finalFragment, int rsv, ByteBuf binaryData) {
        super(finalFragment, rsv, binaryData);
    }

    /**
     * Creates a new continuation frame with the specified text data, encoded as UTF-8.
     *
     * @param finalFragment
     *            flag indicating if this frame is the final fragment
     * @param rsv
     *            reserved bits used for protocol extensions
     * @param text
     *            text content of the frame.
     */
    public ContinuationWebSocketFrame(boolean finalFragment, int rsv, String text) {
        this(finalFragment, rsv, fromText(text));
    }

    /**
     * Returns the frame content decoded as a UTF-8 string.
     */
    public String text() {
        return content().toString(CharsetUtil.UTF_8);
    }

    /**
     * Encodes the given text as a UTF-8 {@link ByteBuf}.
     *
     * @param text
     *            text to encode; {@code null} or empty yields the shared empty buffer
     */
    private static ByteBuf fromText(String text) {
        if (text == null || text.isEmpty()) {
            return Unpooled.EMPTY_BUFFER;
        } else {
            return Unpooled.copiedBuffer(text, CharsetUtil.UTF_8);
        }
    }

    /**
     * Returns a deep copy of this frame: same flags, content copied into a new buffer.
     */
    @Override
    public ContinuationWebSocketFrame copy() {
        return new ContinuationWebSocketFrame(isFinalFragment(), rsv(), content().copy());
    }

    /**
     * Returns a duplicate that shares this frame's content buffer (no copy).
     */
    @Override
    public ContinuationWebSocketFrame duplicate() {
        return new ContinuationWebSocketFrame(isFinalFragment(), rsv(), content().duplicate());
    }

    // retain()/retain(int) are overridden only to narrow the return type to
    // this class; reference counting is delegated to the superclass.
    @Override
    public ContinuationWebSocketFrame retain() {
        super.retain();
        return this;
    }

    @Override
    public ContinuationWebSocketFrame retain(int increment) {
        super.retain(increment);
        return this;
    }
}
| apache-2.0 |
samaitra/ignite | modules/core/src/test/java/org/apache/ignite/platform/PlatformStopIgniteTask.java | 2795 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.platform;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.ignite.IgniteException;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskAdapter;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
* Task to stop an Ignite node.
*/
public class PlatformStopIgniteTask extends ComputeTaskAdapter<String, Boolean> {
    /**
     * Maps the stop job to exactly one node, preferring the local node when it
     * is part of the subgrid and otherwise falling back to the first node.
     *
     * {@inheritDoc}
     */
    @NotNull @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
        @Nullable String arg) throws IgniteException {
        ClusterNode node = subgrid.get(0);

        for (ClusterNode n : subgrid) {
            if (n.isLocal()) {
                node = n;

                break;
            }
        }

        return Collections.singletonMap(new PlatformStopIgniteJob(arg), node);
    }

    /**
     * Returns the single job's result, rethrowing its exception on failure.
     *
     * {@inheritDoc}
     */
    @Nullable @Override public Boolean reduce(List<ComputeJobResult> results) throws IgniteException {
        ComputeJobResult res = results.get(0);

        if (res.getException() != null)
            throw res.getException();

        // Reuse the already-extracted result instead of a redundant second
        // results.get(0) lookup.
        return res.getData();
    }

    /**
     * Job that stops the named Ignite instance on whichever node it runs on.
     */
    private static class PlatformStopIgniteJob extends ComputeJobAdapter {
        /** Name of the Ignite instance to stop. */
        private final String igniteInstanceName;

        /**
         * Ctor.
         *
         * @param igniteInstanceName Name.
         */
        private PlatformStopIgniteJob(String igniteInstanceName) {
            this.igniteInstanceName = igniteInstanceName;
        }

        /** {@inheritDoc} */
        @Override public Object execute() throws IgniteException {
            // Second argument 'true' requests cancellation of in-flight
            // operations rather than waiting -- see Ignition.stop javadoc.
            return Ignition.stop(igniteInstanceName, true);
        }
    }
}
| apache-2.0 |
hequn8128/flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionVertexTest.java | 3518 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;
import org.apache.flink.runtime.jobgraph.DistributionPattern;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.util.TestLogger;
import org.junit.Test;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.junit.Assert.assertFalse;
/**
* Tests for the {@link ExecutionVertex}.
*/
public class ExecutionVertexTest extends TestLogger {

    /**
     * Verifies that resetting a producer vertex for a new execution is the
     * event that releases the BLOCKING result partitions of its previous
     * (finished) execution attempt -- not scheduling, and not finishing.
     */
    @Test
    public void testResetForNewExecutionReleasesPartitions() throws Exception {
        // Producer -> consumer over a pointwise BLOCKING edge, so the produced
        // partition must be tracked until it is explicitly released.
        final JobVertex producerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
        final JobVertex consumerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
        consumerJobVertex.connectNewDataSetAsInput(producerJobVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

        // Completes once the tracker is asked to stop tracking and release a
        // set of partitions; used below to observe when the release happens.
        final CompletableFuture<Collection<ResultPartitionID>> releasePartitionsFuture = new CompletableFuture<>();
        final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
        partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasePartitionsFuture::complete);

        final ExecutionGraph executionGraph = TestingExecutionGraphBuilder
            .newBuilder()
            .setJobGraph(new JobGraph(producerJobVertex, consumerJobVertex))
            .setPartitionTracker(partitionTracker)
            .build();

        executionGraph.scheduleForExecution();

        final ExecutionJobVertex producerExecutionJobVertex = executionGraph.getJobVertex(producerJobVertex.getID());

        Execution execution = producerExecutionJobVertex
            .getTaskVertices()[0]
            .getCurrentExecutionAttempt();

        // Scheduling alone must not release the partition...
        assertFalse(releasePartitionsFuture.isDone());

        execution.markFinished();

        // ...and neither must finishing the producer attempt.
        assertFalse(releasePartitionsFuture.isDone());

        producerExecutionJobVertex.resetForNewExecution(1L, 1L);

        // Look up the partition produced by the finished attempt and check that
        // exactly this partition was handed to the tracker for release.
        final IntermediateResultPartitionID intermediateResultPartitionID = producerExecutionJobVertex
            .getProducedDataSets()[0]
            .getPartitions()[0]
            .getPartitionId();
        final ResultPartitionID resultPartitionID = execution
            .getResultPartitionDeploymentDescriptor(intermediateResultPartitionID)
            .get()
            .getShuffleDescriptor()
            .getResultPartitionID();

        assertThat(releasePartitionsFuture.get(), contains(resultPartitionID));
    }
}
| apache-2.0 |
xiaoyanit/cgeo | main/src/cgeo/geocaching/utils/AsyncTaskWithProgress.java | 3298 | package cgeo.geocaching.utils;
import android.app.Activity;
import android.app.ProgressDialog;
/**
* AsyncTask which automatically shows a progress dialog. The progress is tracked with integers.
*
* Use it like the {@code AsyncTask} class, but leave away the middle template parameter. Override
* {@link #doInBackgroundInternal(Object[])} and related methods.
*
* <p>
* If no style is given, the progress dialog uses "determinate" style with known maximum. The progress maximum is
* automatically derived from the number of {@code Params} given to the task in {@link #execute(Object...)}.
* </p>
*
* <p>
* Use {@code publishProgress(Integer)} to change the current progress.
* </p>
*
*/
public abstract class AsyncTaskWithProgress<Params, Result> extends AbstractAsyncTaskWithProgress<Params, Integer, Result> {

    // Whether the dialog runs in "indeterminate" (spinner) mode instead of the
    // default horizontal bar with a known maximum. Final: assigned exactly once
    // by the designated constructor (all other constructors delegate to it).
    private final boolean indeterminate;

    /**
     * Creates an AsyncTask with a determinate progress dialog.
     *
     * @param activity the owning activity; may be null, in which case no dialog is shown
     * @param progressTitle title of the progress dialog
     * @param progressMessage message shown in the progress dialog
     */
    public AsyncTaskWithProgress(final Activity activity, final String progressTitle, final String progressMessage) {
        this(activity, progressTitle, progressMessage, false);
    }

    /**
     * Creates an AsyncTask with a determinate progress dialog and no message.
     *
     * @param activity the owning activity; may be null, in which case no dialog is shown
     * @param progressTitle title of the progress dialog
     */
    public AsyncTaskWithProgress(final Activity activity, final String progressTitle) {
        this(activity, progressTitle, null);
    }

    /**
     * Creates an AsyncTask with progress dialog. This is the designated
     * constructor the other overloads delegate to.
     *
     * @param activity the owning activity; may be null, in which case no dialog is shown
     * @param progressTitle title of the progress dialog
     * @param progressMessage message shown in the progress dialog, may be null
     * @param indeterminate true for a spinner-style dialog without a known maximum
     */
    public AsyncTaskWithProgress(final Activity activity, final String progressTitle, final String progressMessage, final boolean indeterminate) {
        super(activity, progressTitle, progressMessage);
        this.indeterminate = indeterminate;
    }

    /**
     * Creates an AsyncTask with progress dialog and no message.
     *
     * @param activity the owning activity; may be null, in which case no dialog is shown
     * @param progressTitle title of the progress dialog
     * @param indeterminate true for a spinner-style dialog without a known maximum
     */
    public AsyncTaskWithProgress(final Activity activity, final String progressTitle, final boolean indeterminate) {
        this(activity, progressTitle, null, indeterminate);
    }

    /**
     * Shows the progress dialog (when an activity is attached) before the
     * background work starts, then delegates to {@code onPreExecuteInternal()}.
     */
    @Override
    protected final void onPreExecute() {
        if (activity != null) {
            if (indeterminate) {
                progress.show(activity, progressTitle, progressMessage, true, null);
            } else {
                progress.show(activity, progressTitle, progressMessage, ProgressDialog.STYLE_HORIZONTAL, null);
            }
        }
        onPreExecuteInternal();
    }

    /**
     * Pushes the new progress value into the dialog (negative values are
     * ignored), then delegates to {@code onProgressUpdateInternal(int)}.
     *
     * @param status
     *            The new progress status; only the first element is used
     */
    @Override
    protected final void onProgressUpdate(final Integer... status) {
        final int progressValue = status[0];
        if (activity != null && progressValue >= 0) {
            progress.setProgress(progressValue);
        }
        onProgressUpdateInternal(progressValue);
    }

    /**
     * Launches the process in background. The progress maximum is derived from
     * the number of parameters, then work is delegated to
     * {@code doInBackgroundInternal(Params...)}.
     *
     * @param params
     *            The parameters of the task.
     * @return
     *            A result, defined by the subclass of this task.
     */
    @SuppressWarnings("unchecked")
    @Override
    protected final Result doInBackground(final Params... params) {
        if (params != null) {
            progress.setMaxProgressAndReset(params.length);
        }
        return doInBackgroundInternal(params);
    }
}
| apache-2.0 |
Drifftr/devstudio-tooling-bps | plugins/org.eclipse.bpel.ui.noEmbeddedEditors/src/org/eclipse/bpel/ui/proposal/providers/Separator.java | 755 | /*******************************************************************************
* Copyright (c) 2006, 2012 Oracle Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Oracle Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.bpel.ui.proposal.providers;
/**
 * Marker proposal type with no content or behavior of its own; everything is
 * inherited from {@link AbstractContentProposal}. NOTE(review): presumably the
 * proposal UI detects this type and renders a visual separator between groups
 * of proposals -- confirm against the proposal provider/renderer code.
 *
 * @author Michal Chmielewski (michal.chmielewski@oracle.com)
 * @date Jul 25, 2006
 *
 */
public class Separator extends AbstractContentProposal {
}
| apache-2.0 |
ChristianMurphy/uPortal | uPortal-portlets/src/main/java/org/apereo/portal/portlets/statistics/JsonDataTable.java | 1170 | /**
* Licensed to Apereo under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright ownership. Apereo
* licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at the
* following location:
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apereo.portal.portlets.statistics;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.google.visualization.datasource.datatable.DataTable;
/**
 * JsonDataTable extends Google's DataTable class so that we can use a custom Json serializer.
 * The body is intentionally empty: the subtype exists only to carry the
 * {@code @JsonSerialize} annotation binding {@link GoogleDataTableSerializer}.
 */
@JsonSerialize(using = GoogleDataTableSerializer.class)
public class JsonDataTable extends DataTable {}
| apache-2.0 |
skinzer/elephant-bird | core/src/test/java/com/twitter/elephantbird/mapreduce/output/TestLzoTextOutputFormat.java | 3690 | package com.twitter.elephantbird.mapreduce.output;
import com.hadoop.compression.lzo.LzoIndex;
import com.hadoop.compression.lzo.LzopCodec;
import com.twitter.elephantbird.util.HadoopCompat;
import com.twitter.elephantbird.util.LzoUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.net.URI;
import java.util.UUID;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestLzoTextOutputFormat {

    // Size thresholds for index creation: writing ~1024 UUID pairs is expected
    // to land above SMALL_MIN_SIZE but below BIG_MIN_SIZE, so the two values
    // toggle whether an .index file is produced.
    private static final long SMALL_MIN_SIZE = 1000L;
    private static final long BIG_MIN_SIZE = SMALL_MIN_SIZE * 1000;

    private Path outputDir_;      // per-test output directory under test.build.data
    private Configuration conf_;  // base config with LZO index output enabled
    private FileSystem lfs_;      // raw local FS (no checksum wrapper)

    @Before
    public void setUp() throws Exception {
        outputDir_ = new Path(System.getProperty("test.build.data", "data"),
            "outputDir");
        conf_ = new Configuration();
        conf_.setBoolean(LzoUtils.LZO_OUTPUT_INDEX, true);
        lfs_ = new RawLocalFileSystem();
        lfs_.initialize(URI.create("file:///"), conf_);
        // Purge the FS cache so later FileSystem lookups don't reuse an
        // instance configured by a previous test.
        FileSystem.closeAll(); // purge fs cache
    }

    @After
    public void cleanup() throws Exception {
        lfs_.delete(outputDir_, true);
    }

    /**
     * Writes 1024 random key/value pairs through LzoTextOutputFormat and checks
     * whether an LZO index file is created alongside the .lzo output.
     *
     * @param minSize threshold below which output should not be indexed
     * @param viaBlockSize true to express the threshold as the local FS block
     *        size, false to use the explicit LZO_OUTPUT_INDEXABLE_MINSIZE option
     */
    private void testIndexFile(long minSize, boolean viaBlockSize)
        throws Exception {
        final Job job = new Job(conf_);
        final Configuration conf = job.getConfiguration();
        if (viaBlockSize) {
            conf.setLong("fs.local.block.size", minSize);
        } else {
            conf.setLong("fs.local.block.size", 1L); // would always index
            conf.setLong(LzoUtils.LZO_OUTPUT_INDEXABLE_MINSIZE, minSize);
        }
        FileOutputFormat.setOutputPath(job, outputDir_);
        final LzoTextOutputFormat<Text,Text> outputFormat =
            new LzoTextOutputFormat<Text,Text>();
        final TaskAttemptContext attemptContext =
            HadoopCompat.newTaskAttemptContext(HadoopCompat.getConfiguration(job),
                new TaskAttemptID(TaskID.forName("task_1234567_0001_r_000001"), 1));
        // NOTE(review): raw RecordWriter; would be RecordWriter<Text, Text> if
        // parameterized.
        final RecordWriter writer = outputFormat.getRecordWriter(attemptContext);
        for (int i = 0; i < 1024; i++) {
            writer.write(new Text(UUID.randomUUID().toString()),
                new Text(UUID.randomUUID().toString()));
        }
        writer.close(attemptContext);
        final Path lzoFile = outputFormat.getDefaultWorkFile(attemptContext,
            LzopCodec.DEFAULT_LZO_EXTENSION);
        final Path lzoIndexFile = lzoFile.suffix(LzoIndex.LZO_INDEX_SUFFIX);
        assertTrue(lzoFile + ": Lzo file should exist!", lfs_.exists(lzoFile));
        // With the small threshold the data exceeds the minimum and must be
        // indexed; with the big threshold it must not be.
        if (minSize == SMALL_MIN_SIZE) {
            assertTrue(lzoIndexFile + ": Lzo index file should exist!",
                lfs_.exists(lzoIndexFile));
        } else {
            assertFalse(lzoIndexFile + ": Lzo index file should not exist!",
                lfs_.exists(lzoIndexFile));
        }
    }

    @Test
    public void testLzoIndexViaBlockSize() throws Exception {
        testIndexFile(SMALL_MIN_SIZE, true);
    }

    @Test
    public void testNoLzoIndexViaBlockSize() throws Exception {
        testIndexFile(BIG_MIN_SIZE, true);
    }

    @Test
    public void testLzoIndexViaMinSize() throws Exception {
        testIndexFile(SMALL_MIN_SIZE, false);
    }

    @Test
    public void testNoLzoIndexViaMinSize() throws Exception {
        testIndexFile(BIG_MIN_SIZE, false);
    }
}
| apache-2.0 |
tomwscott/GoCD | common/src/com/thoughtworks/go/domain/DirectoryEntries.java | 2588 | /*
* Copyright 2015 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.domain;
import com.thoughtworks.go.server.presentation.html.HtmlElement;
import com.thoughtworks.go.server.presentation.html.HtmlRenderable;
import com.thoughtworks.go.server.presentation.models.HtmlRenderer;
import com.thoughtworks.go.util.json.JsonAware;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static com.thoughtworks.go.server.presentation.html.HtmlElement.p;
public class DirectoryEntries extends ArrayList<DirectoryEntry> implements HtmlRenderable, JsonAware {

    /** True when the artifacts backing these entries have been purged or removed. */
    private boolean isArtifactsDeleted;

    /**
     * Renders all entries as HTML. If the list is empty or the artifacts were
     * deleted, an explanatory paragraph is rendered first.
     */
    public void render(HtmlRenderer renderer) {
        if (isArtifactsDeleted || isEmpty()) {
            HtmlElement element = p().content("Artifacts for this job instance are unavailable as they may have been <a href='http://www.go.cd/documentation/user/current/configuration/delete_artifacts.html' target='blank'>purged by Go</a> or deleted externally. "
                    + "Re-run the stage or job to generate them again.");
            element.render(renderer);
        }
        for (DirectoryEntry entry : this) {
            entry.toHtml().render(renderer);
        }
    }

    /**
     * @return one JSON map per entry, in list order
     */
    public List<Map<String, Object>> toJson() {
        // Was a raw "new ArrayList()" (unchecked); use the typed diamond form,
        // presized to the number of entries.
        List<Map<String, Object>> jsonList = new ArrayList<>(size());
        for (DirectoryEntry entry : this) {
            jsonList.add(entry.toJson());
        }
        return jsonList;
    }

    public boolean isArtifactsDeleted() {
        return isArtifactsDeleted;
    }

    public void setIsArtifactsDeleted(boolean artifactsDeleted) {
        isArtifactsDeleted = artifactsDeleted;
    }

    /**
     * Adds an (initially empty) folder entry and returns it so callers can
     * populate its children.
     */
    public FolderDirectoryEntry addFolder(String folderName) {
        FolderDirectoryEntry folderDirectoryEntry = new FolderDirectoryEntry(folderName, "", new DirectoryEntries());
        add(folderDirectoryEntry);
        return folderDirectoryEntry;
    }

    /** Adds a file entry pointing at the given URL. */
    public void addFile(String fileName, String url) {
        add(new FileDirectoryEntry(fileName, url));
    }
}
| apache-2.0 |
thomasdarimont/spring-security | core/src/test/java/org/springframework/security/concurrent/CurrentDelegatingSecurityContextScheduledExecutorServiceTests.java | 1275 | /*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.concurrent;
import org.junit.Before;
/**
* Tests using the current {@link SecurityContext} on
* {@link DelegatingSecurityContextScheduledExecutorService}
*
* @author Rob Winch
* @since 3.2
*
*/
public class CurrentDelegatingSecurityContextScheduledExecutorServiceTests extends
		AbstractDelegatingSecurityContextScheduledExecutorServiceTests {

	// Switches the inherited test fixture to use the *current* SecurityContext
	// (as opposed to an explicitly supplied one) before each test runs.
	@Before
	public void setUp() throws Exception {
		this.currentSecurityContextPowermockSetup();
	}

	// Factory hook from the abstract base: wraps the base class's `delegate`
	// executor so submitted tasks run with the current SecurityContext.
	@Override
	protected DelegatingSecurityContextScheduledExecutorService create() {
		return new DelegatingSecurityContextScheduledExecutorService(delegate);
	}
}
| apache-2.0 |
alexzaitzev/ignite | modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PageDeltaRecord.java | 2525 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.pagemem.wal.record.delta;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.pagemem.PageMemory;
import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.pagemem.wal.record.WalRecordCacheGroupAware;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
/**
* Abstract page delta record.
*/
/**
 * Abstract page delta record: a WAL record describing an incremental change to a
 * single page, identified by cache group ID and page ID. Subclasses implement
 * {@link #applyDelta} to replay the change onto the page.
 */
public abstract class PageDeltaRecord extends WALRecord implements WalRecordCacheGroupAware {
    /** Cache group ID. Immutable after construction (was non-final). */
    private final int grpId;

    /** Page ID. Excluded from reflective toString; printed as hex instead. */
    @GridToStringExclude
    private final long pageId;

    /**
     * @param grpId Cache group ID.
     * @param pageId Page ID.
     */
    protected PageDeltaRecord(int grpId, long pageId) {
        this.grpId = grpId;
        this.pageId = pageId;
    }

    /**
     * @return Page ID.
     */
    public long pageId() {
        return pageId;
    }

    /** {@inheritDoc} */
    @Override public int groupId() {
        return grpId;
    }

    /**
     * Apply changes from this delta to the given page.
     * It is assumed that the given buffer represents page state right before this update.
     *
     * @param pageMem Page memory.
     * @param pageAddr Page address.
     * @throws IgniteCheckedException If failed.
     */
    public abstract void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException;

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(PageDeltaRecord.class, this,
            "pageId", U.hexLong(pageId),
            "super", super.toString());
    }
}
| apache-2.0 |
HonzaKral/elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java | 31784 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
import org.elasticsearch.search.aggregations.metrics.Stats;
import org.elasticsearch.search.aggregations.metrics.Sum;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.test.ESIntegTestCase;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filters;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.derivative;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;
@ESIntegTestCase.SuiteScopeTestCase
/**
 * Integration tests for the {@code derivative} pipeline aggregation: first and second
 * derivatives over histogram buckets, gap policies for empty buckets, normalization by
 * time unit, and error handling for invalid bucket paths.
 */
public class DerivativeIT extends ESIntegTestCase {

    private static final String SINGLE_VALUED_FIELD_NAME = "l_value";

    private static int interval;
    private static int numValueBuckets;
    private static int numFirstDerivValueBuckets;
    private static int numSecondDerivValueBuckets;
    private static long[] valueCounts;
    private static long[] firstDerivValueCounts;
    private static long[] secondDerivValueCounts;

    private static Long[] valueCounts_empty;
    private static long numDocsEmptyIdx;
    private static Double[] firstDerivValueCounts_empty;

    // expected bucket values for random setup with gaps
    private static int numBuckets_empty_rnd;
    private static Long[] valueCounts_empty_rnd;
    private static Double[] firstDerivValueCounts_empty_rnd;
    private static long numDocsEmptyIdx_rnd;

    @Override
    public void setupSuiteScopeCluster() throws Exception {
        createIndex("idx");
        createIndex("idx_unmapped");

        interval = 5;
        numValueBuckets = randomIntBetween(6, 80);

        valueCounts = new long[numValueBuckets];
        for (int i = 0; i < numValueBuckets; i++) {
            valueCounts[i] = randomIntBetween(1, 20);
        }

        // Precompute expected first derivatives: diff between consecutive bucket counts.
        numFirstDerivValueBuckets = numValueBuckets - 1;
        firstDerivValueCounts = new long[numFirstDerivValueBuckets];
        Long lastValueCount = null;
        for (int i = 0; i < numValueBuckets; i++) {
            long thisValue = valueCounts[i];
            if (lastValueCount != null) {
                long diff = thisValue - lastValueCount;
                firstDerivValueCounts[i - 1] = diff;
            }
            lastValueCount = thisValue;
        }

        // Precompute expected second derivatives: diff between consecutive first derivatives.
        numSecondDerivValueBuckets = numFirstDerivValueBuckets - 1;
        secondDerivValueCounts = new long[numSecondDerivValueBuckets];
        Long lastFirstDerivativeValueCount = null;
        for (int i = 0; i < numFirstDerivValueBuckets; i++) {
            long thisFirstDerivativeValue = firstDerivValueCounts[i];
            if (lastFirstDerivativeValueCount != null) {
                long diff = thisFirstDerivativeValue - lastFirstDerivativeValueCount;
                secondDerivValueCounts[i - 1] = diff;
            }
            lastFirstDerivativeValueCount = thisFirstDerivativeValue;
        }

        List<IndexRequestBuilder> builders = new ArrayList<>();
        for (int i = 0; i < numValueBuckets; i++) {
            for (int docs = 0; docs < valueCounts[i]; docs++) {
                builders.add(client().prepareIndex("idx").setSource(newDocBuilder(i * interval)));
            }
        }

        // setup for index with empty buckets
        valueCounts_empty = new Long[] { 1L, 1L, 2L, 0L, 2L, 2L, 0L, 0L, 0L, 3L, 2L, 1L };
        firstDerivValueCounts_empty = new Double[] { null, 0d, 1d, -2d, 2d, 0d, -2d, 0d, 0d, 3d, -1d, -1d };

        assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer"));
        for (int i = 0; i < valueCounts_empty.length; i++) {
            for (int docs = 0; docs < valueCounts_empty[i]; docs++) {
                builders.add(client().prepareIndex("empty_bucket_idx").setSource(newDocBuilder(i)));
                numDocsEmptyIdx++;
            }
        }

        // randomized setup for index with empty buckets
        numBuckets_empty_rnd = randomIntBetween(20, 100);
        valueCounts_empty_rnd = new Long[numBuckets_empty_rnd];
        firstDerivValueCounts_empty_rnd = new Double[numBuckets_empty_rnd];
        firstDerivValueCounts_empty_rnd[0] = null;

        assertAcked(prepareCreate("empty_bucket_idx_rnd").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer"));
        for (int i = 0; i < numBuckets_empty_rnd; i++) {
            valueCounts_empty_rnd[i] = (long) randomIntBetween(1, 10);
            // make approximately half of the buckets empty
            if (randomBoolean())
                valueCounts_empty_rnd[i] = 0L;
            for (int docs = 0; docs < valueCounts_empty_rnd[i]; docs++) {
                builders.add(client().prepareIndex("empty_bucket_idx_rnd").setSource(newDocBuilder(i)));
                numDocsEmptyIdx_rnd++;
            }
            if (i > 0) {
                firstDerivValueCounts_empty_rnd[i] = (double) valueCounts_empty_rnd[i] - valueCounts_empty_rnd[i - 1];
            }
        }

        indexRandom(true, builders);
        ensureSearchable();
    }

    private XContentBuilder newDocBuilder(int singleValueFieldValue) throws IOException {
        return jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, singleValueFieldValue).endObject();
    }

    /**
     * test first and second derivative on the single-valued field
     */
    public void testDocCountDerivative() {
        SearchResponse response = client()
                .prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                                .subAggregation(derivative("deriv", "_count"))
                                .subAggregation(derivative("2nd_deriv", "deriv"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(numValueBuckets));

        for (int i = 0; i < numValueBuckets; ++i) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
            SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
            if (i > 0) {
                assertThat(docCountDeriv, notNullValue());
                assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1]));
            } else {
                assertThat(docCountDeriv, nullValue());
            }
            SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv");
            if (i > 1) {
                assertThat(docCount2ndDeriv, notNullValue());
                assertThat(docCount2ndDeriv.value(), equalTo((double) secondDerivValueCounts[i - 2]));
            } else {
                assertThat(docCount2ndDeriv, nullValue());
            }
        }
    }

    /**
     * test first and second derivative on the single-valued field, with unit-normalized values
     */
    public void testSingleValuedField_normalised() {
        SearchResponse response = client()
                .prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0)
                                .subAggregation(derivative("deriv", "_count").unit("1ms"))
                                .subAggregation(derivative("2nd_deriv", "deriv").unit("10ms"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(numValueBuckets));

        for (int i = 0; i < numValueBuckets; ++i) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
            Derivative docCountDeriv = bucket.getAggregations().get("deriv");
            if (i > 0) {
                assertThat(docCountDeriv, notNullValue());
                assertThat(docCountDeriv.value(), closeTo((firstDerivValueCounts[i - 1]), 0.00001));
                // interval is 5, unit is 1ms => normalized value is the raw diff divided by 5
                assertThat(docCountDeriv.normalizedValue(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001));
            } else {
                assertThat(docCountDeriv, nullValue());
            }
            Derivative docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv");
            if (i > 1) {
                assertThat(docCount2ndDeriv, notNullValue());
                assertThat(docCount2ndDeriv.value(), closeTo((secondDerivValueCounts[i - 2]), 0.00001));
                // interval 5 with unit 10ms => normalized value is the raw diff times 2
                assertThat(docCount2ndDeriv.normalizedValue(), closeTo((double) (secondDerivValueCounts[i - 2]) * 2, 0.00001));
            } else {
                assertThat(docCount2ndDeriv, nullValue());
            }
        }
    }

    public void testSingleValueAggDerivative() throws Exception {
        SearchResponse response = client()
                .prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                                .subAggregation(derivative("deriv", "sum"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
        Object[] propertiesKeys = (Object[]) ((InternalAggregation)deriv).getProperty("_key");
        Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)deriv).getProperty("_count");
        Object[] propertiesSumCounts = (Object[]) ((InternalAggregation)deriv).getProperty("sum.value");

        List<Bucket> buckets = new ArrayList<>(deriv.getBuckets());
        Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets
                                                         // overwritten
        for (int i = 0; i < numValueBuckets; ++i) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
            Sum sum = bucket.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            long expectedSum = valueCounts[i] * (i * interval);
            assertThat(sum.getValue(), equalTo((double) expectedSum));
            SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
            if (i > 0) {
                assertThat(sumDeriv, notNullValue());
                long sumDerivValue = expectedSum - expectedSumPreviousBucket;
                assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
                assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo",
                        AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
                        equalTo((double) sumDerivValue));
            } else {
                assertThat(sumDeriv, nullValue());
            }
            expectedSumPreviousBucket = expectedSum;
            assertThat(propertiesKeys[i], equalTo((double) i * interval));
            assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i]));
            assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum));
        }
    }

    public void testMultiValueAggDerivative() throws Exception {
        SearchResponse response = client()
                .prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                                .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
                                .subAggregation(derivative("deriv", "stats.sum"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
        Object[] propertiesKeys = (Object[]) ((InternalAggregation)deriv).getProperty("_key");
        Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)deriv).getProperty("_count");
        Object[] propertiesSumCounts = (Object[]) ((InternalAggregation)deriv).getProperty("stats.sum");

        List<Bucket> buckets = new ArrayList<>(deriv.getBuckets());
        Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets
                                                         // overwritten
        for (int i = 0; i < numValueBuckets; ++i) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
            Stats stats = bucket.getAggregations().get("stats");
            assertThat(stats, notNullValue());
            long expectedSum = valueCounts[i] * (i * interval);
            assertThat(stats.getSum(), equalTo((double) expectedSum));
            SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
            if (i > 0) {
                assertThat(sumDeriv, notNullValue());
                long sumDerivValue = expectedSum - expectedSumPreviousBucket;
                assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
                assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo",
                        AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
                        equalTo((double) sumDerivValue));
            } else {
                assertThat(sumDeriv, nullValue());
            }
            expectedSumPreviousBucket = expectedSum;
            assertThat(propertiesKeys[i], equalTo((double) i * interval));
            assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i]));
            assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum));
        }
    }

    public void testUnmapped() throws Exception {
        SearchResponse response = client()
                .prepareSearch("idx_unmapped")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                                .subAggregation(derivative("deriv", "_count"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        assertThat(deriv.getBuckets().size(), equalTo(0));
    }

    public void testPartiallyUnmapped() throws Exception {
        SearchResponse response = client()
                .prepareSearch("idx", "idx_unmapped")
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                                .subAggregation(derivative("deriv", "_count"))).get();

        assertSearchResponse(response);

        Histogram deriv = response.getAggregations().get("histo");
        assertThat(deriv, notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));

        for (int i = 0; i < numValueBuckets; ++i) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
            SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
            if (i > 0) {
                assertThat(docCountDeriv, notNullValue());
                assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1]));
            } else {
                assertThat(docCountDeriv, nullValue());
            }
        }
    }

    public void testDocCountDerivativeWithGaps() throws Exception {
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .subAggregation(derivative("deriv", "_count"))).get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(valueCounts_empty.length));

        for (int i = 0; i < valueCounts_empty.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i, valueCounts_empty[i]);
            SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
            if (firstDerivValueCounts_empty[i] == null) {
                assertThat(docCountDeriv, nullValue());
            } else {
                assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i]));
            }
        }
    }

    public void testDocCountDerivativeWithGaps_random() throws Exception {
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx_rnd")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .extendedBounds(0L, numBuckets_empty_rnd - 1)
                                .subAggregation(derivative("deriv", "_count").gapPolicy(randomFrom(GapPolicy.values()))))
                .get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx_rnd));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(numBuckets_empty_rnd));

        for (int i = 0; i < valueCounts_empty_rnd.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i, valueCounts_empty_rnd[i]);
            SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
            if (firstDerivValueCounts_empty_rnd[i] == null) {
                assertThat(docCountDeriv, nullValue());
            } else {
                assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty_rnd[i]));
            }
        }
    }

    public void testDocCountDerivativeWithGaps_insertZeros() throws Exception {
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .subAggregation(derivative("deriv", "_count").gapPolicy(GapPolicy.INSERT_ZEROS))).get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(valueCounts_empty.length));

        for (int i = 0; i < valueCounts_empty.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i + ": ", bucket, i, valueCounts_empty[i]);
            SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
            if (firstDerivValueCounts_empty[i] == null) {
                assertThat(docCountDeriv, nullValue());
            } else {
                assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i]));
            }
        }
    }

    public void testSingleValueAggDerivativeWithGaps() throws Exception {
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                                .subAggregation(derivative("deriv", "sum"))).get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(valueCounts_empty.length));

        double lastSumValue = Double.NaN;
        for (int i = 0; i < valueCounts_empty.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i, valueCounts_empty[i]);
            Sum sum = bucket.getAggregations().get("sum");
            double thisSumValue = sum.value();
            if (bucket.getDocCount() == 0) {
                thisSumValue = Double.NaN;
            }
            SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
            if (i == 0) {
                assertThat(sumDeriv, nullValue());
            } else {
                double expectedDerivative = thisSumValue - lastSumValue;
                if (Double.isNaN(expectedDerivative)) {
                    // equalTo is used because closeTo(NaN, ...) never matches
                    assertThat(sumDeriv.value(), equalTo(expectedDerivative));
                } else {
                    assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
                }
            }
            lastSumValue = thisSumValue;
        }
    }

    public void testSingleValueAggDerivativeWithGaps_insertZeros() throws Exception {
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                                .subAggregation(derivative("deriv", "sum").gapPolicy(GapPolicy.INSERT_ZEROS))).get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(valueCounts_empty.length));

        double lastSumValue = Double.NaN;
        for (int i = 0; i < valueCounts_empty.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i, valueCounts_empty[i]);
            Sum sum = bucket.getAggregations().get("sum");
            double thisSumValue = sum.value();
            if (bucket.getDocCount() == 0) {
                thisSumValue = 0;
            }
            SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
            if (i == 0) {
                assertThat(sumDeriv, nullValue());
            } else {
                double expectedDerivative = thisSumValue - lastSumValue;
                assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
            }
            lastSumValue = thisSumValue;
        }
    }

    public void testSingleValueAggDerivativeWithGaps_random() throws Exception {
        GapPolicy gapPolicy = randomFrom(GapPolicy.values());
        SearchResponse searchResponse = client()
                .prepareSearch("empty_bucket_idx_rnd")
                .setQuery(matchAllQuery())
                .addAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
                                .extendedBounds(0L, (long) numBuckets_empty_rnd - 1)
                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                                .subAggregation(derivative("deriv", "sum").gapPolicy(gapPolicy))).get();

        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx_rnd));

        Histogram deriv = searchResponse.getAggregations().get("histo");
        assertThat(deriv, Matchers.notNullValue());
        assertThat(deriv.getName(), equalTo("histo"));
        List<? extends Bucket> buckets = deriv.getBuckets();
        assertThat(buckets.size(), equalTo(numBuckets_empty_rnd));

        double lastSumValue = Double.NaN;
        for (int i = 0; i < valueCounts_empty_rnd.length; i++) {
            Histogram.Bucket bucket = buckets.get(i);
            checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i, valueCounts_empty_rnd[i]);
            Sum sum = bucket.getAggregations().get("sum");
            double thisSumValue = sum.value();
            if (bucket.getDocCount() == 0) {
                thisSumValue = gapPolicy == GapPolicy.INSERT_ZEROS ? 0 : Double.NaN;
            }
            SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
            if (i == 0) {
                assertThat(sumDeriv, nullValue());
            } else {
                double expectedDerivative = thisSumValue - lastSumValue;
                if (Double.isNaN(expectedDerivative)) {
                    assertThat(sumDeriv.value(), equalTo(expectedDerivative));
                } else {
                    assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
                }
            }
            lastSumValue = thisSumValue;
        }
    }

    public void testSingleValueAggDerivative_invalidPath() throws Exception {
        try {
            client().prepareSearch("idx")
                    .addAggregation(
                            histogram("histo")
                                    .field(SINGLE_VALUED_FIELD_NAME)
                                    .interval(interval)
                                    .subAggregation(
                                            filters("filters", QueryBuilders.termQuery("tag", "foo")).subAggregation(
                                                    sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
                                    .subAggregation(derivative("deriv", "filters>get>sum"))).get();
            fail("Expected an Exception but didn't get one");
        } catch (Exception e) {
            Throwable cause = ExceptionsHelper.unwrapCause(e);
            if (cause == null) {
                throw e;
            } else if (cause instanceof SearchPhaseExecutionException) {
                // BUG FIX: previously this cast `e` instead of `cause`, which throws
                // ClassCastException whenever the SearchPhaseExecutionException is wrapped
                // inside another exception (the instanceof check was done on `cause`).
                SearchPhaseExecutionException spee = (SearchPhaseExecutionException) cause;
                Throwable rootCause = spee.getRootCause();
                if (!(rootCause instanceof IllegalArgumentException)) {
                    throw e;
                }
            } else if (!(cause instanceof IllegalArgumentException)) {
                throw e;
            }
        }
    }

    public void testDerivDerivNPE() throws Exception {
        createIndex("deriv_npe");

        // Index ten docs; docs 1 and 3 deliberately have a null "value" so that the
        // avg (and therefore the derivative-of-derivative) must cope with gaps.
        for (int i = 0; i < 10; i++) {
            Integer value = i;
            if (i == 1 || i == 3) {
                value = null;
            }
            XContentBuilder doc = jsonBuilder()
                    .startObject()
                    .field("tick", i)
                    .field("value", value)
                    .endObject();
            client().prepareIndex("deriv_npe").setSource(doc).get();
        }

        refresh();

        SearchResponse response = client()
                .prepareSearch("deriv_npe")
                .addAggregation(
                        histogram("histo").field("tick").interval(1)
                                .subAggregation(avg("avg").field("value"))
                                .subAggregation(derivative("deriv1", "avg"))
                                .subAggregation(derivative("deriv2", "deriv1"))).get();

        assertSearchResponse(response);
    }

    // Shared assertion helper: bucket must exist with the expected key and doc count.
    private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey,
            final long expectedDocCount) {
        assertThat(msg, bucket, notNullValue());
        assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey));
        assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount));
    }
}
| apache-2.0 |
mvolaart/openhab2-addons | addons/binding/org.openhab.binding.jeelink/src/main/java/org/openhab/binding/jeelink/internal/connection/JeeLinkConnection.java | 927 | /**
* Copyright (c) 2010-2017 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.binding.jeelink.internal.connection;
/**
* Interface for connections to JeeLink USB Receivers.
*
* @author Volker Bier - Initial contribution
*/
public interface JeeLinkConnection {
    /**
     * Closes the connection to the receiver.
     */
    void closeConnection();

    /**
     * Opens the connection to the receiver.
     */
    void openConnection();

    /**
     * Returns the port to which the receiver is connected.
     *
     * @return the port identifier (e.g. a serial device path or socket address —
     *         exact format depends on the implementation)
     */
    String getPort();

    /**
     * Sends the specified init commands to the receiver.
     *
     * @param initCommands the commands to send; format/separator is
     *        implementation-defined (TODO confirm against implementations)
     */
    void sendInitCommands(String initCommands);
}
| epl-1.0 |
md-5/jdk10 | test/hotspot/jtreg/vmTestbase/nsk/monitoring/GarbageCollectorMXBean/getCollectionCount/getcollectioncount001.java | 3042 | /*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package nsk.monitoring.GarbageCollectorMXBean.getCollectionCount;
import java.lang.management.*;
import java.io.*;
import nsk.share.*;
import nsk.monitoring.share.*;
public class getcollectioncount001 {

    /** Flipped to true as soon as any check fails. */
    private static boolean testFailed = false;

    private static Log log;

    public static void main(String[] args) {
        System.exit(Consts.JCK_STATUS_BASE + run(args, System.out));
    }

    /**
     * Runs the test: queries getCollectionCount() on every garbage collector
     * bean and verifies the returned value is in the valid range.
     */
    static int run(String[] args, PrintStream out) {
        ArgumentHandler argumentHandler = new ArgumentHandler(args);
        log = new Log(out, argumentHandler);

        // Test case 1. check that
        // getCollectionCount() does not throw unexpected exceptions
        System.gc();
        System.gc();
        System.gc();

        GarbageCollectorMonitor gcMonitor =
            Monitor.getGarbageCollectorMonitor(log, argumentHandler);

        for (Object bean : gcMonitor.getGarbageCollectorMXBeans()) {
            long collectionCount = gcMonitor.getCollectionCount(bean);
            String beanName = (bean instanceof javax.management.ObjectName)
                    ? bean.toString()
                    : ((java.lang.management.GarbageCollectorMXBean) bean).getName();
            log.display(beanName + ": getCollectionCount() = " + collectionCount);

            if (collectionCount < -1) {
                // A collector may report any non-negative count, or -1 when the
                // collection count is undefined for it; anything else is a bug.
                log.complain("FAILURE 1.");
                log.complain("getCollectionCount() returns unexpected value: " +
                        collectionCount);
                testFailed = true;
            }
        }

        if (testFailed)
            log.complain("TEST FAILED");
        return (testFailed) ? Consts.TEST_FAILED : Consts.TEST_PASSED;
    }
}
| gpl-2.0 |
md-5/jdk10 | test/hotspot/jtreg/vmTestbase/nsk/jvmti/RedefineClasses/redefclass001.java | 5049 | /*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package nsk.jvmti.RedefineClasses;
import java.io.*;
/**
* This test makes simple check that class can be redefined. It creates
* an instance of tested class <code>redefclass001r</code>. Then the test
* invokes native function <code>makeRedefinition()</code>. This native
* function makes class file redifinition of loaded class
* <code>redefclass001r</code>.<br>
* Bytes of new version of the class <code>redefclass001r</code> are taken
* from the <i>./newclass</i> directory.<br>
* Finally, the test checks that the class <code>redefclass001r</code> was
* redefined by invoking new version of its method.
*/
public class redefclass001 {
    static final int PASSED = 0;
    static final int FAILED = 2;
    static final int JCK_STATUS_BASE = 95;

    /** Verbose output flag, enabled via the "-v" command-line option. */
    static boolean DEBUG_MODE = false;

    /** Directory containing the "newclass" directory with the redefined class file. */
    static String fileDir = ".";

    private PrintStream out;

    static {
        try {
            System.loadLibrary("redefclass001");
        } catch (UnsatisfiedLinkError e) {
            System.err.println("Could not load redefclass001 library");
            System.err.println("java.library.path:" +
                System.getProperty("java.library.path"));
            throw e;
        }
    }

    native static int makeRedefinition(int verbose, Class redefClass,
        byte[] classBytes);

    public static void main(String[] argv) {
        argv = nsk.share.jvmti.JVMTITest.commonInit(argv);

        System.exit(run(argv, System.out) + JCK_STATUS_BASE);
    }

    public static int run(String argv[], PrintStream out) {
        return new redefclass001().runIt(argv, out);
    }

    /**
     * Runs the test: invokes the old version of redefclass001r.checkIt(),
     * redefines the class from the "newclass" directory, then verifies the
     * new version of the method is in effect.
     *
     * @return PASSED or FAILED status.
     */
    private int runIt(String argv[], PrintStream out) {
        byte[] redefClassBytes;
        int retValue = 0;

        this.out = out;
        for (int i = 0; i < argv.length; i++) {
            String token = argv[i];
            if (token.equals("-v")) // verbose mode
                DEBUG_MODE = true;
            else
                fileDir = token;
        }

        redefclass001r redefClsObj = new redefclass001r();
        if ((retValue = redefClsObj.checkIt(DEBUG_MODE, out)) == 19) {
            if (DEBUG_MODE)
                out.println("Successfully invoke method checkIt() of OLD redefclass001r");
        } else {
            out.println("TEST: failed to invoke method redefclass001r.checkIt()");
            return FAILED;
        }

        // try to redefine class redefclass001r
        String fileName = fileDir + File.separator + "newclass" + File.separator +
            redefclass001r.class.getName().replace('.', File.separatorChar) +
            ".class";
        if (DEBUG_MODE)
            out.println("Trying to redefine class from the file: " + fileName);
        // Read the whole class file. InputStream.read(byte[]) may return before
        // filling the buffer, so use readFully() to avoid truncated class bytes;
        // try-with-resources guarantees the stream is closed on every path.
        try (DataInputStream in = new DataInputStream(new FileInputStream(fileName))) {
            redefClassBytes = new byte[in.available()];
            in.readFully(redefClassBytes);
        } catch (Exception ex) {
            out.println("# Unexpected exception while reading class file:");
            out.println("# " + ex);
            return FAILED;
        }

        // make real redefinition (verbose level 2 when debugging, 1 otherwise)
        retValue = makeRedefinition(DEBUG_MODE ? 2 : 1,
            redefClsObj.getClass(), redefClassBytes);
        if (retValue != PASSED) {
            out.println("TEST: failed to redefine class");
            return FAILED;
        }

        if ((retValue = redefClsObj.checkIt(DEBUG_MODE, out)) == 73) {
            if (DEBUG_MODE)
                out.println("Successfully invoke method checkIt() of NEW redefclass001r");
            return PASSED;
        } else {
            if (retValue == 19)
                out.println("TEST: the method redefclass001r.checkIt() is still old");
            else
                out.println("TEST: failed to call method redefclass001r.checkIt()");
            return FAILED;
        }
    }
}
| gpl-2.0 |
UnlimitedFreedom/UF-WorldEdit | worldedit-core/src/main/java/com/sk89q/worldedit/util/PropertiesConfiguration.java | 9207 | /*
* WorldEdit, a Minecraft world manipulation toolkit
* Copyright (C) sk89q <http://www.sk89q.com>
* Copyright (C) WorldEdit team and contributors
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// $Id$
package com.sk89q.worldedit.util;
import com.sk89q.util.StringUtil;
import com.sk89q.worldedit.LocalConfiguration;
import com.sk89q.worldedit.LocalSession;
import com.sk89q.worldedit.world.snapshot.SnapshotRepository;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Simple LocalConfiguration that loads settings using
* {@code java.util.Properties}.
*/
public class PropertiesConfiguration extends LocalConfiguration {

    private static final Logger log = Logger.getLogger(PropertiesConfiguration.class.getCanonicalName());

    protected Properties properties;
    protected File path;

    /**
     * Construct the object. The configuration isn't loaded yet.
     *
     * @param path the path to the configuration
     */
    public PropertiesConfiguration(File path) {
        this.path = path;

        properties = new Properties();
    }

    @Override
    public void load() {
        InputStream stream = null;
        try {
            stream = new FileInputStream(path);
            properties.load(stream);
        } catch (FileNotFoundException ignored) {
            // A missing file is expected on first run; defaults are written back below.
        } catch (IOException e) {
            log.log(Level.WARNING, "Failed to read configuration", e);
        } finally {
            if (stream != null) {
                try {
                    stream.close();
                } catch (IOException ignored) {
                }
            }
        }

        loadExtra();

        profile = getBool("profile", profile);
        disallowedBlocks = getIntSet("disallowed-blocks", defaultDisallowedBlocks);
        defaultChangeLimit = getInt("default-max-changed-blocks", defaultChangeLimit);
        maxChangeLimit = getInt("max-changed-blocks", maxChangeLimit);
        defaultMaxPolygonalPoints = getInt("default-max-polygon-points", defaultMaxPolygonalPoints);
        maxPolygonalPoints = getInt("max-polygon-points", maxPolygonalPoints);
        defaultMaxPolyhedronPoints = getInt("default-max-polyhedron-points", defaultMaxPolyhedronPoints);
        maxPolyhedronPoints = getInt("max-polyhedron-points", maxPolyhedronPoints);
        shellSaveType = getString("shell-save-type", shellSaveType);
        maxRadius = getInt("max-radius", maxRadius);
        maxSuperPickaxeSize = getInt("max-super-pickaxe-size", maxSuperPickaxeSize);
        maxBrushRadius = getInt("max-brush-radius", maxBrushRadius);
        logCommands = getBool("log-commands", logCommands);
        logFile = getString("log-file", logFile);
        registerHelp = getBool("register-help", registerHelp);
        wandItem = getInt("wand-item", wandItem);
        superPickaxeDrop = getBool("super-pickaxe-drop-items", superPickaxeDrop);
        superPickaxeManyDrop = getBool("super-pickaxe-many-drop-items", superPickaxeManyDrop);
        noDoubleSlash = getBool("no-double-slash", noDoubleSlash);
        useInventory = getBool("use-inventory", useInventory);
        useInventoryOverride = getBool("use-inventory-override", useInventoryOverride);
        useInventoryCreativeOverride = getBool("use-inventory-creative-override", useInventoryCreativeOverride);
        navigationWand = getInt("nav-wand-item", navigationWand);
        navigationWandMaxDistance = getInt("nav-wand-distance", navigationWandMaxDistance);
        navigationUseGlass = getBool("nav-use-glass", navigationUseGlass);
        scriptTimeout = getInt("scripting-timeout", scriptTimeout);
        saveDir = getString("schematic-save-dir", saveDir);
        scriptsDir = getString("craftscript-dir", scriptsDir);
        butcherDefaultRadius = getInt("butcher-default-radius", butcherDefaultRadius);
        butcherMaxRadius = getInt("butcher-max-radius", butcherMaxRadius);
        allowSymlinks = getBool("allow-symbolic-links", allowSymlinks);
        LocalSession.MAX_HISTORY_SIZE = Math.max(15, getInt("history-size", 15));

        String snapshotsDir = getString("snapshots-dir", "");
        if (!snapshotsDir.isEmpty()) {
            snapshotRepo = new SnapshotRepository(snapshotsDir);
        }

        // Write the (possibly augmented) properties back so the file always
        // lists every known option with its current value.
        OutputStream output = null;
        File parent = path.getParentFile();
        if (parent != null) {
            // Guard against paths with no parent (e.g. a bare relative file name),
            // where getParentFile() returns null.
            parent.mkdirs();
        }
        try {
            output = new FileOutputStream(path);
            properties.store(output, "Don't put comments; they get removed");
        } catch (IOException e) {
            // FileNotFoundException is an IOException, so one handler covers both.
            log.log(Level.WARNING, "Failed to write configuration", e);
        } finally {
            if (output != null) {
                try {
                    output.close();
                } catch (IOException ignored) {
                }
            }
        }
    }

    /**
     * Called to load extra configuration. Subclasses may override; the default
     * implementation does nothing.
     */
    protected void loadExtra() {
    }

    /**
     * Get a string value.
     *
     * @param key the key
     * @param def the default value
     * @return the value
     */
    protected String getString(String key, String def) {
        if (def == null) {
            def = "";
        }
        String val = properties.getProperty(key);
        if (val == null) {
            properties.setProperty(key, def);
            return def;
        } else {
            return val;
        }
    }

    /**
     * Get a boolean value. "true" (any case) and "1" are treated as true.
     *
     * @param key the key
     * @param def the default value
     * @return the value
     */
    protected boolean getBool(String key, boolean def) {
        String val = properties.getProperty(key);
        if (val == null) {
            properties.setProperty(key, def ? "true" : "false");
            return def;
        } else {
            return val.equalsIgnoreCase("true")
                    || val.equals("1");
        }
    }

    /**
     * Get an integer value. A malformed stored value is replaced by the default.
     *
     * @param key the key
     * @param def the default value
     * @return the value
     */
    protected int getInt(String key, int def) {
        String val = properties.getProperty(key);
        if (val == null) {
            properties.setProperty(key, String.valueOf(def));
            return def;
        } else {
            try {
                return Integer.parseInt(val);
            } catch (NumberFormatException e) {
                properties.setProperty(key, String.valueOf(def));
                return def;
            }
        }
    }

    /**
     * Get a double value. A malformed stored value is replaced by the default.
     *
     * @param key the key
     * @param def the default value
     * @return the value
     */
    protected double getDouble(String key, double def) {
        String val = properties.getProperty(key);
        if (val == null) {
            properties.setProperty(key, String.valueOf(def));
            return def;
        } else {
            try {
                return Double.parseDouble(val);
            } catch (NumberFormatException e) {
                properties.setProperty(key, String.valueOf(def));
                return def;
            }
        }
    }

    /**
     * Get a set of integer values stored as a comma-separated list.
     * Unparseable elements are silently skipped.
     *
     * @param key the key
     * @param def the default values
     * @return the value
     */
    protected Set<Integer> getIntSet(String key, int[] def) {
        String val = properties.getProperty(key);
        if (val == null) {
            properties.setProperty(key, StringUtil.joinString(def, ",", 0));
            Set<Integer> set = new HashSet<Integer>();
            for (int i : def) {
                set.add(i);
            }
            return set;
        } else {
            Set<Integer> set = new HashSet<Integer>();
            String[] parts = val.split(",");
            for (String part : parts) {
                try {
                    int v = Integer.parseInt(part.trim());
                    set.add(v);
                } catch (NumberFormatException ignored) {
                }
            }
            return set;
        }
    }
}
| gpl-3.0 |
likaiwalkman/cassandra | src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java | 10111 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.index.composites;
import java.nio.ByteBuffer;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.rows.*;
import org.apache.cassandra.db.filter.*;
import org.apache.cassandra.db.index.*;
import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
import org.apache.cassandra.utils.btree.BTreeSet;
import org.apache.cassandra.utils.concurrent.OpOrder;
/**
 * Secondary index searcher for composites-backed indexes: iterates index hits,
 * reads the matching rows from the base table, and filters out (and deletes)
 * index entries that turn out to be stale.
 */
public class CompositesSearcher extends SecondaryIndexSearcher
{
    // NOTE(review): currently unused in this class; kept for future diagnostics.
    private static final Logger logger = LoggerFactory.getLogger(CompositesSearcher.class);

    public CompositesSearcher(SecondaryIndexManager indexManager, Set<ColumnDefinition> columns)
    {
        super(indexManager, columns);
    }

    // Whether the decoded index entry is actually selected by the command; the
    // index can return hits outside the queried clustering constraints.
    private boolean isMatchingEntry(DecoratedKey partitionKey, CompositesIndex.IndexedEntry entry, ReadCommand command)
    {
        return command.selects(partitionKey, entry.indexedEntryClustering);
    }

    protected UnfilteredPartitionIterator queryDataFromIndex(AbstractSimplePerColumnSecondaryIndex secondaryIdx,
                                                             final DecoratedKey indexKey,
                                                             final RowIterator indexHits,
                                                             final ReadCommand command,
                                                             final ReadOrderGroup orderGroup)
    {
        assert indexHits.staticRow() == Rows.EMPTY_STATIC_ROW;
        assert secondaryIdx instanceof CompositesIndex;
        final CompositesIndex index = (CompositesIndex)secondaryIdx;

        return new UnfilteredPartitionIterator()
        {
            // Look-ahead: next decoded index entry, or null when indexHits is exhausted.
            private CompositesIndex.IndexedEntry nextEntry;

            // Next partition iterator to hand out; prepared lazily by prepareNext().
            private UnfilteredRowIterator next;

            public boolean isForThrift()
            {
                return command.isForThrift();
            }

            public CFMetaData metadata()
            {
                return command.metadata();
            }

            public boolean hasNext()
            {
                return prepareNext();
            }

            public UnfilteredRowIterator next()
            {
                if (next == null)
                    prepareNext();

                UnfilteredRowIterator toReturn = next;
                next = null;
                return toReturn;
            }

            // Advances until a non-empty partition is staged in 'next'; returns
            // false when the index hits are exhausted.
            private boolean prepareNext()
            {
                if (next != null)
                    return true;

                if (nextEntry == null)
                {
                    if (!indexHits.hasNext())
                        return false;

                    nextEntry = index.decodeEntry(indexKey, indexHits.next());
                }

                // Gather all index hits belonging to the same partition and query the data for those hits.
                // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
                // 1 read per index hit. However, this basically means materializing all hits for a partition
                // in memory so we should consider adding some paging mechanism. However, index hits should
                // be relatively small so it's much better than the previous code that was materializing all
                // *data* for a given partition.
                BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(baseCfs.getComparator());
                List<CompositesIndex.IndexedEntry> entries = new ArrayList<>();
                DecoratedKey partitionKey = baseCfs.decorateKey(nextEntry.indexedKey);

                while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
                {
                    // We queried a slice of the index, but some hits may not match some of the clustering column constraints
                    if (isMatchingEntry(partitionKey, nextEntry, command))
                    {
                        clusterings.add(nextEntry.indexedEntryClustering);
                        entries.add(nextEntry);
                    }

                    nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                }

                // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
                if (clusterings.isEmpty())
                    return prepareNext();

                // Query the gathered index hits. We still need to filter stale hits from the resulting query.
                ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
                SinglePartitionReadCommand dataCmd = new SinglePartitionNamesCommand(metadata(),
                                                                                    command.nowInSec(),
                                                                                    command.columnFilter(),
                                                                                    command.rowFilter(),
                                                                                    DataLimits.NONE,
                                                                                    partitionKey,
                                                                                    filter);
                @SuppressWarnings("resource") // We close right away if empty; if it's assigned to next it will be closed either
                                              // by the next caller of next(), or through closing this iterator if that comes first.
                UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(baseCfs, orderGroup.baseReadOpOrderGroup()),
                                                                    index,
                                                                    indexKey.getKey(),
                                                                    entries,
                                                                    orderGroup.writeOpOrderGroup(),
                                                                    command.nowInSec());

                if (dataIter.isEmpty())
                {
                    dataIter.close();
                    return prepareNext();
                }

                next = dataIter;
                return true;
            }

            public void remove()
            {
                throw new UnsupportedOperationException();
            }

            public void close()
            {
                indexHits.close();
                if (next != null)
                    next.close();
            }
        };
    }

    // Wraps the base-table iterator, dropping rows whose index entry is stale
    // and deleting those stale entries from the index as a side effect.
    private UnfilteredRowIterator filterStaleEntries(UnfilteredRowIterator dataIter,
                                                     final CompositesIndex index,
                                                     final ByteBuffer indexValue,
                                                     final List<CompositesIndex.IndexedEntry> entries,
                                                     final OpOrder.Group writeOp,
                                                     final int nowInSec)
    {
        return new AlteringUnfilteredRowIterator(dataIter)
        {
            // Cursor into 'entries'; relies on entries being in clustering order.
            private int entriesIdx;

            @Override
            protected Row computeNext(Row row)
            {
                CompositesIndex.IndexedEntry entry = findEntry(row.clustering(), writeOp, nowInSec);
                if (!index.isStale(row, indexValue, nowInSec))
                    return row;

                // The entry is stale: delete the entry and ignore otherwise
                index.delete(entry, writeOp, nowInSec);
                return null;
            }

            private CompositesIndex.IndexedEntry findEntry(Clustering clustering, OpOrder.Group writeOp, int nowInSec)
            {
                assert entriesIdx < entries.size();
                while (entriesIdx < entries.size())
                {
                    CompositesIndex.IndexedEntry entry = entries.get(entriesIdx++);

                    // The entries are in clustering order, so the requested entry should be the
                    // next entry, the one at 'entriesIdx'. However, we can have stale entries, entries
                    // that have no corresponding row in the base table typically because of a range
                    // tombstone or partition level deletion. Delete such stale entries.
                    int cmp = metadata().comparator.compare(entry.indexedEntryClustering, clustering);
                    assert cmp <= 0; // this would mean entries are not in clustering order, which shouldn't happen
                    if (cmp == 0)
                        return entry;
                    else
                        index.delete(entry, writeOp, nowInSec);
                }

                // entries correspond to the rows we've queried, so we shouldn't have a row that has no corresponding entry.
                throw new AssertionError();
            }
        };
    }
}
| apache-2.0 |
alexkli/jackrabbit-oak | oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/osgi/SegmentNodeStoreServiceTest.java | 10006 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.jackrabbit.oak.segment.osgi;
import static org.junit.Assert.assertTrue;
import org.apache.jackrabbit.oak.segment.osgi.MetatypeInformation.ObjectClassDefinition;
import org.junit.Test;
/**
 * Verifies the generated OSGi component descriptor and metatype information
 * for {@code SegmentNodeStoreService}: declared properties, their types and
 * default values, and the service references.
 */
public class SegmentNodeStoreServiceTest {

    // --- helpers that factor out the repeated property/attribute check chains ---

    /** Asserts {@code cd} declares an Integer property {@code name} with the given value. */
    private static void assertIntProperty(ComponentDescriptor cd, String name, String value) {
        assertTrue(cd.hasProperty(name).withIntegerType().withValue(value).check());
    }

    /** Asserts {@code cd} declares a Long property {@code name} with the given value. */
    private static void assertLongProperty(ComponentDescriptor cd, String name, String value) {
        assertTrue(cd.hasProperty(name).withLongType().withValue(value).check());
    }

    /** Asserts {@code cd} declares a Boolean property {@code name} with the given value. */
    private static void assertBooleanProperty(ComponentDescriptor cd, String name, String value) {
        assertTrue(cd.hasProperty(name).withBooleanType().withValue(value).check());
    }

    /** Asserts {@code ocd} declares a String attribute {@code name}. */
    private static void assertStringAttribute(ObjectClassDefinition ocd, String name) {
        assertTrue(ocd.hasAttributeDefinition(name).withStringType().check());
    }

    /** Asserts {@code ocd} declares an Integer attribute {@code name} with the given default. */
    private static void assertIntAttribute(ObjectClassDefinition ocd, String name, String defaultValue) {
        assertTrue(ocd.hasAttributeDefinition(name).withIntegerType().withDefaultValue(defaultValue).check());
    }

    /** Asserts {@code ocd} declares a Long attribute {@code name} with the given default. */
    private static void assertLongAttribute(ObjectClassDefinition ocd, String name, String defaultValue) {
        assertTrue(ocd.hasAttributeDefinition(name).withLongType().withDefaultValue(defaultValue).check());
    }

    /** Asserts {@code ocd} declares a Boolean attribute {@code name} with the given default. */
    private static void assertBooleanAttribute(ObjectClassDefinition ocd, String name, String defaultValue) {
        assertTrue(ocd.hasAttributeDefinition(name).withBooleanType().withDefaultValue(defaultValue).check());
    }

    @Test
    public void testComponentDescriptor() throws Exception {
        ComponentDescriptor cd = ComponentDescriptor.open(getClass().getResourceAsStream("/OSGI-INF/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.xml"));
        assertTrue(cd.hasName("org.apache.jackrabbit.oak.segment.SegmentNodeStoreService"));
        assertTrue(cd.hasRequireConfigurationPolicy());
        assertTrue(cd.hasActivateMethod("activate"));
        assertTrue(cd.hasDeactivateMethod("deactivate"));
        assertTrue(cd.hasImplementationClass("org.apache.jackrabbit.oak.segment.SegmentNodeStoreService"));

        // Untyped (string) properties.
        assertTrue(cd.hasProperty("repository.home").check());
        assertTrue(cd.hasProperty("tarmk.mode").check());
        assertTrue(cd.hasProperty("repository.backup.dir").check());

        // Typed properties with their default values.
        assertIntProperty(cd, "tarmk.size", "256");
        assertIntProperty(cd, "segmentCache.size", "256");
        assertIntProperty(cd, "stringCache.size", "256");
        assertIntProperty(cd, "templateCache.size", "64");
        assertIntProperty(cd, "stringDeduplicationCache.size", "15000");
        assertIntProperty(cd, "templateDeduplicationCache.size", "3000");
        assertIntProperty(cd, "nodeDeduplicationCache.size", "1048576");
        assertBooleanProperty(cd, "pauseCompaction", "false");
        assertIntProperty(cd, "compaction.retryCount", "5");
        assertIntProperty(cd, "compaction.force.timeout", "60");
        assertLongProperty(cd, "compaction.sizeDeltaEstimation", "1073741824");
        assertBooleanProperty(cd, "compaction.disableEstimation", "false");
        assertIntProperty(cd, "compaction.retainedGenerations", "2");
        assertIntProperty(cd, "compaction.memoryThreshold", "15");
        assertLongProperty(cd, "compaction.progressLog", "-1");
        assertBooleanProperty(cd, "standby", "false");
        assertBooleanProperty(cd, "customBlobStore", "false");
        assertBooleanProperty(cd, "customSegmentStore", "false");
        assertLongProperty(cd, "blobGcMaxAgeInSecs", "86400");
        assertLongProperty(cd, "blobTrackSnapshotIntervalInSecs", "43200");

        // Service references.
        assertTrue(cd.hasReference("blobStore")
                .withInterface("org.apache.jackrabbit.oak.spi.blob.BlobStore")
                .withOptionalUnaryCardinality()
                .withStaticPolicy()
                .withGreedyPolicyOption()
                .withTarget("(&(!(split.blobstore=old))(!(split.blobstore=new)))")
                .withField("blobStore")
                .check());
        assertTrue(cd.hasReference("segmentStore")
                .withInterface("org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence")
                .withOptionalUnaryCardinality()
                .withStaticPolicy()
                .withGreedyPolicyOption()
                .withField("segmentStore")
                .check());
        assertTrue(cd.hasReference("statisticsProvider")
                .withInterface("org.apache.jackrabbit.oak.stats.StatisticsProvider")
                .withMandatoryUnaryCardinality()
                .withStaticPolicy()
                .withField("statisticsProvider")
                .check());
    }

    @Test
    public void testMetatypeInformation() throws Exception {
        MetatypeInformation mi = MetatypeInformation.open(getClass().getResourceAsStream("/OSGI-INF/metatype/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService$Configuration.xml"));
        assertTrue(mi.hasDesignate()
                .withPid("org.apache.jackrabbit.oak.segment.SegmentNodeStoreService")
                .withReference("org.apache.jackrabbit.oak.segment.SegmentNodeStoreService$Configuration")
                .check());

        ObjectClassDefinition ocd = mi.getObjectClassDefinition("org.apache.jackrabbit.oak.segment.SegmentNodeStoreService$Configuration");
        assertStringAttribute(ocd, "repository.home");
        assertStringAttribute(ocd, "tarmk.mode");
        assertIntAttribute(ocd, "tarmk.size", "256");
        assertIntAttribute(ocd, "segmentCache.size", "256");
        assertIntAttribute(ocd, "stringCache.size", "256");
        assertIntAttribute(ocd, "templateCache.size", "64");
        assertIntAttribute(ocd, "stringDeduplicationCache.size", "15000");
        assertIntAttribute(ocd, "templateDeduplicationCache.size", "3000");
        assertIntAttribute(ocd, "nodeDeduplicationCache.size", "1048576");
        assertBooleanAttribute(ocd, "pauseCompaction", "false");
        assertIntAttribute(ocd, "compaction.retryCount", "5");
        assertIntAttribute(ocd, "compaction.force.timeout", "60");
        assertLongAttribute(ocd, "compaction.sizeDeltaEstimation", "1073741824");
        assertBooleanAttribute(ocd, "compaction.disableEstimation", "false");
        assertIntAttribute(ocd, "compaction.retainedGenerations", "2");
        assertIntAttribute(ocd, "compaction.memoryThreshold", "15");
        assertLongAttribute(ocd, "compaction.progressLog", "-1");
        assertBooleanAttribute(ocd, "standby", "false");
        assertBooleanAttribute(ocd, "customBlobStore", "false");
        assertBooleanAttribute(ocd, "customSegmentStore", "false");
        assertStringAttribute(ocd, "repository.backup.dir");
        assertLongAttribute(ocd, "blobGcMaxAgeInSecs", "86400");
        assertLongAttribute(ocd, "blobTrackSnapshotIntervalInSecs", "43200");
    }
}
| apache-2.0 |
Kast0rTr0y/jboss-modules | src/test/java/org/jboss/modules/test/ClassC.java | 814 | /*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.modules.test;
/**
 * Empty test fixture class: a subclass of {@link ClassD} with no members of
 * its own, used to exercise class loading across a type hierarchy.
 *
 * @author John E. Bailey
 */
public class ClassC extends ClassD {
}
| apache-2.0 |
lemonJun/TakinMQ | takinmq-kclient/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java | 2918 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer;
import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.TimestampType;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class MockConsumerTest {

    private final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

    @Test
    public void testSimpleMock() {
        consumer.subscribe(Arrays.asList("test"), new NoOpConsumerRebalanceListener());
        assertEquals(0, consumer.poll(1000).count());

        TopicPartition partition0 = new TopicPartition("test", 0);
        TopicPartition partition1 = new TopicPartition("test", 1);
        consumer.rebalance(Arrays.asList(partition0, partition1));

        // Mock consumers cannot reset offsets automatically, so seed the
        // beginning offsets and seek by hand before polling for records.
        HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
        beginningOffsets.put(partition0, 0L);
        beginningOffsets.put(partition1, 0L);
        consumer.updateBeginningOffsets(beginningOffsets);
        consumer.seek(partition0, 0);

        ConsumerRecord<String, String> first = new ConsumerRecord<String, String>(
                "test", 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1");
        ConsumerRecord<String, String> second = new ConsumerRecord<String, String>(
                "test", 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2");
        consumer.addRecord(first);
        consumer.addRecord(second);

        // Both queued records come back, in order, from a single poll.
        Iterator<ConsumerRecord<String, String>> iterator = consumer.poll(1).iterator();
        assertEquals(first, iterator.next());
        assertEquals(second, iterator.next());
        assertFalse(iterator.hasNext());

        // The position advances past both records and survives a commit.
        assertEquals(2L, consumer.position(partition0));
        consumer.commitSync();
        assertEquals(2L, consumer.committed(partition0).offset());
    }
}
| apache-2.0 |
AnshulJain1985/Roadcast-Tracker | src/org/traccar/model/CommandType.java | 909 | /*
* Copyright 2016 Gabor Somogyi (gabor.g.somogyi@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.model;
/**
 * Simple wrapper around a device command-type identifier string.
 */
public class CommandType {

    // Raw command type identifier (e.g. a protocol-specific command name).
    private String type;

    /**
     * Creates a command type holder.
     *
     * @param initialType the command type identifier
     */
    public CommandType(String initialType) {
        type = initialType;
    }

    /**
     * @return the command type identifier
     */
    public String getType() {
        return type;
    }

    /**
     * @param newType the command type identifier to store
     */
    public void setType(String newType) {
        type = newType;
    }

}
| apache-2.0 |
shreejay/elasticsearch | core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java | 7153 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.suggest;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.suggest.completion.CompletionSuggesterBuilderTests;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilderTests;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilderTests;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.Map.Entry;
import static java.util.Collections.emptyList;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
/**
 * Round-trip tests for {@link SuggestBuilder}: xContent rendering/parsing,
 * stream serialization, equals/hashCode, and name validation.
 */
public class SuggestBuilderTests extends ESTestCase {

    // Number of randomized repetitions per test method.
    private static final int NUMBER_OF_RUNS = 20;
    // Shared registries built once for the whole class (see init/afterClass).
    private static NamedWriteableRegistry namedWriteableRegistry;
    private static NamedXContentRegistry xContentRegistry;

    /**
     * Setup for the whole base test class.
     */
    @BeforeClass
    public static void init() {
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
        namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
        xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
    }

    @AfterClass
    public static void afterClass() {
        // Null out the static registries so they can be garbage collected.
        namedWriteableRegistry = null;
        xContentRegistry = null;
    }

    /**
     * Creates a random suggestion builder, renders it to xContent and back to a new
     * instance that should be equal to the original.
     */
    public void testFromXContent() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) {
            SuggestBuilder suggestBuilder = randomSuggestBuilder();
            XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
            if (randomBoolean()) {
                xContentBuilder.prettyPrint();
            }
            suggestBuilder.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
            XContentParser parser = createParser(xContentBuilder);
            SuggestBuilder secondSuggestBuilder = SuggestBuilder.fromXContent(parser);
            // Parsed copy must be a distinct but equal object.
            assertNotSame(suggestBuilder, secondSuggestBuilder);
            assertEquals(suggestBuilder, secondSuggestBuilder);
            assertEquals(suggestBuilder.hashCode(), secondSuggestBuilder.hashCode());
        }
    }

    /**
     * Test equality and hashCode properties
     */
    public void testEqualsAndHashcode() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) {
            checkEqualsAndHashCode(randomSuggestBuilder(), original -> {
                return copyWriteable(original, namedWriteableRegistry, SuggestBuilder::new);
            }, this::createMutation);
        }
    }

    /**
     * Test serialization and deserialization
     */
    public void testSerialization() throws IOException {
        for (int i = 0; i < NUMBER_OF_RUNS; i++) {
            SuggestBuilder suggestBuilder = randomSuggestBuilder();
            SuggestBuilder deserializedModel = copyWriteable(suggestBuilder, namedWriteableRegistry, SuggestBuilder::new);
            assertEquals(suggestBuilder, deserializedModel);
            assertEquals(suggestBuilder.hashCode(), deserializedModel.hashCode());
            assertNotSame(suggestBuilder, deserializedModel);
        }
    }

    /**
     * Null suggestion names and duplicate names must both be rejected.
     */
    public void testIllegalSuggestionName() {
        try {
            new SuggestBuilder().addSuggestion(null, PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder());
            fail("exception expected");
        } catch (NullPointerException e) {
            assertEquals("every suggestion needs a name", e.getMessage());
        }
        try {
            new SuggestBuilder().addSuggestion("my-suggest", PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder())
                .addSuggestion("my-suggest", PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder());
            fail("exception expected");
        } catch (IllegalArgumentException e) {
            assertEquals("already added another suggestion with name [my-suggest]", e.getMessage());
        }
    }

    /**
     * Returns a copy of the given builder altered in exactly one way (changed
     * global text or one extra suggestion) so it must compare unequal.
     */
    protected SuggestBuilder createMutation(SuggestBuilder original) throws IOException {
        SuggestBuilder mutation = new SuggestBuilder().setGlobalText(original.getGlobalText());
        for (Entry<String, SuggestionBuilder<?>> suggestionBuilder : original.getSuggestions().entrySet()) {
            mutation.addSuggestion(suggestionBuilder.getKey(), suggestionBuilder.getValue());
        }
        if (randomBoolean()) {
            mutation.setGlobalText(randomAlphaOfLengthBetween(5, 60));
        } else {
            mutation.addSuggestion(randomAlphaOfLength(10), PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder());
        }
        return mutation;
    }

    /** Builds a SuggestBuilder with optional global text and 1-5 random suggestions. */
    public static SuggestBuilder randomSuggestBuilder() {
        SuggestBuilder builder = new SuggestBuilder();
        if (randomBoolean()) {
            builder.setGlobalText(randomAlphaOfLengthBetween(1, 20));
        }
        final int numSuggestions = randomIntBetween(1, 5);
        for (int i = 0; i < numSuggestions; i++) {
            builder.addSuggestion(randomAlphaOfLengthBetween(5, 10), randomSuggestionBuilder());
        }
        return builder;
    }

    /** Picks one of the three concrete suggestion builder flavors at random. */
    private static SuggestionBuilder<?> randomSuggestionBuilder() {
        switch (randomIntBetween(0, 2)) {
            case 0: return TermSuggestionBuilderTests.randomTermSuggestionBuilder();
            case 1: return PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder();
            case 2: return CompletionSuggesterBuilderTests.randomCompletionSuggestionBuilder();
            default: return TermSuggestionBuilderTests.randomTermSuggestionBuilder();
        }
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        return xContentRegistry;
    }
}
| apache-2.0 |
jtanx/sfntly-java-mod | src/com/google/typography/font/sfntly/table/opentype/component/GsubLookupType.java | 755 | package com.google.typography.font.sfntly.table.opentype.component;
/**
 * GSUB (glyph substitution) lookup types, declared in OpenType spec order so
 * that {@code ordinal() + 1} is the spec's 1-based lookup type number.
 */
public enum GsubLookupType implements LookupType {
  GSUB_SINGLE,
  GSUB_MULTIPLE,
  GSUB_ALTERNATE,
  GSUB_LIGATURE,
  GSUB_CONTEXTUAL,
  GSUB_CHAINING_CONTEXTUAL,
  GSUB_EXTENSION,
  GSUB_REVERSE_CHAINING_CONTEXTUAL_SINGLE;

  /**
   * @return the 1-based lookup type number defined by the OpenType spec.
   */
  @Override
  public int typeNum() {
    return ordinal() + 1;
  }

  /**
   * @return the constant name in lower case, e.g. {@code gsub_single}.
   */
  @Override
  public String toString() {
    // Fix: use a locale-independent lowering. The default-locale overload
    // yields wrong names under e.g. the Turkish locale, where 'I' maps to
    // dotless 'ı' ("gsub_sıngle" instead of "gsub_single").
    return super.toString().toLowerCase(java.util.Locale.ROOT);
  }

  /**
   * Looks up the enum constant for a 1-based lookup type number.
   *
   * @param typeNum the 1-based lookup type number
   * @return the matching constant, or {@code null} (after logging to stderr)
   *     if the number is out of range
   */
  public static GsubLookupType forTypeNum(int typeNum) {
    if (typeNum <= 0 || typeNum > values.length) {
      System.err.format("unknown gsub lookup typeNum: %d\n", typeNum);
      return null;
    }
    return values[typeNum - 1];
  }

  // Cached constant array; Enum.values() allocates a fresh copy on every call.
  private static final GsubLookupType[] values = values();
}
| apache-2.0 |
melix/golo-lang | src/main/java/fr/insalyon/citi/golo/compiler/CodeGenerationResult.java | 1625 | /*
* Copyright 2012-2013 Institut National des Sciences Appliquées de Lyon (INSA-Lyon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.insalyon.citi.golo.compiler;
/**
* A code generation result.
* <p>
* Compiling a single Golo source file may result in several JVM classes to be produced.
* A <code>CodeGenerationResult</code> represents one such output.
*/
public final class CodeGenerationResult {

  // Raw JVM class-file bytes produced by the compiler.
  private final byte[] bytecode;
  // Descriptor identifying which class the bytecode defines.
  private final PackageAndClass packageAndClass;

  /**
   * Constructor for a code generation result.
   *
   * @param bytecode the JVM bytecode as an array.
   * @param packageAndClass the package and class descriptor for the bytecode.
   */
  public CodeGenerationResult(byte[] bytecode, PackageAndClass packageAndClass) {
    // NOTE(review): the array is stored (and later returned) without a
    // defensive copy, so callers share the same mutable buffer — presumably
    // deliberate to avoid copying large bytecode arrays; confirm callers
    // never mutate it.
    this.bytecode = bytecode;
    this.packageAndClass = packageAndClass;
  }

  /**
   * @return the bytecode array (the internal reference, not a copy).
   */
  public byte[] getBytecode() {
    return bytecode;
  }

  /**
   * @return the package and class descriptor.
   */
  public PackageAndClass getPackageAndClass() {
    return packageAndClass;
  }
}
| apache-2.0 |
ern/elasticsearch | server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java | 2624 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import java.io.IOException;
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.POST;
/**
 * REST handler for {@code POST /_cache/clear} and
 * {@code POST /{index}/_cache/clear}: builds a {@link ClearIndicesCacheRequest}
 * from the request parameters and dispatches it asynchronously.
 */
public class RestClearIndicesCacheAction extends BaseRestHandler {

    @Override
    public List<Route> routes() {
        return List.of(
            new Route(POST, "/_cache/clear"),
            new Route(POST, "/{index}/_cache/clear"));
    }

    @Override
    public String getName() {
        return "clear_indices_cache_action";
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
        final ClearIndicesCacheRequest clearRequest = new ClearIndicesCacheRequest(indices);
        clearRequest.indicesOptions(IndicesOptions.fromRequest(request, clearRequest.indicesOptions()));
        fromRequest(request, clearRequest);
        return channel -> client.admin().indices().clearCache(clearRequest, new RestToXContentListener<>(channel));
    }

    @Override
    public boolean canTripCircuitBreaker() {
        // NOTE(review): returning false exempts this action from the request
        // circuit breaker check — confirm that is the intended behavior.
        return false;
    }

    /**
     * Copies the cache-selection flags ({@code query}, {@code request},
     * {@code fielddata}, {@code fields}) from the REST request onto the
     * transport request and returns it.
     */
    public static ClearIndicesCacheRequest fromRequest(final RestRequest request, ClearIndicesCacheRequest clearIndicesCacheRequest) {
        final ClearIndicesCacheRequest target = clearIndicesCacheRequest;
        target.queryCache(request.paramAsBoolean("query", target.queryCache()));
        target.requestCache(request.paramAsBoolean("request", target.requestCache()));
        target.fieldDataCache(request.paramAsBoolean("fielddata", target.fieldDataCache()));
        target.fields(request.paramAsStringArray("fields", target.fields()));
        return target;
    }
}
| apache-2.0 |
paulk-asert/incubator-groovy | src/test/groovy/mock/interceptor/IteratorCounter.java | 1023 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package groovy.mock.interceptor;
public class IteratorCounter {
    /**
     * Counts how many times {@code hasNext()} answers {@code true}.
     *
     * NOTE(review): the loop never calls {@code it.next()}, so on a real,
     * non-empty iterator this never terminates. It appears to rely on a
     * mocked iterator whose {@code hasNext()} eventually returns false
     * (this class lives in the mock-interceptor test support package) —
     * confirm against the Groovy mock tests that use it.
     *
     * @param it the iterator whose {@code hasNext()} calls are counted
     * @return the number of {@code true} answers before the first {@code false}
     */
    public int count(java.util.Iterator it) {
        int answeredTrue = 0;
        while (it.hasNext()) {
            answeredTrue++;
        }
        return answeredTrue;
    }
}
SimonHuber/oasp4j | samples/core/src/main/java/io/oasp/gastronomy/restaurant/offermanagement/logic/api/to/OfferSearchCriteriaTo.java | 2501 | package io.oasp.gastronomy.restaurant.offermanagement.logic.api.to;
import io.oasp.gastronomy.restaurant.general.common.api.datatype.Money;
import io.oasp.gastronomy.restaurant.offermanagement.common.api.datatype.OfferState;
import io.oasp.module.jpa.common.api.to.SearchCriteriaTo;
/**
* This is the {@link SearchCriteriaTo search criteria} {@link net.sf.mmm.util.transferobject.api.TransferObject TO}
* used to find {@link io.oasp.gastronomy.restaurant.salesmanagement.common.api.Order}s.
*
* @author hohwille
*/
public class OfferSearchCriteriaTo extends SearchCriteriaTo {

  /** UID for serialization. */
  private static final long serialVersionUID = 1L;

  // Each field is an optional filter; null means "no restriction".
  private Long number;

  private Money minPrice;

  private Money maxPrice;

  private Long mealId;

  private Long drinkId;

  private Long sideDishId;

  private OfferState state;

  /**
   * The constructor.
   */
  public OfferSearchCriteriaTo() {
    super();
  }

  /**
   * @return the offer number to match, or {@code null} for no restriction
   */
  public Long getNumber() {
    return number;
  }

  /**
   * @param value the offer number filter to set
   */
  public void setNumber(Long value) {
    this.number = value;
  }

  /**
   * @return the lower price bound, or {@code null} for no restriction
   */
  public Money getMinPrice() {
    return minPrice;
  }

  /**
   * @param value the lower price bound to set
   */
  public void setMinPrice(Money value) {
    this.minPrice = value;
  }

  /**
   * @return the upper price bound, or {@code null} for no restriction
   */
  public Money getMaxPrice() {
    return maxPrice;
  }

  /**
   * @param value the upper price bound to set
   */
  public void setMaxPrice(Money value) {
    this.maxPrice = value;
  }

  /**
   * @return the meal id to match, or {@code null} for no restriction
   */
  public Long getMealId() {
    return mealId;
  }

  /**
   * @param value the meal id filter to set
   */
  public void setMealId(Long value) {
    this.mealId = value;
  }

  /**
   * @return the drink id to match, or {@code null} for no restriction
   */
  public Long getDrinkId() {
    return drinkId;
  }

  /**
   * @param value the drink id filter to set
   */
  public void setDrinkId(Long value) {
    this.drinkId = value;
  }

  /**
   * @return the side dish id to match, or {@code null} for no restriction
   */
  public Long getSideDishId() {
    return sideDishId;
  }

  /**
   * @param value the side dish id filter to set
   */
  public void setSideDishId(Long value) {
    this.sideDishId = value;
  }

  /**
   * @return the offer state to match, or {@code null} for no restriction
   */
  public OfferState getState() {
    return state;
  }

  /**
   * @param value the offer state filter to set
   */
  public void setState(OfferState value) {
    this.state = value;
  }

}
| apache-2.0 |
GlenRSmith/elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java | 10211 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.core.ml.job.config;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xcontent.DeprecationHandler;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentParseException;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import java.io.IOException;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
/**
 * Serialization, parsing, and equality tests for {@link AnalysisLimits},
 * with a focus on validation of the {@code model_memory_limit} field.
 */
public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {

    @Override
    protected AnalysisLimits createTestInstance() {
        return createRandomized();
    }

    /** Builds limits with each field randomly present or absent. */
    public static AnalysisLimits createRandomized() {
        return new AnalysisLimits(
            randomBoolean() ? (long) randomIntBetween(1, 1000000) : null,
            randomBoolean() ? randomNonNegativeLong() : null
        );
    }

    @Override
    protected Writeable.Reader<AnalysisLimits> instanceReader() {
        return AnalysisLimits::new;
    }

    @Override
    protected AnalysisLimits doParseInstance(XContentParser parser) {
        return AnalysisLimits.STRICT_PARSER.apply(parser, null);
    }

    /** A negative numeric limit must fail strict parsing. */
    public void testParseModelMemoryLimitGivenNegativeNumber() throws IOException {
        String json = "{\"model_memory_limit\": -1}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
        assertThat(e.getCause(), notNullValue());
        assertThat(e.getCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = -1"));
    }

    /** Zero is below the 1 MiB minimum and must be rejected. */
    public void testParseModelMemoryLimitGivenZero() throws IOException {
        String json = "{\"model_memory_limit\": 0}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
        assertThat(e.getCause(), notNullValue());
        assertThat(e.getCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

    /** A bare number is interpreted as a count of megabytes. */
    public void testParseModelMemoryLimitGivenPositiveNumber() throws IOException {
        String json = "{\"model_memory_limit\": 2048}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
        assertThat(limits.getModelMemoryLimit(), equalTo(2048L));
    }

    /** Negative byte-size strings are rejected by the byte-size parser itself. */
    public void testParseModelMemoryLimitGivenNegativeString() throws IOException {
        String json = "{\"model_memory_limit\":\"-4MB\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
        // the root cause is wrapped in an intermediate ElasticsearchParseException
        assertThat(e.getCause(), instanceOf(ElasticsearchParseException.class));
        assertThat(e.getCause().getCause(), instanceOf(IllegalArgumentException.class));
        assertThat(e.getCause().getCause().getMessage(), containsString("Values less than -1 bytes are not supported: -4mb"));
    }

    /** A zero byte-size string is rejected by the MiB-minimum validation. */
    public void testParseModelMemoryLimitGivenZeroString() throws IOException {
        String json = "{\"model_memory_limit\":\"0MB\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
        assertThat(e.getCause(), notNullValue());
        assertThat(e.getCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

    /** Sub-MiB sizes truncate to 0 MiB and are therefore rejected. */
    public void testParseModelMemoryLimitGivenLessThanOneMBString() throws IOException {
        String json = "{\"model_memory_limit\":\"1000Kb\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
        assertThat(e.getCause(), notNullValue());
        assertThat(e.getCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

    /** Larger units are converted to whole megabytes (4g -> 4096 MiB). */
    public void testParseModelMemoryLimitGivenStringMultipleOfMBs() throws IOException {
        String json = "{\"model_memory_limit\":\"4g\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
        assertThat(limits.getModelMemoryLimit(), equalTo(4096L));
    }

    /** Non-multiples of 1 MiB truncate down (1300kb -> 1 MiB). */
    public void testParseModelMemoryLimitGivenStringNonMultipleOfMBs() throws IOException {
        String json = "{\"model_memory_limit\":\"1300kb\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
        assertThat(limits.getModelMemoryLimit(), equalTo(1L));
    }

    /** The single-argument constructor applies the default memory limit. */
    public void testModelMemoryDefault() {
        AnalysisLimits limits = new AnalysisLimits(randomNonNegativeLong());
        assertThat(limits.getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB));
    }

    public void testEquals_GivenEqual() {
        AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L);
        AnalysisLimits analysisLimits2 = new AnalysisLimits(10L, 20L);
        assertTrue(analysisLimits1.equals(analysisLimits1));
        assertTrue(analysisLimits1.equals(analysisLimits2));
        assertTrue(analysisLimits2.equals(analysisLimits1));
    }

    public void testEquals_GivenDifferentModelMemoryLimit() {
        AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L);
        AnalysisLimits analysisLimits2 = new AnalysisLimits(11L, 20L);
        assertFalse(analysisLimits1.equals(analysisLimits2));
        assertFalse(analysisLimits2.equals(analysisLimits1));
    }

    public void testEquals_GivenDifferentCategorizationExamplesLimit() {
        AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L);
        AnalysisLimits analysisLimits2 = new AnalysisLimits(10L, 21L);
        assertFalse(analysisLimits1.equals(analysisLimits2));
        assertFalse(analysisLimits2.equals(analysisLimits1));
    }

    public void testHashCode_GivenEqual() {
        AnalysisLimits analysisLimits1 = new AnalysisLimits(5555L, 3L);
        AnalysisLimits analysisLimits2 = new AnalysisLimits(5555L, 3L);
        assertEquals(analysisLimits1.hashCode(), analysisLimits2.hashCode());
    }

    /** A negative categorization examples limit must fail construction. */
    public void testVerify_GivenNegativeCategorizationExamplesLimit() {
        ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new AnalysisLimits(1L, -1L));
        String errorMessage = Messages.getMessage(
            Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW,
            AnalysisLimits.CATEGORIZATION_EXAMPLES_LIMIT,
            0,
            -1L
        );
        assertEquals(errorMessage, e.getMessage());
    }

    /** All combinations of present/absent valid limits must construct cleanly. */
    public void testVerify_GivenValid() {
        new AnalysisLimits(null, 1L);
        new AnalysisLimits(1L, null);
        new AnalysisLimits(1L, 1L);
    }

    /**
     * Mutates exactly one of the two fields (toggling absence or shifting the
     * value) so the result must compare unequal to the input.
     */
    @Override
    protected AnalysisLimits mutateInstance(AnalysisLimits instance) throws IOException {
        Long memoryModelLimit = instance.getModelMemoryLimit();
        Long categorizationExamplesLimit = instance.getCategorizationExamplesLimit();
        switch (between(0, 1)) {
            case 0:
                if (memoryModelLimit == null) {
                    memoryModelLimit = randomNonNegativeLong();
                } else {
                    if (randomBoolean()) {
                        memoryModelLimit = null;
                    } else {
                        memoryModelLimit += between(1, 10000);
                    }
                }
                break;
            case 1:
                if (categorizationExamplesLimit == null) {
                    categorizationExamplesLimit = randomNonNegativeLong();
                } else {
                    if (randomBoolean()) {
                        categorizationExamplesLimit = null;
                    } else {
                        categorizationExamplesLimit += between(1, 10000);
                    }
                }
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new AnalysisLimits(memoryModelLimit, categorizationExamplesLimit);
    }
}
| apache-2.0 |
ameybarve15/incubator-geode | gemfire-core/src/main/java/com/gemstone/gemfire/internal/stats50/Atomic50StatisticsImpl.java | 15769 | /*=========================================================================
* Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* one or more patents listed at http://www.pivotal.io/patents.
*=========================================================================
*/
package com.gemstone.gemfire.internal.stats50;
import com.gemstone.gemfire.*;
import com.gemstone.gemfire.internal.*;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import java.util.concurrent.atomic.AtomicIntegerArray; // don't use backport here!
import java.util.concurrent.atomic.AtomicLongArray; // don't use backport here!
import java.util.concurrent.ConcurrentLinkedQueue; // don't use backport here!
import java.util.concurrent.CopyOnWriteArrayList; // don't use backport here!
import java.util.*;
/**
* An implementation of {@link Statistics} that stores its statistics
* in local java memory.
*
* @see <A href="package-summary.html#statistics">Package introduction</A>
*
* @author Darrel Schneider
*
* @since 3.0
*
*/
public class Atomic50StatisticsImpl extends StatisticsImpl {
/** In JOM Statistics, the values of the int statistics */
private final AtomicIntegerArray intStorage;
private final AtomicIntegerArray intDirty;
private final Object[] intReadPrepLock;
/** In JOM Statistics, the values of the long statistics */
private final AtomicLongArray longStorage;
private final AtomicIntegerArray longDirty;
private final Object[] longReadPrepLock;
/** The StatisticsFactory that created this instance */
private final StatisticsManager dSystem;
/////////////////////// Constructors ///////////////////////
/**
* Creates a new statistics instance of the given type
*
* @param type
* A description of the statistics
* @param textId
* Text that identifies this statistic when it is monitored
* @param numericId
* A number that displayed when this statistic is monitored
* @param uniqueId
* A number that uniquely identifies this instance
* @param system
* The distributed system that determines whether or not these
* statistics are stored (and collected) in GemFire shared
* memory or in the local VM
*/
  public Atomic50StatisticsImpl(StatisticsType type, String textId,
                                long numericId,
                                long uniqueId,
                                StatisticsManager system) {
    super(type, calcTextId(system, textId), calcNumericId(system, numericId),
          uniqueId, 0);
    this.dSystem = system;
    StatisticsTypeImpl realType = (StatisticsTypeImpl)type;
    // This atomics-based implementation has no double storage; fail fast.
    if (realType.getDoubleStatCount() > 0) {
      throw new IllegalArgumentException(LocalizedStrings.Atomic50StatisticsImpl_ATOMICS_DO_NOT_SUPPORT_DOUBLE_STATS.toLocalizedString());
    }
    int intCount = realType.getIntStatCount();
    int longCount = realType.getLongStatCount();
    if (intCount > 0) {
      // Shared rolled-up values, per-stat dirty flags, and one lock object
      // per stat used when preparing a consistent read (see prepareForSample).
      this.intStorage = new AtomicIntegerArray(intCount);
      this.intDirty = new AtomicIntegerArray(intCount);
      this.intReadPrepLock = new Object[intCount];
      for (int i=0; i < intCount; i++) {
        this.intReadPrepLock[i] = new Object();
      }
    } else {
      // No int stats declared for this type.
      this.intStorage = null;
      this.intDirty = null;
      this.intReadPrepLock = null;
    }
    if (longCount > 0) {
      // Same three structures for the long-valued stats.
      this.longStorage = new AtomicLongArray(longCount);
      this.longDirty = new AtomicIntegerArray(longCount);
      this.longReadPrepLock = new Object[longCount];
      for (int i=0; i < longCount; i++) {
        this.longReadPrepLock[i] = new Object();
      }
    } else {
      // No long stats declared for this type.
      this.longStorage = null;
      this.longDirty = null;
      this.longReadPrepLock = null;
    }
  }
////////////////////// Static Methods //////////////////////
private static long calcNumericId(StatisticsManager system, long userValue) {
if (userValue != 0) {
return userValue;
} else {
long result = OSProcess.getId(); // fix for bug 30239
if (result == 0) {
if (system != null) {
result = system.getId();
}
}
return result;
}
}
private static String calcTextId(StatisticsManager system, String userValue) {
if (userValue != null && !userValue.equals("")) {
return userValue;
} else {
if (system != null) {
return system.getName();
} else {
return "";
}
}
}
////////////////////// Instance Methods //////////////////////
  @Override
  public final boolean isAtomic() {
    // This implementation always reports itself as atomic.
    return true;
  }
  @Override
  public void close() {
    super.close();
    // Remove this instance from the manager that created it, if any.
    if (this.dSystem != null) {
      dSystem.destroyStatistics(this);
    }
  }
/**
* Queue of new ThreadStorage instances.
*/
private final ConcurrentLinkedQueue<ThreadStorage> threadStoreQ = new ConcurrentLinkedQueue<ThreadStorage>();
/**
* List of ThreadStorage instances that will be used to roll up stat values
* on this instance. They come from the threadStoreQ.
*/
private final CopyOnWriteArrayList<ThreadStorage> threadStoreList = new CopyOnWriteArrayList<ThreadStorage>();
/**
* The workspace each thread that modifies statistics will use to do the
* mods locally.
*/
private static class ThreadStorage {
private final Thread owner;
public volatile boolean dirty = false;
public final AtomicIntegerArray intStore;
public final AtomicLongArray longStore;
public boolean isAlive() {
return this.owner.isAlive();
}
public ThreadStorage(int intSize, int longSize) {
this.owner = Thread.currentThread();
if (intSize > 0) {
this.intStore = new AtomicIntegerArray(intSize);
} else {
this.intStore = null;
}
if (longSize > 0) {
this.longStore = new AtomicLongArray(longSize);
} else {
this.longStore = null;
}
}
}
private final ThreadLocal<ThreadStorage> threadStore = new ThreadLocal<ThreadStorage>();
  // Returns this thread's private workspace, creating it on first use and
  // publishing it on threadStoreQ so the sampler can discover it.
  private ThreadStorage getThreadStorage() {
    ThreadStorage result = this.threadStore.get();
    if (result == null) {
      // Size the stripes to match the shared storage arrays (0 when absent).
      int intSize = 0;
      int longSize = 0;
      if (this.intStorage != null) {
        intSize = this.intStorage.length();
      }
      if (this.longStorage != null) {
        longSize = this.longStorage.length();
      }
      result = new ThreadStorage(intSize, longSize);
      this.threadStore.set(result);
      this.threadStoreQ.add(result);
    }
    return result;
  }
  // Returns this thread's workspace marked dirty, so the sampler knows it
  // holds pending deltas that need rolling up.
  private ThreadStorage getThreadStorageForWrite() {
    ThreadStorage result = getThreadStorage();
    if (!result.dirty) result.dirty = true;
    return result;
  }
  // Int stripe of the calling thread's workspace, with the workspace
  // flagged dirty for the sampler.
  private AtomicIntegerArray getThreadIntStorage() {
    return getThreadStorageForWrite().intStore;
  }
  // Long stripe of the calling thread's workspace, with the workspace
  // flagged dirty for the sampler.
  private AtomicLongArray getThreadLongStorage() {
    return getThreadStorageForWrite().longStore;
  }
//////////////////////// store() Methods ///////////////////////
  @Override
  protected final void _setInt(int offset, int value) {
    // Direct write via doIntWrite (defined elsewhere in this class).
    // NOTE(review): pending thread-local increments for this stat are not
    // flushed or cleared here — confirm the intended set/inc interleaving.
    doIntWrite(offset, value);
  }
  @Override
  protected final void _setLong(int offset, long value) {
    // Direct write via doLongWrite (defined elsewhere in this class).
    // NOTE(review): pending thread-local increments are not flushed here —
    // confirm the intended set/inc interleaving.
    doLongWrite(offset, value);
  }
  @Override
  protected final void _setDouble(int offset, double value) {
    // Double stats are unsupported in this implementation; always throws.
    throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DOUBLE_STATS_NOT_ON_ATOMIC50.toLocalizedString());
  }
/////////////////////// get() Methods ///////////////////////
  @Override
  protected final int _getInt(int offset) {
    // Reads via doIntRead (defined elsewhere); presumably returns the shared
    // rolled-up value, which may lag deltas still in per-thread stripes —
    // confirm against doIntRead.
    return doIntRead(offset);
  }
  @Override
  protected final long _getLong(int offset) {
    // Reads via doLongRead (defined elsewhere); may lag deltas still in
    // per-thread stripes — confirm against doLongRead.
    return doLongRead(offset);
  }
  @Override
  protected final double _getDouble(int offset) {
    // Double stats are unsupported in this implementation; always throws.
    throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DOUBLE_STATS_NOT_ON_ATOMIC50.toLocalizedString());
  }
//////////////////////// inc() Methods ////////////////////////
  @Override
  protected final void _incInt(int offset, int delta) {
    // Accumulate into this thread's private stripe (avoids contention on the
    // shared array), then flag the stat for roll-up (setIntDirty is defined
    // elsewhere in this class).
    getThreadIntStorage().getAndAdd(offset, delta);
    setIntDirty(offset);
  }
  @Override
  protected final void _incLong(int offset, long delta) {
    // Accumulate into this thread's private stripe, then flag the stat for
    // roll-up (setLongDirty is defined elsewhere in this class).
    getThreadLongStorage().getAndAdd(offset, delta);
    setLongDirty(offset);
  }
  @Override
  protected final void _incDouble(int offset, double delta) {
    // Double stats are unsupported in this implementation; always throws.
    throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DOUBLE_STATS_NOT_ON_ATOMIC50.toLocalizedString());
  }
private static final ThreadLocal samplerThread = new ThreadLocal();
/**
* Prepare the threadStoreList by moving into it all the new instances in Q.
*/
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
  justification="findbugs complains about this synchronize. It could be changed to a sync on a dedicated Object instance to make findbugs happy. see comments below")
  // Drains newly registered per-thread workspaces from threadStoreQ into the
  // list the sampler iterates over in prepareForSample.
  private void prepareThreadStoreList() {
    // The following sync is for the rare case when this method is called concurrently.
    // In that case it would be sub-optimal for both threads to concurrently create their
    // own ArrayList and then for both of them to call addAll.
    // findbugs complains about this synchronize. It could be changed to a sync on a dedicated Object instance to make findbugs happy.
    synchronized(threadStoreList) {
      ThreadStorage ts = this.threadStoreQ.poll();
      if (ts == null) return;
      // Batch the drained entries so addAll is called once (one CoW copy).
      ArrayList<ThreadStorage> tmp = new ArrayList<ThreadStorage>(64);
      do {
        tmp.add(ts);
        ts = this.threadStoreQ.poll();
      } while (ts != null);
      if (tmp.size() > 0) {
        this.threadStoreList.addAll(tmp);
      }
    }
  }
/**
* Used to take striped thread stats and "roll them up" into a single
* shared stat.
* @since 5.1
*/
@Override
public void prepareForSample() {
// mark this thread as the sampler
if (samplerThread.get() == null) samplerThread.set(Boolean.TRUE);
prepareThreadStoreList();
ArrayList<ThreadStorage> removed = null;
for (ThreadStorage ts: this.threadStoreList) {
if (!ts.isAlive()) {
if (removed == null) {
removed = new ArrayList<ThreadStorage>(64);
}
removed.add(ts);
}
if (ts.dirty) {
ts.dirty = false;
if (ts.intStore != null) {
for (int i=0; i < ts.intStore.length(); i++) {
synchronized (this.intReadPrepLock[i]) {
int delta = ts.intStore.getAndSet(i, 0);
if (delta != 0) {
this.intStorage.getAndAdd(i, delta);
}
}
}
}
if (ts.longStore != null) {
for (int i=0; i < ts.longStore.length(); i++) {
synchronized (this.longReadPrepLock[i]) {
long delta = ts.longStore.getAndSet(i, 0);
if (delta != 0) {
this.longStorage.getAndAdd(i, delta);
}
}
}
}
}
}
if (removed != null) {
this.threadStoreList.removeAll(removed);
}
}
private final boolean isIntDirty(final int idx) {
return this.intDirty.get(idx) != 0;
}
private final boolean isLongDirty(final int idx) {
return this.longDirty.get(idx) != 0;
}
private final boolean clearIntDirty(final int idx) {
if (!this.intDirty.weakCompareAndSet(idx, 1/*expected*/, 0/*update*/)) {
return this.intDirty.compareAndSet(idx, 1/*expected*/, 0/*update*/);
}
return true;
}
private final boolean clearLongDirty(final int idx) {
if (!this.longDirty.weakCompareAndSet(idx, 1/*expected*/, 0/*update*/)) {
return this.longDirty.compareAndSet(idx, 1/*expected*/, 0/*update*/);
}
return true;
}
private final void setIntDirty(final int idx) {
if (!this.intDirty.weakCompareAndSet(idx, 0/*expected*/, 1/*update*/)) {
if (!isIntDirty(idx)) {
this.intDirty.set(idx, 1);
}
}
}
private final void setLongDirty(final int idx) {
if (!this.longDirty.weakCompareAndSet(idx, 0/*expected*/, 1/*update*/)) {
if (!isLongDirty(idx)) {
this.longDirty.set(idx, 1);
}
}
}
private final int doIntRead(final int idx) {
// early out for sampler; it called prepareForSample
if (samplerThread.get() != null) {
return this.intStorage.get(idx);
}
synchronized (this.intReadPrepLock[idx]) {
if (!isIntDirty(idx)) {
// no need to prepare if not dirty
return this.intStorage.get(idx);
}
}
// this can take a while so release sync
prepareThreadStoreList();
synchronized (this.intReadPrepLock[idx]) {
if (!clearIntDirty(idx)) {
// no need to prepare if not dirty
return this.intStorage.get(idx);
}
int delta = 0;
for (ThreadStorage ts: this.threadStoreList) {
delta += ts.intStore.getAndSet(idx, 0);
}
if (delta != 0) {
return this.intStorage.addAndGet(idx, delta);
}
else {
return this.intStorage.get(idx);
}
}
}
private final void doIntWrite(final int idx, int value) {
synchronized (this.intReadPrepLock[idx]) {
if (!isIntDirty(idx)) {
// no need to prepare if not dirty
this.intStorage.set(idx, value);
return;
}
}
prepareThreadStoreList();
synchronized (this.intReadPrepLock[idx]) {
if (clearIntDirty(idx)) {
for (ThreadStorage ts: this.threadStoreList) {
if (ts.intStore.get(idx) != 0) {
ts.intStore.set(idx, 0);
}
}
}
this.intStorage.set(idx, value);
}
}
private final long doLongRead(final int idx) {
if (samplerThread.get() != null) {
return this.longStorage.get(idx);
}
synchronized (this.longReadPrepLock[idx]) {
if (!isLongDirty(idx)) {
// no need to prepare if not dirty
return this.longStorage.get(idx);
}
}
// this can take a while so release sync
prepareThreadStoreList();
synchronized (this.longReadPrepLock[idx]) {
if (!clearLongDirty(idx)) {
// no need to prepare if not dirty
return this.longStorage.get(idx);
}
long delta = 0;
for (ThreadStorage ts: this.threadStoreList) {
delta += ts.longStore.getAndSet(idx, 0);
}
if (delta != 0) {
return this.longStorage.addAndGet(idx, delta);
}
else {
return this.longStorage.get(idx);
}
}
}
private final void doLongWrite(int idx, long value) {
synchronized (this.longReadPrepLock[idx]) {
if (!isLongDirty(idx)) {
// no need to prepare if not dirty
this.longStorage.set(idx, value);
return;
}
}
// this can take a while so release sync
prepareThreadStoreList();
synchronized (this.longReadPrepLock[idx]) {
if (clearLongDirty(idx)) {
for (ThreadStorage ts: this.threadStoreList) {
if (ts.longStore.get(idx) != 0) {
ts.longStore.set(idx, 0);
}
}
}
this.longStorage.set(idx, value);
}
}
/////////////////// internal package methods //////////////////
final int[] _getIntStorage() {
throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DIRECT_ACCESS_NOT_ON_ATOMIC50.toLocalizedString());
}
final long[] _getLongStorage() {
throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DIRECT_ACCESS_NOT_ON_ATOMIC50.toLocalizedString());
}
final double[] _getDoubleStorage() {
throw new IllegalStateException(LocalizedStrings.Atomic50StatisticsImpl_DIRECT_ACCESS_NOT_ON_ATOMIC50.toLocalizedString());
}
}
| apache-2.0 |
marcust/docker-java | src/main/java/com/github/dockerjava/api/command/ContainerDiffCmd.java | 927 | package com.github.dockerjava.api.command;
import java.util.List;
import com.github.dockerjava.api.DockerException;
import com.github.dockerjava.api.InternalServerErrorException;
import com.github.dockerjava.api.NotFoundException;
import com.github.dockerjava.api.model.ChangeLog;
public interface ContainerDiffCmd extends SyncDockerCmd<List<ChangeLog>> {
public String getContainerId();
public ContainerDiffCmd withContainerId(String containerId);
@Override
public String toString();
/**
* @throws NotFoundException
* No such container
* @throws InternalServerErrorException
* server error
* @throws DockerException
* unexpected http status code
*/
@Override
public List<ChangeLog> exec() throws NotFoundException;
public static interface Exec extends DockerCmdSyncExec<ContainerDiffCmd, List<ChangeLog>> {
}
} | apache-2.0 |
ern/elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java | 2385 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.support.CachingRealm;
import org.elasticsearch.xpack.security.authc.support.DnRoleMapper;
import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper;
/**
* A {@link UserRoleMapper} that composes one or more <i>delegate</i> role-mappers.
* During {@link #resolveRoles(UserData, ActionListener) role resolution}, each of the delegates is
* queried, and the individual results are merged into a single {@link Set} which includes all the roles from each mapper.
*/
public class CompositeRoleMapper implements UserRoleMapper {
private List<UserRoleMapper> delegates;
public CompositeRoleMapper(RealmConfig realmConfig,
ResourceWatcherService watcherService,
NativeRoleMappingStore nativeRoleMappingStore) {
this(new DnRoleMapper(realmConfig, watcherService), nativeRoleMappingStore);
}
private CompositeRoleMapper(UserRoleMapper... delegates) {
this.delegates = new ArrayList<>(Arrays.asList(delegates));
}
@Override
public void resolveRoles(UserData user, ActionListener<Set<String>> listener) {
GroupedActionListener<Set<String>> groupListener = new GroupedActionListener<>(ActionListener.wrap(
composite -> listener.onResponse(composite.stream().flatMap(Set::stream).collect(Collectors.toSet())), listener::onFailure
), delegates.size());
this.delegates.forEach(mapper -> mapper.resolveRoles(user, groupListener));
}
@Override
public void refreshRealmOnChange(CachingRealm realm) {
this.delegates.forEach(mapper -> mapper.refreshRealmOnChange(realm));
}
}
| apache-2.0 |
jwren/intellij-community | plugins/kotlin/idea/tests/testData/refactoring/move/kotlin/moveNestedClass/deepInnerToTopLevelWithThis/before/test2/usages2.java | 125 | package test2;
import test.A;
import test.A.B.C;
class Test {
C foo() {
return new A().new B().new C();
}
} | apache-2.0 |
jahnaviancha/scouter | scouter.agent/src/scouter/agent/counter/task/Debug.java | 2254 | package scouter.agent.counter.task;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Enumeration;
import scouter.agent.Configure;
import scouter.agent.Logger;
import scouter.agent.counter.CounterBasket;
import scouter.agent.counter.anotation.Counter;
import scouter.agent.trace.TraceContext;
import scouter.agent.trace.TraceContextManager;
import scouter.agent.util.DumpUtil;
import scouter.util.DateUtil;
import scouter.util.FileUtil;
import scouter.util.Hexa32;
import scouter.util.SysJMX;
public class Debug {
@Counter
public void autoStack(CounterBasket pw) {
Configure conf = Configure.getInstance();
if (conf.debug_long_tx_autostack <= 0)
return;
PrintWriter out = null;
try {
Enumeration<TraceContext> en = TraceContextManager.getContextEnumeration();
while (en.hasMoreElements()) {
TraceContext ctx = en.nextElement();
long etime = System.currentTimeMillis() - ctx.startTime;
if (etime > conf.debug_long_tx_autostack) {
try {
if (out == null) {
out = open();
}
out.print(ctx.thread.getId() + ":");
out.print(ctx.thread.getName() + ":");
out.print(ctx.thread.getState().name() + ":");
out.print("cpu " + SysJMX.getThreadCpuTime(ctx.thread) + ":");
out.print(Hexa32.toString32(ctx.txid) + ":");
out.print(ctx.serviceName + ":");
out.print(etime + " ms");
if (ctx.sqltext != null) {
out.print(":sql=" + ctx.sqltext );
if(ctx.sqlActiveArgs!=null){
out.print("[" + ctx.sqlActiveArgs + "]");
}
out.print(":");
}
if (ctx.apicall_name != null) {
out.println(":subcall=" + ctx.apicall_name);
}
out.println("");
DumpUtil.printStack(out, ctx.thread.getId());
out.println("");
} catch (Exception e) {
Logger.println("A155", e.toString());
FileUtil.close(out);
return;
}
}
}
} finally {
FileUtil.close(out);
}
}
public PrintWriter open() throws IOException {
File file = new File(Configure.getInstance().dump_dir, "longtx_" +Configure.getInstance().scouter_name+ "_"+DateUtil.timestampFileName()+".txt");
return new PrintWriter(new FileWriter(file));
}
}
| apache-2.0 |
idea4bsd/idea4bsd | plugins/maven/jps-plugin/src/org/jetbrains/jps/maven/compiler/MavenEjbArtifactRootCopyingHandlerProvider.java | 3172 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.jps.maven.compiler;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jps.builders.storage.BuildDataPaths;
import org.jetbrains.jps.incremental.artifacts.instructions.ArtifactRootCopyingHandlerProvider;
import org.jetbrains.jps.incremental.artifacts.instructions.FileCopyingHandler;
import org.jetbrains.jps.incremental.artifacts.instructions.FilterCopyHandler;
import org.jetbrains.jps.maven.model.JpsMavenExtensionService;
import org.jetbrains.jps.maven.model.impl.MavenEjbClientConfiguration;
import org.jetbrains.jps.maven.model.impl.MavenProjectConfiguration;
import org.jetbrains.jps.maven.model.impl.MavenResourceFileFilter;
import org.jetbrains.jps.model.JpsElement;
import org.jetbrains.jps.model.JpsModel;
import org.jetbrains.jps.model.artifact.JpsArtifact;
import org.jetbrains.jps.model.artifact.elements.JpsPackagingElement;
import org.jetbrains.jps.model.ex.JpsElementBase;
import java.io.File;
/**
* @author nik
*/
public class MavenEjbArtifactRootCopyingHandlerProvider extends ArtifactRootCopyingHandlerProvider {
@Nullable
@Override
public FileCopyingHandler createCustomHandler(@NotNull JpsArtifact artifact,
@NotNull File root,
@NotNull JpsPackagingElement contextElement,
@NotNull JpsModel model,
@NotNull BuildDataPaths buildDataPaths) {
MavenProjectConfiguration projectConfiguration = JpsMavenExtensionService.getInstance().getMavenProjectConfiguration(buildDataPaths);
if (projectConfiguration == null) return null;
MavenEjbClientConfiguration ejbCfg = projectConfiguration.ejbClientArtifactConfigs.get(artifact.getName());
if (ejbCfg == null) {
JpsArtifact parentArtifact = findParentArtifact(contextElement);
if (parentArtifact != null) {
ejbCfg = projectConfiguration.ejbClientArtifactConfigs.get(parentArtifact.getName());
}
}
return ejbCfg == null ? null : new FilterCopyHandler(new MavenResourceFileFilter(root, ejbCfg));
}
private static JpsArtifact findParentArtifact(JpsElement element) {
if (element instanceof JpsElementBase) {
JpsElementBase parent = ((JpsElementBase)element).getParent();
if (parent instanceof JpsArtifact) {
return (JpsArtifact)parent;
}
if (parent != null) {
return findParentArtifact(parent);
}
}
return null;
}
}
| apache-2.0 |
melix/golo-lang | src/main/java/fr/insalyon/citi/golo/compiler/ir/ReferenceLookup.java | 1129 | /*
* Copyright 2012-2013 Institut National des Sciences Appliquées de Lyon (INSA-Lyon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.insalyon.citi.golo.compiler.ir;
public class ReferenceLookup extends ExpressionStatement {
private final String name;
public ReferenceLookup(String name) {
super();
this.name = name;
}
public String getName() {
return name;
}
public LocalReference resolveIn(ReferenceTable referenceTable) {
return referenceTable.get(name);
}
@Override
public void accept(GoloIrVisitor visitor) {
visitor.visitReferenceLookup(this);
}
}
| apache-2.0 |
swarmsandbox/wildfly-swarm | testsuite/testsuite-webservices/src/main/java/org/wildfly/swarm/webservices/EchoServiceClient.java | 2846 |
package org.wildfly.swarm.webservices;
import java.net.MalformedURLException;
import java.net.URL;
import javax.xml.namespace.QName;
import javax.xml.ws.Service;
import javax.xml.ws.WebEndpoint;
import javax.xml.ws.WebServiceClient;
import javax.xml.ws.WebServiceException;
import javax.xml.ws.WebServiceFeature;
/**
* This class was generated by the JAX-WS RI.
* JAX-WS RI 2.2.9-b130926.1035
* Generated source version: 2.2
*/
@WebServiceClient(name = "echoService", targetNamespace = "webservices.swarm.wildfly.org", wsdlLocation = "http://localhost:8080/ws/echo?wsdl")
public class EchoServiceClient extends Service {
private static final URL ECHOSERVICE_WSDL_LOCATION;
private static final WebServiceException ECHOSERVICE_EXCEPTION;
private static final QName ECHOSERVICE_QNAME = new QName("webservices.swarm.wildfly.org", "echoService");
static {
URL url = null;
WebServiceException e = null;
try {
url = new URL("http://localhost:8080/ws/echo?wsdl");
} catch (MalformedURLException ex) {
e = new WebServiceException(ex);
}
ECHOSERVICE_WSDL_LOCATION = url;
ECHOSERVICE_EXCEPTION = e;
}
public EchoServiceClient() {
super(__getWsdlLocation(), ECHOSERVICE_QNAME);
}
public EchoServiceClient(WebServiceFeature... features) {
super(__getWsdlLocation(), ECHOSERVICE_QNAME, features);
}
public EchoServiceClient(URL wsdlLocation) {
super(wsdlLocation, ECHOSERVICE_QNAME);
}
public EchoServiceClient(URL wsdlLocation, WebServiceFeature... features) {
super(wsdlLocation, ECHOSERVICE_QNAME, features);
}
public EchoServiceClient(URL wsdlLocation, QName serviceName) {
super(wsdlLocation, serviceName);
}
public EchoServiceClient(URL wsdlLocation, QName serviceName, WebServiceFeature... features) {
super(wsdlLocation, serviceName, features);
}
/**
* @return returns EchoService
*/
@WebEndpoint(name = "echoPort")
public EchoService getEchoPort() {
return super.getPort(new QName("webservices.swarm.wildfly.org", "echoPort"), EchoService.class);
}
/**
* @param features A list of {@link WebServiceFeature} to configure on the proxy. Supported features not in the <code>features</code> parameter will have their default values.
* @return returns EchoService
*/
@WebEndpoint(name = "echoPort")
public EchoService getEchoPort(WebServiceFeature... features) {
return super.getPort(new QName("webservices.swarm.wildfly.org", "echoPort"), EchoService.class, features);
}
private static URL __getWsdlLocation() {
if (ECHOSERVICE_EXCEPTION != null) {
throw ECHOSERVICE_EXCEPTION;
}
return ECHOSERVICE_WSDL_LOCATION;
}
}
| apache-2.0 |
IanEdington/ud405 | MiscDemos/IciclesPrototype/core/src/com/udacity/gamedev/icicles/Icicles.java | 1794 | package com.udacity.gamedev.icicles;
import com.badlogic.gdx.graphics.glutils.ShapeRenderer;
import com.badlogic.gdx.math.MathUtils;
import com.badlogic.gdx.math.Vector2;
import com.badlogic.gdx.utils.DelayedRemovalArray;
import com.badlogic.gdx.utils.viewport.Viewport;
import com.udacity.gamedev.icicles.Constants.Difficulty;
public class Icicles {
public static final String TAG = Icicles.class.getName();
Difficulty difficulty;
int iciclesDodged;
DelayedRemovalArray<Icicle> icicleList;
Viewport viewport;
public Icicles(Viewport viewport, Difficulty difficulty) {
this.difficulty = difficulty;
this.viewport = viewport;
init();
}
public void init() {
icicleList = new DelayedRemovalArray<Icicle>(false, 100);
iciclesDodged = 0;
}
public void update(float delta) {
if (MathUtils.random() < delta * difficulty.spawnRate) {
Vector2 newIciclePosition = new Vector2(
MathUtils.random() * viewport.getWorldWidth(),
viewport.getWorldHeight()
);
Icicle newIcicle = new Icicle(newIciclePosition);
icicleList.add(newIcicle);
}
for (Icicle icicle : icicleList) {
icicle.update(delta);
}
icicleList.begin();
for (int i = 0; i < icicleList.size; i++) {
if (icicleList.get(i).position.y < -Constants.ICICLES_HEIGHT) {
iciclesDodged += 1;
icicleList.removeIndex(i);
}
}
icicleList.end();
}
public void render(ShapeRenderer renderer) {
renderer.setColor(Constants.ICICLE_COLOR);
for (Icicle icicle : icicleList) {
icicle.render(renderer);
}
}
}
| mit |
tectronics/phantomuserland | tools/plc/src/ru/dz/jpc/tophantom/Repr.java | 4067 | // Repr.java -- methods dealing with C representations
package ru.dz.jpc.tophantom;
import ru.dz.jpc.classfile.*;
import java.io.*;
class Repr {
static final int DECLS_PER_LINE = 15; // variables per declaration line
static final int GCHARS_PER_LINE = 18; // generated char consts on one line
static final int GCHARS_PER_CMMT = 40; // max chars of string gen in comment
// ctype(c) -- return C type indicated by character c.
//
// upper case characters are Java signature characters.
// lower case characters are JVM datatypes.
static String ctype(char c)
{
switch (c) {
case 'B': return "Byte";
case 'C': return "Char";
case 'D': case 'd': return "Double";
case 'F': case 'f': return "Float";
case 'I': case 'i': return "Int";
case 'J': case 'l': return "Long";
case 'S': return "Short";
case 'V': return "Void";
case 'Z': return "Boolean";
default: return "Object";
}
}
// rettype(s) -- Compute C return type of Java method given signature.
static String rettype(String s)
{
return ctype(s.charAt(s.indexOf(')') + 1));
}
// isQuotable(c) -- is character c reasonably specified as 'c'?
static boolean isQuotable(char c)
{
return c >= ' ' && c <= '~' && c != '\'' && c != '\\';
}
// con(m, c) -- return C representation of constant c in method m.
//
// Valid for INT, LONG, FLOAT, DOUBLE, or STRING constants
// (assuming that necessary static constants were generated earlier).
static String con(Method m, Constant c)
{
switch (c.tag) {
case Constant.INTEGER:
if (((Integer)c.value).intValue() == Integer.MIN_VALUE)
return "0x80000000";
else
return c.value.toString();
case Constant.LONG:
long v = ((Long)c.value).longValue();
if (v > (long)Integer.MIN_VALUE && v < (long)Integer.MAX_VALUE)
return c.value.toString();
// now we must use "ANSI C" to construct a possibly "long long" val
int lh = (int)(v >>> 32);
int rh = (int)v;
return "((((Long)0x" + Integer.toHexString(lh) +
"u) << 32) | ((Long)0x" + Integer.toHexString(rh) + "u))";
case Constant.FLOAT:
case Constant.DOUBLE:
if (simplefloat(((Number)c.value).doubleValue()))
return c.value.toString();
else if (c.tag == Constant.FLOAT)
return "fc" + c.index + ".v";
else /* c.tag == Constant.DOUBLE */
return "dc" + c.index + ".v";
case Constant.STRING:
return "(Object)st_" + m.cl.cname + "[" + CFile.strref(c) + "]";
default:
return c.value.toString();
}
}
// simplefloat(d) -- is d represented accurately by toString()?
//
// This method errs on the side of caution, but is good enough
// to accept many exact numbers such as 23.0, 1.5, and 0.125.
static boolean simplefloat(double d)
{
if (Double.isNaN(d) || Double.isInfinite(d))
return false;
if (Double.doubleToLongBits(d) == 0x8000000000000000L)
return false; // negative zero
if (d % 1.0 == 0.0 && d < 1000000.0 && d > -1000000.0)
return true;
if (d % 0.03125 == 0.0 && d < 10.0 && d > -10.0)
return true;
return false;
}
/** Write to stream a sequence of comma-separated integral expressions which
* can be used to initialize an array of unsigned shorts representing a
* Unicode string. */
static public void
emitCharData (PrintWriter d, // Where to write data
String strdata) // String containing data
{
int slen = strdata.length ();
int i = 0;
while (i < slen) {
char c = strdata.charAt (i);
if (isQuotable (c)) {
/* Consider this a printable that can appear as a C char constant */
d.print("'" + c + "'");
} else {
/* A non-printable; treat as an integer */
d.print((int) c);
}
if (++i < slen) {
/* Separate with commas */
d.print (",");
if (0 == (i % GCHARS_PER_LINE)) {
/* Keep lines from getting too big */
d.print ("\n");
}
}
}
return;
}
} // class Repr
| lgpl-3.0 |
jomarko/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-core/kie-wb-common-stunner-api/kie-wb-common-stunner-core-api/src/main/java/org/kie/workbench/common/stunner/core/rule/annotation/Occurrences.java | 1250 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.core.rule.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Repeatable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation for specifying a containment rule.
* It's only allowed to use on Definition Sets
*/
@Inherited
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(AllowedOccurrences.class)
public @interface Occurrences {
String role();
long min() default 0;
long max() default -1;
}
| apache-2.0 |
WilliamDo/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessor.java | 5045 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.binary;
import java.util.Collection;
import java.util.Map;
import org.apache.ignite.IgniteBinary;
import org.apache.ignite.IgniteException;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.binary.BinaryObjectBuilder;
import org.apache.ignite.binary.BinaryType;
import org.apache.ignite.internal.binary.BinaryFieldMetadata;
import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor;
import org.jetbrains.annotations.Nullable;
/**
* Extended cache object processor interface with additional methods for binary.
*/
public interface CacheObjectBinaryProcessor extends IgniteCacheObjectProcessor {
/**
* @param clsName Class name.
* @return Builder.
*/
public BinaryObjectBuilder builder(String clsName);
/**
* Creates builder initialized by existing binary object.
*
* @param binaryObj Binary object to edit.
* @return Binary builder.
*/
public BinaryObjectBuilder builder(BinaryObject binaryObj);
/**
* @param typeId Type ID.
* @param newMeta New meta data.
* @throws IgniteException In case of error.
*/
public void addMeta(int typeId, final BinaryType newMeta) throws IgniteException;
/**
* Adds metadata locally without triggering discovery exchange.
*
* Must be used only during startup and only if it is guaranteed that all nodes have the same copy
* of BinaryType.
*
* @param typeId Type ID.
* @param newMeta New meta data.
* @throws IgniteException In case of error.
*/
public void addMetaLocally(int typeId, final BinaryType newMeta) throws IgniteException;
/**
* @param typeId Type ID.
* @param typeName Type name.
* @param affKeyFieldName Affinity key field name.
* @param fieldTypeIds Fields map.
* @param isEnum Enum flag.
* @param enumMap Enum name to ordinal mapping.
* @throws IgniteException In case of error.
*/
public void updateMetadata(int typeId, String typeName, @Nullable String affKeyFieldName,
Map<String, BinaryFieldMetadata> fieldTypeIds, boolean isEnum, @Nullable Map<String, Integer> enumMap)
throws IgniteException;
/**
* @param typeId Type ID.
* @return Meta data.
* @throws IgniteException In case of error.
*/
@Nullable public BinaryType metadata(int typeId) throws IgniteException;
/**
* @param typeId Type ID.
* @param schemaId Schema ID.
* @return Meta data.
* @throws IgniteException In case of error.
*/
@Nullable public BinaryType metadata(int typeId, int schemaId) throws IgniteException;
/**
* @param typeIds Type ID.
* @return Meta data.
* @throws IgniteException In case of error.
*/
public Map<Integer, BinaryType> metadata(Collection<Integer> typeIds) throws IgniteException;
/**
* @return Metadata for all types.
* @throws IgniteException In case of error.
*/
public Collection<BinaryType> metadata() throws IgniteException;
/**
* @param typeName Type name.
* @param ord ordinal.
* @return Enum object.
* @throws IgniteException If failed.
*/
public BinaryObject buildEnum(String typeName, int ord) throws IgniteException;
/**
* @param typeName Type name.
* @param name Name.
* @return Enum object.
* @throws IgniteException If failed.
*/
public BinaryObject buildEnum(String typeName, String name) throws IgniteException;
/**
* Register enum type
*
* @param typeName Type name.
* @param vals Mapping of enum constant names to ordinals.
* @return Binary Type for registered enum.
*/
public BinaryType registerEnum(String typeName, Map<String, Integer> vals) throws IgniteException;
/**
* @return Binaries interface.
* @throws IgniteException If failed.
*/
public IgniteBinary binary() throws IgniteException;
/**
* @param obj Original object.
* @return Binary object (in case binary marshaller is used).
* @throws IgniteException If failed.
*/
public Object marshalToBinary(Object obj) throws IgniteException;
}
| apache-2.0 |
ThiagoGarciaAlves/intellij-community | platform/util/src/com/intellij/openapi/wm/FocusWatcher.java | 6420 | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.wm;
import com.intellij.reference.SoftReference;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.text.JTextComponent;
import java.awt.*;
import java.awt.event.ContainerEvent;
import java.awt.event.ContainerListener;
import java.awt.event.FocusEvent;
import java.awt.event.FocusListener;
import java.lang.ref.WeakReference;
/**
* Spies how focus goes in the component.
* @author Vladimir Kondratyev
*/
public class FocusWatcher implements ContainerListener,FocusListener{
  // All component references are weak so the watcher never prevents
  // removed Swing components from being garbage-collected.
  private WeakReference<Component> myTopComponent;
  /**
   * Last component that had focus.
   */
  private WeakReference<Component> myFocusedComponent;
  /**
   * TODO[vova,anton] the name getMostRecentFocusOwner is better. The description could be copied from
   * java.awt.Window.getMostRecentFocusOwner() method.
   * This is the nearest component to the myFocusableComponent
   */
  private WeakReference<Component> myNearestFocusableComponent;
  /**
   * @return top component on which focus watcher was installed.
   * Returns {@code null} if {@link #install(Component)} has not been called
   * yet, or if the top component has been garbage-collected.
   */
  public Component getTopComponent() {
    return SoftReference.dereference(myTopComponent);
  }
  @Override
  public final void componentAdded(final ContainerEvent e){
    // Start watching any child that is added to a container we already watch.
    installImpl(e.getChild());
  }
  @Override
  public final void componentRemoved(final ContainerEvent e){
    Component removedChild=e.getChild();
    // Drop the cached "nearest focusable" component if it is being removed.
    if(getNearestFocusableComponent() !=null&&SwingUtilities.isDescendingFrom(getNearestFocusableComponent(),removedChild)){
      setNearestFocusableComponent(null);
    }
    // If the currently focused component is going away, remember its former
    // container as the nearest place focus could reasonably return to.
    if(getFocusedComponent() !=null&&SwingUtilities.isDescendingFrom(getFocusedComponent(),removedChild)){
      setNearestFocusableComponent(e.getContainer());
    }
    deinstall(removedChild, e);
  }
  public final void deinstall(final Component component){
    deinstall(component, null);
  }
  /**
   * Recursively stops watching {@code component} and its whole subtree.
   *
   * @param component component to stop watching; {@code null} is a no-op
   * @param cause     the AWT event that triggered the deinstall, or {@code null}
   */
  public final void deinstall(final Component component, @Nullable AWTEvent cause){
    if (component == null) return;
    if(component instanceof Container){
      Container container=(Container)component;
      int componentCount=container.getComponentCount();
      for(int i=0;i<componentCount;i++){
        deinstall(container.getComponent(i));
      }
      container.removeContainerListener(this);
    }
    component.removeFocusListener(this);
    // Forget the focused component if it is the one being deinstalled.
    if(getFocusedComponent() ==component){
      setFocusedComponentImpl(null, cause);
    }
  }
  @Override
  public final void focusGained(final FocusEvent e){
    final Component component = e.getComponent();
    // Temporary focus changes and non-showing components are ignored.
    if(e.isTemporary()||!component.isShowing()){
      return;
    }
    // Text components get standard undo/redo actions wired up on focus.
    if (component instanceof JTextComponent) {
      UIUtil.addUndoRedoActions((JTextComponent)component);
    }
    setFocusedComponentImpl(component, e);
    setNearestFocusableComponent(component.getParent());
  }
  @Override
  public final void focusLost(final FocusEvent e){
    Component component = e.getOppositeComponent();
    // Only report focus loss when focus moved outside the watched hierarchy.
    if(component != null && !SwingUtilities.isDescendingFrom(component, SoftReference.dereference(myTopComponent))){
      focusLostImpl(e);
    }
  }
  /**
   * @return last focused component or {@code null}.
   */
  public final Component getFocusedComponent(){
    return SoftReference.dereference(myFocusedComponent);
  }
  public final Component getNearestFocusableComponent() {
    return SoftReference.dereference(myNearestFocusableComponent);
  }
  /**
   * Installs this watcher on {@code component} and its whole subtree.
   *
   * @param component root of the hierarchy to watch; must not be {@code null}
   */
  public final void install(@NotNull Component component){
    myTopComponent = new WeakReference<Component>(component);
    installImpl(component);
  }
  private void installImpl(Component component){
    if(component instanceof Container){
      Container container=(Container)component;
      // Hold the AWT tree lock while iterating so children added or removed
      // concurrently cannot be missed.
      synchronized (container.getTreeLock()) {
        int componentCount = container.getComponentCount();
        for (int i = 0; i < componentCount; i++) {
          installImpl(container.getComponent(i));
        }
        container.addContainerListener(this);
      }
    }
    // Menu items receive focus transiently during menu navigation; tracking
    // them would pollute the "last focused component" state.
    if(component instanceof JMenuItem||component instanceof JMenuBar){
      return;
    }
    component.addFocusListener(this);
  }
  public void setFocusedComponentImpl(Component component){
    setFocusedComponentImpl(component, null);
  }
  public void setFocusedComponentImpl(Component component, @Nullable AWTEvent cause){
    if (!isFocusedComponentChangeValid(component, cause)) return;
    // Focus proxies forward focus elsewhere; keep the previously focused
    // component instead of recording the proxy itself.
    if (UIUtil.isFocusProxy(component)) {
      _setFocused(getFocusedComponent(), cause);
      return;
    }
    _setFocused(component, cause);
  }
  private void _setFocused(final Component component, final AWTEvent cause) {
    setFocusedComponent(component);
    focusedComponentChanged(component, cause);
  }
  // A null component with a null cause is rejected; subclasses may tighten this.
  protected boolean isFocusedComponentChangeValid(final Component comp, final AWTEvent cause) {
    return comp != null || cause != null;
  }
  /**
   * Override this method to get notifications about focus. {@code FocusWatcher} invokes
   * this method each time one of the populated component gains focus. All "temporary" focus
   * event are ignored.
   *
   * @param component currently focused component. The component can be {@code null}
   * @param cause the AWT event that caused the change, or {@code null}
   */
  protected void focusedComponentChanged(Component component, @Nullable final AWTEvent cause){}
  protected void focusLostImpl(final FocusEvent e){}
  private void setFocusedComponent(final Component focusedComponent) {
    myFocusedComponent = new WeakReference<Component>(focusedComponent);
  }
  private void setNearestFocusableComponent(final Component nearestFocusableComponent) {
    myNearestFocusableComponent = new WeakReference<Component>(nearestFocusableComponent);
  }
}
CamelBackNotation/CarnotKE | jyhton/Neo4j/WDB/src/parser/generated/wdb/parser/False.java | 754 | /* Generated By:JJTree: Do not edit this line. False.java */
package wdb.parser;
import java.util.ArrayList;
import wdb.SleepyCatDataAdapter;
import wdb.metadata.IndexSelectResult;
import wdb.metadata.WDBObject;
public class False extends SimpleNode {

    public False(int id) {
        super(id);
    }

    public False(QueryParser p, int id) {
        super(p, id);
    }

    /**
     * Index selection cannot narrow anything down for a constant FALSE
     * condition, so an empty ("scan" / "can't help") result is returned.
     */
    public IndexSelectResult filterObjectsWithIndexes(SleepyCatDataAdapter da, ArrayList indexes) throws Exception {
        // No index support for this node type; hand back a fresh, empty result.
        return new IndexSelectResult();
    }

    /** A FALSE literal never matches any object. */
    public boolean eval(SleepyCatDataAdapter da, WDBObject wdbO) {
        return false;
    }
}
| apache-2.0 |
nagyistoce/camunda-bpm-platform | engine/src/test/java/org/camunda/bpm/engine/test/el/ExecutionTestVariable.java | 1037 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.test.el;
import java.io.Serializable;
import org.camunda.bpm.engine.delegate.DelegateExecution;
/**
* Class used to test passing of execution in expressions/
*
* @author Frederik Heremans
*/
public class ExecutionTestVariable implements Serializable {

    private static final long serialVersionUID = 1L;

    /**
     * Stores the marker variable "testVar" on the given execution so tests
     * can verify that the execution object was passed into the expression.
     */
    public void testMethod(DelegateExecution delegateExecution) {
        delegateExecution.setVariable("testVar", "myValue");
    }
}
| apache-2.0 |
donNewtonAlpha/onos | core/store/primitives/src/main/java/org/onosproject/store/primitives/impl/UpdateRequest.java | 1968 | /*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.store.primitives.impl;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableSet;
import org.onosproject.cluster.NodeId;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Describes a request for update events in an EventuallyConsistentMap.
*/
final class UpdateRequest<K> {

    private final NodeId sender;
    private final Set<K> keys;

    /**
     * Builds a request asking for updates to the given keys.
     *
     * @param sender node ID of the requesting node; must not be null
     * @param keys   keys whose updates are requested; copied defensively
     */
    public UpdateRequest(NodeId sender, Set<K> keys) {
        this.sender = checkNotNull(sender);
        this.keys = ImmutableSet.copyOf(keys);
    }

    /** Required by the serialization framework; fields stay null. */
    @SuppressWarnings("unused")
    private UpdateRequest() {
        this.sender = null;
        this.keys = null;
    }

    /**
     * @return node ID of the node that sent this request
     */
    public NodeId sender() {
        return sender;
    }

    /**
     * @return the requested keys (immutable)
     */
    public Set<K> keys() {
        return keys;
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper(getClass())
                .add("sender", sender)
                .add("keys", keys())
                .toString();
    }
}
| apache-2.0 |
ghostflare76/scouter | scouter.client/src/scouter/client/actions/OpenRedisInfoViewAction.java | 2836 | /*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.client.actions;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.jface.action.Action;
import org.eclipse.ui.IWorkbenchPage;
import org.eclipse.ui.IWorkbenchWindow;
import scouter.client.Images;
import scouter.client.model.TextProxy;
import scouter.client.net.TcpProxy;
import scouter.client.util.ExUtil;
import scouter.client.util.ImageUtil;
import scouter.client.views.WhiteBoardView;
import scouter.lang.pack.MapPack;
import scouter.lang.pack.Pack;
import scouter.net.RequestCmd;
/**
 * Action that fetches the "INFO" output of a monitored Redis instance from
 * the collector server and shows it in a {@code WhiteBoardView}.
 */
public class OpenRedisInfoViewAction extends Action {
	public final static String ID = OpenRedisInfoViewAction.class.getName();
	private final IWorkbenchWindow window;
	// Id of the collector server to query.
	int serverId;
	// Hash identifying the monitored Redis object whose info is requested.
	int objHash;
	/**
	 * Creates the action.
	 *
	 * @param window   workbench window used to open the result view
	 * @param serverId id of the collector server
	 * @param objHash  hash of the target Redis object
	 */
	public OpenRedisInfoViewAction(IWorkbenchWindow window, int serverId, int objHash) {
		this.window = window;
		this.serverId = serverId;
		this.objHash = objHash;
		setText("Info");
	}
	public void run() {
		// Fetch the data in a background Job so the UI thread is not blocked.
		new LoadRedisInfo().schedule();
	}
	/** Background job that requests the Redis info and opens the view. */
	class LoadRedisInfo extends Job {
		public LoadRedisInfo() {
			super("Load Redis Info");
		}
		protected IStatus run(IProgressMonitor monitor) {
			TcpProxy tcp = TcpProxy.getTcpProxy(serverId);
			try {
				MapPack param = new MapPack();
				param.put("objHash", objHash);
				Pack p = tcp.getSingle(RequestCmd.REDIS_INFO, param);
				if (p != null) {
					MapPack m = (MapPack) p;
					final String content = m.getText("info");
					// View creation must happen on the SWT display thread.
					ExUtil.exec(window.getShell().getDisplay(), new Runnable() {
						public void run() {
							try {
								WhiteBoardView view = (WhiteBoardView) window.getActivePage().showView(WhiteBoardView.ID, serverId + "&" + objHash, IWorkbenchPage.VIEW_ACTIVATE);
								if (view != null) {
									view.setInput("Info[" + TextProxy.object.getText(objHash) + "]", content);
								}
							} catch (Exception e) {
								e.printStackTrace();
							}
						}
					});
				}
			} catch (Exception e) {
				e.printStackTrace();
			} finally {
				// Always return the proxy to the pool, even on failure.
				TcpProxy.putTcpProxy(tcp);
			}
			return Status.OK_STATUS;
		}
	}
}
| apache-2.0 |
NJUJYB/disYarn | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java | 6907 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests for {@code hdfs namenode -initializeSharedEdits}: verifies that HA
 * NameNodes cannot start without a shared edits dir, can start after it is
 * (re-)initialized, and that initialization handles existing dirs and
 * generic config keys correctly.
 */
public class TestInitializeSharedEdits {
  private static final Log LOG = LogFactory.getLog(TestInitializeSharedEdits.class);
  private static final Path TEST_PATH = new Path("/test");
  private Configuration conf;
  private MiniDFSCluster cluster;
  @Before
  public void setupCluster() throws IOException {
    conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    HAUtil.setAllowStandbyReads(conf, true);
    MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
    cluster.waitActive();
    // Every test starts from the "shared edits dir missing" state.
    shutdownClusterAndRemoveSharedEditsDir();
  }
  @After
  public void shutdownCluster() throws IOException {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
  // Stops both NNs and deletes the shared edits directory on disk.
  private void shutdownClusterAndRemoveSharedEditsDir() throws IOException {
    cluster.shutdownNameNode(0);
    cluster.shutdownNameNode(1);
    File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
    assertTrue(FileUtil.fullyDelete(sharedEditsDir));
  }
  // Asserts that neither NN can start while the shared edits dir is absent.
  private void assertCannotStartNameNodes() {
    // Make sure we can't currently start either NN.
    try {
      cluster.restartNameNode(0, false);
      fail("Should not have been able to start NN1 without shared dir");
    } catch (IOException ioe) {
      LOG.info("Got expected exception", ioe);
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    }
    try {
      cluster.restartNameNode(1, false);
      fail("Should not have been able to start NN2 without shared dir");
    } catch (IOException ioe) {
      LOG.info("Got expected exception", ioe);
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    }
  }
  // Starts both NNs, transitions NN0 to active, performs a metadata
  // operation (mkdir of TEST_PATH/pathSuffix) and verifies the standby
  // catches up via the shared edits dir.
  private void assertCanStartHaNameNodes(String pathSuffix)
      throws ServiceFailedException, IOException, URISyntaxException,
      InterruptedException {
    // Now should be able to start both NNs. Pass "false" here so that we don't
    // try to waitActive on all NNs, since the second NN doesn't exist yet.
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, true);
    // Make sure HA is working.
    cluster.getNameNode(0).getRpcServer().transitionToActive(
        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
    FileSystem fs = null;
    try {
      Path newPath = new Path(TEST_PATH, pathSuffix);
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      assertTrue(fs.mkdirs(newPath));
      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
          cluster.getNameNode(1));
      assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
          newPath.toString(), false).isDir());
    } finally {
      if (fs != null) {
        fs.close();
      }
    }
  }
  @Test
  public void testInitializeSharedEdits() throws Exception {
    assertCannotStartNameNodes();
    // Initialize the shared edits dir.
    assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
    assertCanStartHaNameNodes("1");
    // Now that we've done a metadata operation, make sure that deleting and
    // re-initializing the shared edits dir will let the standby still start.
    shutdownClusterAndRemoveSharedEditsDir();
    assertCannotStartNameNodes();
    // Re-initialize the shared edits dir.
    assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
    // Should *still* be able to start both NNs
    assertCanStartHaNameNodes("2");
  }
  @Test
  public void testFailWhenNoSharedEditsSpecified() throws Exception {
    // Without DFS_NAMENODE_SHARED_EDITS_DIR_KEY set, initialization must fail.
    Configuration confNoShared = new Configuration(conf);
    confNoShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
    assertFalse(NameNode.initializeSharedEdits(confNoShared, true));
  }
  @Test
  public void testDontOverWriteExistingDir() throws IOException {
    // First call initializes (returns false = no error); second call must
    // refuse to overwrite the now-existing dir (returns true = aborted).
    assertFalse(NameNode.initializeSharedEdits(conf, false));
    assertTrue(NameNode.initializeSharedEdits(conf, false));
  }
  @Test
  public void testInitializeSharedEditsConfiguresGenericConfKeys() throws IOException {
    // initializeSharedEdits should populate the generic (non-suffixed) RPC
    // address key from the nameservice-specific configuration.
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
        "ns1"), "nn1,nn2");
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
        "ns1", "nn1"), "localhost:1234");
    assertNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
    NameNode.initializeSharedEdits(conf);
    assertNotNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
  }
}
| apache-2.0 |
jarst/camel | components/camel-aws/src/test/java/org/apache/camel/component/aws/s3/S3ComponentListBucketsTest.java | 3022 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.aws.s3;
import java.util.List;
import com.amazonaws.services.s3.model.Bucket;
import org.apache.camel.EndpointInject;
import org.apache.camel.Exchange;
import org.apache.camel.ExchangePattern;
import org.apache.camel.ProducerTemplate;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.impl.JndiRegistry;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.Test;
public class S3ComponentListBucketsTest extends CamelTestSupport {
@EndpointInject(uri = "direct:listBuckets")
private ProducerTemplate template;
@EndpointInject(uri = "mock:result")
private MockEndpoint result;
private AmazonS3ClientMock client;
@Test
public void sendIn() throws Exception {
result.expectedMessageCount(1);
template.sendBody("direct:listBuckets", ExchangePattern.InOnly, "");
assertMockEndpointsSatisfied();
assertResultExchange(result.getExchanges().get(0));
}
private void assertResultExchange(Exchange resultExchange) {
List<Bucket> list = resultExchange.getIn().getBody(List.class);
assertEquals(1, list.size());
assertEquals("camel", ((Bucket) list.get(0)).getOwner().getDisplayName());
assertEquals("camel-bucket", ((Bucket) list.get(0)).getName());
}
@Override
protected JndiRegistry createRegistry() throws Exception {
JndiRegistry registry = super.createRegistry();
client = new AmazonS3ClientMock();
registry.bind("amazonS3Client", client);
return registry;
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
String awsEndpoint = "aws-s3://mycamelbucket?amazonS3Client=#amazonS3Client®ion=us-west-1&operation=listBuckets";
from("direct:listBuckets")
.to(awsEndpoint)
.to("mock:result");
}
};
}
} | apache-2.0 |
simleo/openmicroscopy | components/insight/SRC/org/openmicroscopy/shoola/agents/metadata/ChannelDataSaver.java | 3574 | /*
*------------------------------------------------------------------------------
* Copyright (C) 2006-2014 University of Dundee & Open Microscopy Environment.
* All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*------------------------------------------------------------------------------
*/
package org.openmicroscopy.shoola.agents.metadata;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.collections.CollectionUtils;
import org.openmicroscopy.shoola.agents.events.metadata.ChannelSavedEvent;
import org.openmicroscopy.shoola.agents.metadata.editor.Editor;
import omero.gateway.SecurityContext;
import org.openmicroscopy.shoola.env.data.views.CallHandle;
import org.openmicroscopy.shoola.env.event.EventBus;
import omero.gateway.model.ChannelData;
import omero.gateway.model.DataObject;
/**
* Updates the channels for images related to the specified data object.
* This class calls one of the <code>saveChannelData</code> methods in the
* <code>DataManagerView</code>.
*
* @author Jean-Marie Burel
* <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
* @since 4.4
*/
public class ChannelDataSaver
    extends EditorLoader
{
    /** The channels to save. (Previous comment "id of the pixels set" was stale.) */
    private List<ChannelData> channels;
    /** The parent object of the channels, or <code>null</code> if none. */
    private DataObject parent;
    /** Handle to the asynchronous call so that we can cancel it. */
    private CallHandle handle;
    /**
     * Creates a new instance.
     *
     * @param viewer Reference to the viewer. Mustn't be <code>null</code>.
     * @param ctx The security context.
     * @param channels The channels to handle. Mustn't be <code>null</code>
     *                 or empty.
     * @param parent The parent of the channels, or <code>null</code>.
     */
    public ChannelDataSaver(Editor viewer, SecurityContext ctx,
    List<ChannelData> channels, DataObject parent)
    {
        super(viewer, ctx);
        if (CollectionUtils.isEmpty(channels))
            throw new IllegalArgumentException("No Channels specified.");
        this.channels = channels;
        this.parent = parent;
    }
    /**
     * Saves the channels and updates the images linked to the specified
     * object.
     * @see EditorLoader#load()
     */
    public void load()
    {
        // An empty parent list means the save applies without a parent scope.
        List<DataObject> list = new ArrayList<DataObject>();
        if (parent != null) list.add(parent);
        handle = dmView.saveChannelData(ctx, channels, list, this);
    }
    /**
     * Cancels the data loading.
     * @see EditorLoader#cancel()
     */
    public void cancel() { handle.cancel(); }
    /**
     * Feeds the result back to the viewer.
     * Posts a {@code ChannelSavedEvent} carrying the updated image ids.
     * @see EditorLoader#handleResult(Object)
     */
    public void handleResult(Object result)
    {
        EventBus bus = MetadataViewerAgent.getRegistry().getEventBus();
        // NOTE(review): unchecked cast — assumes saveChannelData's result is a
        // List<Long> of image ids; confirm against DataManagerView's contract.
        bus.post(new ChannelSavedEvent(ctx, channels, (List<Long>) result));
    }
}
| gpl-2.0 |
zhiqinghuang/core | src/org/apache/velocity/tools/generic/Alternator.java | 4332 | /*
* Copyright 2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.velocity.tools.generic;
import java.util.List;
/**
* Utility class for easily alternating over values in a list.
*
* <p><b>Example usage:</b>
* <pre>
* java...
* String[] myColors = new String[]{"red", "blue"};
* context.put("color", new Alternator(myColors));
* String[] myStyles = new String[]{"hip", "fly", "groovy"};
* // demonstrate manual alternation with this one
* context.put("style", new Alternator(false, myStyles));
*
* template...
* #foreach( $foo in [1..5] )
* $foo is $color and $style.next
* #end
*
* output...
* 1 is red and hip
* 2 is blue and fly
* 3 is red and groovy
* 4 is blue and hip
* 5 is red and fly
* </pre></p>
*
* @since Velocity Tools 1.2
* @version $Revision: 72057 $ $Date: 2004-05-05 17:36:40 -0700 (Wed, 05 May 2004) $
*/
public class Alternator
{
private Object[] list;
private int index = 0;
private boolean auto = true;
/**
* Creates a new Alternator for the specified list. Alternation
* defaults to automatic.
*/
public Alternator(List list)
{
this(true, list);
}
/**
* Creates a new Alternator for the specified list. Alternation
* defaults to automatic.
*/
public Alternator(Object[] list)
{
this(true, list);
}
/**
* Creates a new Alternator for the specified list with the specified
* automatic shifting preference.
*
* @param auto See {@link #setAuto(boolean auto)}.
* @param list The (non-<code>null</code>) list of elements to
* alternate.
*/
public Alternator(boolean auto, List list)
{
this(auto, list.toArray(new Object[list.size()]));
}
/**
* Creates a new Alternator for the specified list with the specified
* automatic shifting preference.
*
* @param auto See {@link #setAuto(boolean auto)}.
* @param list The (non-<code>null</code>) list of elements to
* alternate.
*/
public Alternator(boolean auto, Object[] list)
{
this.auto = auto;
this.list = list;
}
/**
* @return Whether this Alternator shifts the list index
* automatically after a call to {@link #toString()}.
*/
public boolean isAuto()
{
return auto;
}
/**
* If set to true, the list index will shift automatically after a
* call to toString().
*/
public void setAuto(boolean auto)
{
this.auto = auto;
}
/**
* Manually shifts the list index. If it reaches the end of the list,
* it will start over again at zero.
*/
public void shift()
{
index = (index + 1) % list.length;
}
/**
* Returns the current item without shifting the list index.
*/
public Object getCurrent()
{
return list[index];
}
/**
* Returns the current item, then shifts the list index.
*/
public Object getNext()
{
Object o = getCurrent();
shift();
return o;
}
/**
* Returns a string representation of the current item or
* <code>null</code> if the current item is null. <b>If {@link
* #auto} is true, this will shift after returning the current
* item</b>.
*/
public String toString()
{
Object o = list[index];
if (auto)
{
shift();
}
if (o == null)
{
return null;
}
return o.toString();
}
}
| gpl-3.0 |
MitchellBot/openmrs-core | api/src/main/java/org/openmrs/ConceptName.java | 15366 | /**
* This Source Code Form is subject to the terms of the Mozilla Public License,
* v. 2.0. If a copy of the MPL was not distributed with this file, You can
* obtain one at http://mozilla.org/MPL/2.0/. OpenMRS is also distributed under
* the terms of the Healthcare Disclaimer located at http://openmrs.org/license.
*
* Copyright (C) OpenMRS Inc. OpenMRS is a registered trademark and the OpenMRS
* graphic logo is a trademark of OpenMRS Inc.
*/
package org.openmrs;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.Locale;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
import org.apache.lucene.analysis.standard.StandardFilterFactory;
import org.apache.lucene.analysis.standard.StandardTokenizerFactory;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.hibernate.search.annotations.Analyze;
import org.hibernate.search.annotations.Analyzer;
import org.hibernate.search.annotations.AnalyzerDef;
import org.hibernate.search.annotations.DocumentId;
import org.hibernate.search.annotations.Field;
import org.hibernate.search.annotations.FieldBridge;
import org.hibernate.search.annotations.Indexed;
import org.hibernate.search.annotations.IndexedEmbedded;
import org.hibernate.search.annotations.TokenFilterDef;
import org.hibernate.search.annotations.TokenizerDef;
import org.openmrs.api.ConceptNameType;
import org.openmrs.api.db.hibernate.search.bridge.LocaleFieldBridge;
import org.openmrs.util.OpenmrsUtil;
/**
* ConceptName is the real world term used to express a Concept within the idiom of a particular
* locale.
*/
@Indexed
@AnalyzerDef(name = "ConceptNameAnalyzer", tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class), filters = {
@TokenFilterDef(factory = StandardFilterFactory.class), @TokenFilterDef(factory = LowerCaseFilterFactory.class) })
@Analyzer(definition = "ConceptNameAnalyzer")
public class ConceptName extends BaseOpenmrsObject implements Auditable, Voidable, java.io.Serializable {
public static final long serialVersionUID = 2L;
@DocumentId
private Integer conceptNameId;
@IndexedEmbedded(includeEmbeddedObjectId = true)
private Concept concept;
@Field
private String name;
@Field(analyze = Analyze.NO)
@FieldBridge(impl = LocaleFieldBridge.class)
private Locale locale; // ABK: upgraded from a plain string to a full locale object
private User creator;
private Date dateCreated;
@Field
private Boolean voided = false;
private User voidedBy;
private Date dateVoided;
private String voidReason;
private Collection<ConceptNameTag> tags;
@Field
private ConceptNameType conceptNameType;
@Field
private Boolean localePreferred = false;
// Constructors
/** default constructor */
public ConceptName() {
}
/**
* Convenience constructor to create a ConceptName object by primary key
*
* @param conceptNameId
*/
public ConceptName(Integer conceptNameId) {
this.conceptNameId = conceptNameId;
}
public ConceptName(String name, Locale locale) {
setName(name);
setLocale(locale);
}
/**
* @return Returns the conceptId.
*/
public Integer getConceptNameId() {
return conceptNameId;
}
/**
* @param conceptNameId The conceptId to set.
*/
public void setConceptNameId(Integer conceptNameId) {
this.conceptNameId = conceptNameId;
}
public Concept getConcept() {
return concept;
}
public void setConcept(Concept concept) {
this.concept = concept;
}
public String getName() {
return name;
}
public void setName(String name) {
if (name != null && StringUtils.isBlank(name) && StringUtils.isNotBlank(this.name)
&& this.getConceptNameType().equals(ConceptNameType.SHORT)) {
this.setVoided(true);
} else {
this.name = name;
}
}
public Locale getLocale() {
return locale;
}
public void setLocale(Locale locale) {
this.locale = locale;
}
/**
* @return Returns the creator.
*/
public User getCreator() {
return creator;
}
/**
* @param creator The creator to set.
*/
public void setCreator(User creator) {
this.creator = creator;
}
/**
* @return Returns the dateCreated.
*/
public Date getDateCreated() {
return dateCreated;
}
/**
* @param dateCreated The dateCreated to set.
*/
public void setDateCreated(Date dateCreated) {
this.dateCreated = dateCreated;
}
/**
* Returns whether the ConceptName has been voided.
*
* @return true if the ConceptName has been voided, false otherwise.
*
* @deprecated as of 2.0, use {@link #getVoided()}
*/
@Deprecated
@JsonIgnore
public Boolean isVoided() {
return getVoided();
}
/**
* Returns whether the ConceptName has been voided.
*
* @return true if the ConceptName has been voided, false otherwise.
*/
public Boolean getVoided() {
return voided;
}
/**
* Sets the voided status of this ConceptName.
*
* @param voided the voided status to set.
*/
public void setVoided(Boolean voided) {
this.voided = voided;
}
/**
* Returns the User who voided this ConceptName.
*
* @return the User who voided this ConceptName, or null if not set
*/
public User getVoidedBy() {
return voidedBy;
}
/**
* Sets the User who voided this ConceptName.
*
* @param voidedBy the user who voided this ConceptName.
*/
public void setVoidedBy(User voidedBy) {
this.voidedBy = voidedBy;
}
/**
* Returns the Date this ConceptName was voided.
*
* @return the Date this ConceptName was voided.
*/
public Date getDateVoided() {
return dateVoided;
}
/**
* Sets the Data this ConceptName was voided.
*
* @param dateVoided the date the ConceptName was voided.
*/
public void setDateVoided(Date dateVoided) {
this.dateVoided = dateVoided;
}
/**
* Returns the reason this ConceptName was voided.
*
* @return the reason this ConceptName was voided
*/
public String getVoidReason() {
return voidReason;
}
/**
* Sets the reason this ConceptName was voided.
*
* @param voidReason the reason this ConceptName was voided
*/
public void setVoidReason(String voidReason) {
this.voidReason = voidReason;
}
    /**
     * Returns the tags which have been attached to this ConceptName.
     * NOTE(review): this exposes the internal, mutable collection directly —
     * callers can modify it; confirm whether a defensive copy is expected.
     *
     * @return the tags; may be null if no tag was ever added
     */
    public Collection<ConceptNameTag> getTags() {
        return tags;
    }
    /**
     * Set the tags which are attached to this ConceptName.
     *
     * @see Concept#setPreferredName(ConceptName)
     * @see Concept#setFullySpecifiedName(ConceptName)
     * @see Concept#setShortName(ConceptName)
     * @param tags the tags to set; stored by reference, not copied
     */
    public void setTags(Collection<ConceptNameTag> tags) {
        this.tags = tags;
    }
    /**
     * @return the conceptNameType; null denotes a synonym (see {@link #isSynonym()})
     */
    public ConceptNameType getConceptNameType() {
        return this.conceptNameType;
    }
    /**
     * @param conceptNameType the conceptNameType to set; null marks the name as a synonym
     */
    public void setConceptNameType(ConceptNameType conceptNameType) {
        this.conceptNameType = conceptNameType;
    }
    /**
     * Getter for localePreferred.
     *
     * @return localePreferred; may be null if never set
     *
     * @deprecated as of 2.0, use {@link #getLocalePreferred()}
     */
    @Deprecated
    @JsonIgnore
    public Boolean isLocalePreferred() {
        return getLocalePreferred();
    }
    /**
     * Getter to be used by spring, developers should use {@link #isLocalePreferred()}
     *
     * @return true if it is the localePreferred name otherwise false; may be null if unset
     */
    public Boolean getLocalePreferred() {
        return localePreferred;
    }
    /**
     * @param localePreferred whether this name is the preferred one in its locale
     */
    public void setLocalePreferred(Boolean localePreferred) {
        this.localePreferred = localePreferred;
    }
    /**
     * Adds a tag to the concept name. If the tag is new (has no existing occurrences) a new
     * ConceptNameTag will be created with a blank description.
     *
     * @see Concept#setPreferredName(ConceptName)
     * @see Concept#setFullySpecifiedName(ConceptName)
     * @see Concept#setShortName(ConceptName)
     * @param tag human-readable text string for the tag
     */
    public void addTag(String tag) {
        // Delegate with an empty description.
        addTag(tag, "");
    }
/**
* Adds a tag to the concept name. If the tag is new (has no existing occurrences) a new
* ConceptNameTag will be created with the given description.
*
* @see Concept#setPreferredName(ConceptName)
* @see Concept#setFullySpecifiedName(ConceptName)
* @see Concept#setShortName(ConceptName)
* @param tag human-readable text string for the tag
* @param description description of the tag's purpose
*/
public void addTag(String tag, String description) {
ConceptNameTag nameTag = new ConceptNameTag(tag, description);
addTag(nameTag);
}
/**
* Attaches a tag to the concept name.
*
* @see Concept#setPreferredName(ConceptName)
* @see Concept#setFullySpecifiedName(ConceptName)
* @see Concept#setShortName(ConceptName)
* @param tag the tag to add
*/
public void addTag(ConceptNameTag tag) {
if (tags == null) {
tags = new HashSet<ConceptNameTag>();
}
if (!tags.contains(tag)) {
tags.add(tag);
}
}
/**
* Removes a tag from the concept name.
*
* @see Concept#setPreferredName(ConceptName)
* @see Concept#setFullySpecifiedName(ConceptName)
* @see Concept#setShortName(ConceptName)
* @param tag the tag to remove
*/
public void removeTag(ConceptNameTag tag) {
if (tags.contains(tag)) {
tags.remove(tag);
}
}
    /**
     * Checks whether the name has a particular tag.
     * NOTE(review): throws NullPointerException if {@code tagToFind} is null — confirm
     * whether callers can pass null.
     *
     * @see #isPreferred()
     * @see #isFullySpecifiedName()
     * @see #isIndexTerm()
     * @see #isSynonym()
     * @see #isShort()
     * @param tagToFind the tag for which to check
     * @return true if the tags include the specified tag, false otherwise
     */
    public Boolean hasTag(ConceptNameTag tagToFind) {
        // Compare by tag text, not by object identity.
        return hasTag(tagToFind.getTag());
    }
/**
* Checks whether the name has a particular tag.
*
* @see #isPreferred()
* @see #isFullySpecifiedName()
* @see #isIndexTerm()
* @see #isSynonym()
* @see #isShort()
* @param tagToFind the string of the tag for which to check
* @return true if the tags include the specified tag, false otherwise
*/
public Boolean hasTag(String tagToFind) {
boolean foundTag = false;
if (tags != null) {
for (ConceptNameTag nameTag : getTags()) {
if (nameTag.getTag().equals(tagToFind)) {
foundTag = true;
break;
}
}
}
return foundTag;
}
/**
* Checks whether the name is explicitly marked as preferred in a locale with a matching
* language. E.g 'en_US' and 'en_UK' for language en
*
* @see #isPreferredForLocale(Locale)
* @param language ISO 639 2-letter code for a language
* @return true if the name is preferred in a locale with a matching language code, otherwise
* false
*/
public Boolean isPreferredInLanguage(String language) {
if (!StringUtils.isBlank(language) && this.locale != null && isPreferred()
&& this.locale.getLanguage().equals(language)) {
return true;
}
return false;
}
/**
* Checks whether the name is explicitly marked as preferred in a locale with a matching country
* code E.g 'fr_RW' and 'en_RW' for country RW
*
* @see #isPreferredForLocale(Locale)
* @param country ISO 3166 2-letter code for a country
* @return true if the name is preferred in a locale with a matching country code, otherwise
* false
*/
public Boolean isPreferredInCountry(String country) {
if (!StringUtils.isBlank(country) && this.locale != null && isPreferred()
&& this.locale.getCountry().equals(country)) {
return true;
}
return false;
}
    /**
     * Checks whether the name is explicitly marked as preferred for any locale. Note that this
     * method is different from {@link #isPreferredForLocale(Locale)} in that it checks if the given
     * name is marked as preferred irrespective of the locale in which it is preferred.
     *
     * @see #isPreferredForLocale(Locale)
     * @return the localePreferred flag; may be null if it was never set
     */
    public Boolean isPreferred() {
        return isLocalePreferred();
    }
    /**
     * Checks whether the name is explicitly marked as preferred for the given locale.
     * NOTE(review): if the localePreferred flag is true while this name's locale field is
     * null, {@code this.locale.equals(locale)} throws NPE; likewise an unboxing NPE occurs
     * if the flag itself is null — confirm whether those states are reachable.
     *
     * @param locale the locale in which the name is preferred
     * @return true if the name is marked as preferred for the given locale otherwise false.
     */
    public Boolean isPreferredForLocale(Locale locale) {
        return isLocalePreferred() && this.locale.equals(locale);
    }
    /**
     * Checks whether the concept name is explicitly marked as fully specified.
     *
     * @return true if the name is marked as 'fully specified' otherwise false
     * @since Version 1.7
     */
    public Boolean isFullySpecifiedName() {
        // Null-safe comparison: a null conceptNameType simply yields false here.
        return OpenmrsUtil.nullSafeEquals(getConceptNameType(), ConceptNameType.FULLY_SPECIFIED);
    }
    /**
     * Convenience method for determining whether this is a short name.
     *
     * @return true if the name is marked as a short name, otherwise false
     */
    public Boolean isShort() {
        // Null-safe comparison: a null conceptNameType simply yields false here.
        return OpenmrsUtil.nullSafeEquals(getConceptNameType(), ConceptNameType.SHORT);
    }
    /**
     * Convenience method for checking whether this is an index Term.
     *
     * @return true if the name is marked as an index term, otherwise false
     * @since Version 1.7
     */
    public Boolean isIndexTerm() {
        // Null-safe comparison: a null conceptNameType simply yields false here.
        return OpenmrsUtil.nullSafeEquals(getConceptNameType(), ConceptNameType.INDEX_TERM);
    }
/**
* Convenience method for determining whether this is an index Term for a given locale.
*
* @param locale The locale in which this concept name should belong as an index term
* @return true if the name is marked as an index term, otherwise false
*/
public Boolean isIndexTermInLocale(Locale locale) {
return getConceptNameType() != null && getConceptNameType().equals(ConceptNameType.INDEX_TERM)
&& locale.equals(getLocale());
}
    /**
     * Convenience method for determining whether this is a synonym in a given locale.
     *
     * @param locale The locale in which this synonym should belong
     * @return true if the concept name is marked as a synonym in the given locale, otherwise false
     */
    public Boolean isSynonymInLocale(Locale locale) {
        // A name with no explicit type is treated as a synonym.
        return getConceptNameType() == null && locale.equals(getLocale());
    }
    /**
     * Convenience method for checking whether this is a synonym.
     *
     * @return true if the name is tagged as a synonym (i.e. has no explicit type), false otherwise
     * @since Version 1.7
     */
    public Boolean isSynonym() {
        return getConceptNameType() == null;
    }
/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if (this.name == null) {
return "ConceptNameId: " + this.conceptNameId;
}
return this.name;
}
    /**
     * @since 1.5
     * @see org.openmrs.OpenmrsObject#getId()
     * @return the primary key, i.e. the conceptNameId
     */
    public Integer getId() {
        return getConceptNameId();
    }
    /**
     * @since 1.5
     * @see org.openmrs.OpenmrsObject#setId(java.lang.Integer)
     * @param id the primary key, stored as the conceptNameId
     */
    public void setId(Integer id) {
        setConceptNameId(id);
    }
    /**
     * Not currently used. Always returns null.
     *
     * @see org.openmrs.Auditable#getChangedBy()
     * @return always null
     */
    public User getChangedBy() {
        return null;
    }
    /**
     * Not currently used. Always returns null.
     *
     * @see org.openmrs.Auditable#getDateChanged()
     * @return always null
     */
    public Date getDateChanged() {
        return null;
    }
    /**
     * Not currently used; intentionally a no-op.
     *
     * @see org.openmrs.Auditable#setChangedBy(org.openmrs.User)
     */
    public void setChangedBy(User changedBy) {
    }
    /**
     * Not currently used; intentionally a no-op.
     *
     * @see org.openmrs.Auditable#setDateChanged(java.util.Date)
     */
    public void setDateChanged(Date dateChanged) {
    }
}
| mpl-2.0 |
nikhilvibhav/camel | components/camel-tracing/src/test/java/org/apache/camel/tracing/decorators/IronmqSpanDecoratorTest.java | 1592 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.tracing.decorators;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class IronmqSpanDecoratorTest {

    @Test
    public void testGetMessageId() {
        final String expectedMessageId = "abcd";

        // Stub an exchange whose inbound message carries the IronMQ message-id header.
        final Message message = Mockito.mock(Message.class);
        final Exchange exchange = Mockito.mock(Exchange.class);
        Mockito.when(exchange.getIn()).thenReturn(message);
        Mockito.when(message.getHeader(IronmqSpanDecorator.CAMEL_IRON_MQ_MESSAGE_ID)).thenReturn(expectedMessageId);

        // The decorator must surface exactly that header value as the message id.
        assertEquals(expectedMessageId, new IronmqSpanDecorator().getMessageId(exchange));
    }
}
| apache-2.0 |
siosio/intellij-community | platform/external-system-api/src/com/intellij/openapi/externalSystem/model/project/ModuleDependencyData.java | 1944 | // Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.externalSystem.model.project;
import com.intellij.serialization.PropertyMapping;
import org.jetbrains.annotations.NotNull;

import java.util.Collection;
import java.util.Objects;
/**
 * Dependency of one module on another, with optional production-on-test scope
 * and a set of artifact paths contributed by the dependency.
 */
public final class ModuleDependencyData extends AbstractDependencyData<ModuleData> {
  // Whether production sources of the owner depend on test sources of the target.
  private boolean productionOnTestDependency;
  // Artifact paths contributed by the target module; may be null when unknown.
  private Collection<String> moduleDependencyArtifacts;

  @PropertyMapping({"ownerModule", "target"})
  public ModuleDependencyData(@NotNull ModuleData ownerModule, @NotNull ModuleData module) {
    super(ownerModule, module);
  }

  public boolean isProductionOnTestDependency() {
    return productionOnTestDependency;
  }

  public void setProductionOnTestDependency(boolean productionOnTestDependency) {
    this.productionOnTestDependency = productionOnTestDependency;
  }

  public Collection<String> getModuleDependencyArtifacts() {
    return moduleDependencyArtifacts;
  }

  public void setModuleDependencyArtifacts(Collection<String> moduleDependencyArtifacts) {
    this.moduleDependencyArtifacts = moduleDependencyArtifacts;
  }

  @Override
  public boolean equals(Object o) {
    if (!super.equals(o)) return false;
    ModuleDependencyData that = (ModuleDependencyData)o;
    // Objects.equals replaces the manual null-checking ternary; semantics unchanged.
    return productionOnTestDependency == that.productionOnTestDependency &&
           Objects.equals(moduleDependencyArtifacts, that.moduleDependencyArtifacts);
  }

  @Override
  public int hashCode() {
    int result = super.hashCode();
    result = 31 * result + (productionOnTestDependency ? 1 : 0);
    result = 31 * result + (moduleDependencyArtifacts != null ? moduleDependencyArtifacts.hashCode() : 0);
    return result;
  }
}
| apache-2.0 |
hequn8128/flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockKeyedStateBackendBuilder.java | 3188 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.state.ttl.mock;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.fs.CloseableRegistry;
import org.apache.flink.runtime.query.TaskKvStateRegistry;
import org.apache.flink.runtime.state.AbstractKeyedStateBackendBuilder;
import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.KeyedStateHandle;
import org.apache.flink.runtime.state.StateSnapshotTransformer;
import org.apache.flink.runtime.state.StreamCompressionDecorator;
import org.apache.flink.runtime.state.heap.InternalKeyContextImpl;
import org.apache.flink.runtime.state.ttl.TtlTimeProvider;
import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
 * Builder class for {@link MockKeyedStateBackend}.
 *
 * @param <K> The data type that the key serializer serializes.
 */
public class MockKeyedStateBackendBuilder<K> extends AbstractKeyedStateBackendBuilder<K> {

    public MockKeyedStateBackendBuilder(
            TaskKvStateRegistry kvStateRegistry,
            TypeSerializer<K> keySerializer,
            ClassLoader userCodeClassLoader,
            int numberOfKeyGroups,
            KeyGroupRange keyGroupRange,
            ExecutionConfig executionConfig,
            TtlTimeProvider ttlTimeProvider,
            @Nonnull Collection<KeyedStateHandle> stateHandles,
            StreamCompressionDecorator keyGroupCompressionDecorator,
            CloseableRegistry cancelStreamRegistry) {
        // All configuration is stored by the abstract base builder.
        super(
                kvStateRegistry,
                keySerializer,
                userCodeClassLoader,
                numberOfKeyGroups,
                keyGroupRange,
                executionConfig,
                ttlTimeProvider,
                stateHandles,
                keyGroupCompressionDecorator,
                cancelStreamRegistry);
    }

    @Override
    public MockKeyedStateBackend<K> build() {
        // In-memory storage for state values: state name -> key -> namespace/value map.
        final Map<String, Map<K, Map<Object, Object>>> stateValues = new HashMap<>();

        // Replay any restored state handles into the in-memory maps before wiring the backend.
        new MockRestoreOperation<>(restoreStateHandles, stateValues).restore();

        final Map<String, StateSnapshotTransformer<Object>> stateSnapshotFilters = new HashMap<>();
        return new MockKeyedStateBackend<>(
                kvStateRegistry,
                keySerializerProvider.currentSchemaSerializer(),
                userCodeClassLoader,
                executionConfig,
                ttlTimeProvider,
                stateValues,
                stateSnapshotFilters,
                cancelStreamRegistry,
                new InternalKeyContextImpl<>(keyGroupRange, numberOfKeyGroups));
    }
}
| apache-2.0 |
siosio/intellij-community | plugins/ui-designer-core/src/com/intellij/designer/designSurface/InplaceEditingLayer.java | 9040 | // Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.designer.designSurface;
import com.intellij.designer.DesignerBundle;
import com.intellij.designer.designSurface.feedbacks.LineMarginBorder;
import com.intellij.designer.model.Property;
import com.intellij.designer.model.RadComponent;
import com.intellij.designer.propertyTable.InplaceContext;
import com.intellij.designer.propertyTable.PropertyEditor;
import com.intellij.designer.propertyTable.PropertyEditorListener;
import com.intellij.openapi.actionSystem.AnAction;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.CommonShortcuts;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.wm.FocusWatcher;
import com.intellij.openapi.wm.ex.IdeFocusTraversalPolicy;
import com.intellij.uiDesigner.core.GridConstraints;
import com.intellij.uiDesigner.core.GridLayoutManager;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.awt.event.FocusEvent;
import java.awt.event.MouseEvent;
import java.util.ArrayList;
import java.util.List;
/**
* @author Alexander Lobas
* @author Anton Katilin
* @author Vladimir Kondratyev
*/
public class InplaceEditingLayer extends JComponent {
private static final Logger LOG = Logger.getInstance(InplaceEditingLayer.class);
  // Ends the editing session (committing pending values) when keyboard focus
  // permanently leaves the inplace editor hierarchy.
  private final FocusWatcher myFocusWatcher = new FocusWatcher() {
    @Override
    protected void focusLostImpl(FocusEvent e) {
      Component opposite = e.getOppositeComponent();
      if (e.isTemporary() || opposite != null && SwingUtilities.isDescendingFrom(opposite, getTopComponent())) {
        // Do nothing if focus moves inside top component hierarchy
        return;
      }
      // [vova] we need LaterInvocator here to prevent write-access assertions
      ApplicationManager.getApplication().invokeLater(() -> finishEditing(true), ModalityState.NON_MODAL);
    }
  };
  // Commits the edit as soon as the designer selection changes while editing is active.
  private final ComponentSelectionListener mySelectionListener = new ComponentSelectionListener() {
    @Override
    public void selectionChanged(EditableArea area) {
      finishEditing(true);
    }
  };
  // Bridges property-editor events to the editing lifecycle: commit, cancel, or re-layout.
  private final PropertyEditorListener myEditorListener = new PropertyEditorListener() {
    @Override
    public void valueCommitted(@NotNull PropertyEditor source, boolean continueEditing, boolean closeEditorOnError) {
      finishEditing(true);
    }

    @Override
    public void editingCanceled(@NotNull PropertyEditor source) {
      finishEditing(false);
    }

    @Override
    public void preferredSizeChanged(@NotNull PropertyEditor source) {
      adjustInplaceComponentSize();
    }
  };
  // Component currently being edited; null when no editing session is active.
  private RadComponent myRadComponent;
  // Properties shown in the inplace editor; parallel to myEditors.
  private List<Property> myProperties;
  // One editor per entry of myProperties, in the same order.
  private List<PropertyEditor> myEditors;
  private final DesignerEditorPanel myDesigner;
  // Panel hosting the label/editor grid; non-null only while editing.
  private JComponent myInplaceComponent;
  // Width captured when editing starts; used as a minimum on later resizes.
  private int myPreferredWidth;
  // Creates the layer for the given designer panel; editing starts lazily via startEditing().
  public InplaceEditingLayer(DesignerEditorPanel designer) {
    myDesigner = designer;
  }
  /**
   * Starts an inplace editing session for the single selected component, building a
   * label/editor grid for its inplace properties and transferring focus into it.
   * Does nothing when the selection is not exactly one component or when the
   * component exposes no inplace properties.
   */
  public void startEditing(@Nullable InplaceContext inplaceContext) {
    try {
      List<RadComponent> selection = myDesigner.getSurfaceArea().getSelection();
      // Inplace editing only makes sense for a single selected component.
      if (selection.size() != 1) {
        return;
      }
      myRadComponent = selection.get(0);
      myProperties = myRadComponent.getInplaceProperties();
      if (myProperties.isEmpty()) {
        myRadComponent = null;
        myProperties = null;
        return;
      }
      // One row per property: label in column 0, editor component in column 1.
      myInplaceComponent = new JPanel(new GridLayoutManager(myProperties.size(), 2));
      myInplaceComponent.setBorder(new LineMarginBorder(5, 5, 5, 5));
      // Escape cancels the session without committing values.
      new AnAction() {
        @Override
        public void actionPerformed(@NotNull AnActionEvent e) {
          finishEditing(false);
        }
      }.registerCustomShortcutSet(CommonShortcuts.ESCAPE, myInplaceComponent);
      myEditors = new ArrayList<>();
      JComponent componentToFocus = null;
      Font font = null;
      if (inplaceContext == null) {
        inplaceContext = new InplaceContext();
      }
      int row = 0;
      for (Property property : myProperties) {
        JLabel label = new JLabel(property.getName() + ":");
        if (font == null) {
          // Derive the bold label font once and reuse it for every row.
          font = label.getFont().deriveFont(Font.BOLD);
        }
        label.setFont(font);
        myInplaceComponent.add(label,
                               new GridConstraints(row, 0, 1, 1, GridConstraints.ANCHOR_WEST, GridConstraints.FILL_NONE, 0, 0, null, null,
                                                   null));
        PropertyEditor editor = property.getEditor();
        myEditors.add(editor);
        JComponent component = editor.getComponent(myRadComponent, myDesigner, property.getValue(myRadComponent), inplaceContext);
        myInplaceComponent.add(component,
                               new GridConstraints(row++, 1, 1, 1, GridConstraints.ANCHOR_CENTER, GridConstraints.FILL_HORIZONTAL,
                                                   GridConstraints.SIZEPOLICY_CAN_GROW, 0, null, null, null));
        // Remember the first editor that volunteers a focus target.
        if (componentToFocus == null) {
          componentToFocus = editor.getPreferredFocusedComponent();
        }
      }
      // Listeners are attached only after all editors were created successfully.
      for (PropertyEditor editor : myEditors) {
        editor.addPropertyEditorListener(myEditorListener);
      }
      // Place the editor panel over the edited component, never narrower than it.
      Rectangle bounds = myRadComponent.getBounds(this);
      Dimension size = myInplaceComponent.getPreferredSize();
      myPreferredWidth = Math.max(size.width, bounds.width);
      myInplaceComponent.setBounds(bounds.x, bounds.y, myPreferredWidth, size.height);
      add(myInplaceComponent);
      myDesigner.getSurfaceArea().addSelectionListener(mySelectionListener);
      // Fall back through the focus-traversal policy, then the panel itself.
      if (componentToFocus == null) {
        componentToFocus = IdeFocusTraversalPolicy.getPreferredFocusedComponent(myInplaceComponent);
      }
      if (componentToFocus == null) {
        componentToFocus = myInplaceComponent;
      }
      if (componentToFocus.requestFocusInWindow()) {
        myFocusWatcher.install(myInplaceComponent);
      }
      else {
        // Focus request failed synchronously; grab focus and retry on the EDT.
        grabFocus();
        final JComponent finalComponentToFocus = componentToFocus;
        ApplicationManager.getApplication().invokeLater(() -> {
          finalComponentToFocus.requestFocusInWindow();
          myFocusWatcher.install(myInplaceComponent);
        });
      }
      // Start intercepting mouse events so any outside click commits the edit.
      enableEvents(AWTEvent.MOUSE_EVENT_MASK);
      repaint();
    }
    catch (Throwable e) {
      LOG.error(e);
    }
  }
  /**
   * Ends the current editing session. When {@code commit} is true, every property whose
   * editor value differs from the current value is written back inside a single
   * designer command; otherwise all edits are discarded. Always tears down listeners,
   * removes the editor panel and restores focus to the designer.
   */
  private void finishEditing(boolean commit) {
    myDesigner.getSurfaceArea().removeSelectionListener(mySelectionListener);
    if (myInplaceComponent != null) {
      if (commit) {
        // Apply all changed values as one undoable "set property value" command.
        myDesigner.getToolProvider().execute(() -> {
          int size = myProperties.size();
          for (int i = 0; i < size; i++) {
            Property property = myProperties.get(i);
            Object oldValue = property.getValue(myRadComponent);
            Object newValue = myEditors.get(i).getValue();
            // Skip writes when the value did not actually change.
            if (!Comparing.equal(oldValue, newValue)) {
              property.setValue(myRadComponent, newValue);
            }
          }
        }, DesignerBundle.message("command.set.property.value"), true);
      }
      for (PropertyEditor editor : myEditors) {
        editor.removePropertyEditorListener(myEditorListener);
      }
      removeInplaceComponent();
      myFocusWatcher.deinstall(myInplaceComponent);
      myInplaceComponent = null;
    }
    // Reset session state regardless of whether an editor panel existed.
    myRadComponent = null;
    myProperties = null;
    myEditors = null;
    myDesigner.getPreferredFocusedComponent().requestFocusInWindow();
    // Stop intercepting mouse events now that editing is over.
    disableEvents(AWTEvent.MOUSE_EVENT_MASK);
    repaint();
  }
  // Re-layouts the editor panel after an editor's preferred size changed,
  // keeping the width captured at the start of editing as a minimum.
  private void adjustInplaceComponentSize() {
    myInplaceComponent.revalidate();
    Dimension size = myInplaceComponent.getPreferredSize();
    myInplaceComponent.setSize(Math.max(size.width, myPreferredWidth), myInplaceComponent.getHeight());
    myInplaceComponent.revalidate();
    repaint();
  }
  // Detaches the editor panel from this layer; the field itself is cleared by the caller.
  private void removeInplaceComponent() {
    remove(myInplaceComponent);
  }
  /**
   * When there is an inplace editor we "listen" all mouse event
   * and finish editing by any MOUSE_PRESSED or MOUSE_RELEASED event.
   * We are acting like yet another glass pane over the standard glass layer.
   */
  @Override
  protected void processMouseEvent(MouseEvent e) {
    if (myInplaceComponent != null && (MouseEvent.MOUSE_PRESSED == e.getID() || MouseEvent.MOUSE_RELEASED == e.getID())) {
      // Any click outside the editor commits the pending values.
      finishEditing(true);
    }
    // [vova] this is very important! Without this code Swing doesn't close popup menu on our
    // layered pane. Swing adds MouseListeners to all component to close popup. If we do not
    // invoke super then we lock all mouse listeners.
    super.processMouseEvent(e);
  }
  // @return true while an inplace editing session is active.
  public boolean isEditing() {
    return myInplaceComponent != null;
  }
} | apache-2.0 |
mcgilman/nifi | nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/main/java/org/apache/nifi/web/search/attributematchers/BasicMatcher.java | 1659 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.search.attributematchers;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.web.search.query.SearchQuery;
import java.util.List;
import static org.apache.nifi.web.search.attributematchers.AttributeMatcher.addIfMatching;
/**
 * Matches the search term against attributes every {@link Connectable} component has:
 * its identifier and its version-control identifier.
 */
public class BasicMatcher<T extends Connectable> implements AttributeMatcher<T> {
    private static final String LABEL_ID = "Id";
    private static final String LABEL_VERSION_CONTROL_ID = "Version Control ID";

    @Override
    public void match(final T component, final SearchQuery query, final List<String> matches) {
        // Both attributes are compared against the raw search term.
        final String term = query.getTerm();
        addIfMatching(term, component.getIdentifier(), LABEL_ID, matches);
        // The version-control id is optional; a null candidate is handled by addIfMatching.
        addIfMatching(term, component.getVersionedComponentId().orElse(null), LABEL_VERSION_CONTROL_ID, matches);
    }
}
| apache-2.0 |
jwren/intellij-community | java/java-impl/src/com/intellij/codeInsight/editorActions/smartEnter/MissingIfBranchesFixer.java | 2337 | // Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.codeInsight.editorActions.smartEnter;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import com.intellij.psi.*;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Smart-enter fixer that inserts missing braces around the then/else branches of an
 * {@code if} statement so the statement becomes a well-formed block.
 */
public class MissingIfBranchesFixer implements Fixer {
  @Override
  public void apply(Editor editor, JavaSmartEnterProcessor processor, PsiElement psiElement) throws IncorrectOperationException {
    if (!(psiElement instanceof PsiIfStatement)) return;
    PsiIfStatement ifStatement = (PsiIfStatement) psiElement;
    final Document doc = editor.getDocument();
    final PsiKeyword elseElement = ifStatement.getElseElement();
    if (elseElement != null) {
      // The else branch is handled first — presumably so its (later) document offsets are
      // edited before insertions around the then branch shift them; TODO confirm.
      handleBranch(doc, ifStatement, elseElement, ifStatement.getElseBranch());
    }
    PsiJavaToken rParenth = ifStatement.getRParenth();
    assert rParenth != null;
    handleBranch(doc, ifStatement, rParenth, ifStatement.getThenBranch());
  }

  private static void handleBranch(@NotNull Document doc, @NotNull PsiIfStatement ifStatement, @NotNull PsiElement beforeBranch, @Nullable PsiStatement branch) {
    // Nothing to do when the branch is already a block, or when this is an
    // "else if" chain, which must not be wrapped in braces.
    if (branch instanceof PsiBlockStatement || beforeBranch.textMatches(PsiKeyword.ELSE) && branch instanceof PsiIfStatement) return;
    // A "one-liner" is a branch starting on the same line as the keyword/paren,
    // or indented deeper than the if statement itself.
    boolean transformingOneLiner = branch != null && (startLine(doc, beforeBranch) == startLine(doc, branch) ||
                                                      startCol(doc, ifStatement) < startCol(doc, branch));
    if (!transformingOneLiner) {
      // No usable branch statement: insert an empty block right after the keyword/paren.
      doc.insertString(beforeBranch.getTextRange().getEndOffset(), "{}");
    }
    else {
      // Wrap the existing one-line branch statement in braces.
      doc.insertString(beforeBranch.getTextRange().getEndOffset(), "{");
      doc.insertString(branch.getTextRange().getEndOffset() + 1, "}");
    }
  }

  // Document line containing the element's start offset.
  private static int startLine(Document doc, @NotNull PsiElement psiElement) {
    return doc.getLineNumber(psiElement.getTextRange().getStartOffset());
  }

  // Column (distance from line start) of the element's start offset.
  private static int startCol(Document doc, @NotNull PsiElement psiElement) {
    int offset = psiElement.getTextRange().getStartOffset();
    return offset - doc.getLineStartOffset(doc.getLineNumber(offset));
  }
}
| apache-2.0 |
NSAmelchev/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheVersionedEntry.java | 2074 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.version;
import org.apache.ignite.internal.processors.cache.CacheObjectValueContext;
import org.jetbrains.annotations.Nullable;
/**
 * Cache entry along with version information.
 */
public interface GridCacheVersionedEntry<K, V> {
    /**
     * Gets entry's key.
     *
     * @return Entry's key.
     */
    public K key();

    /**
     * Gets entry's value.
     *
     * @param ctx Object value context used when materializing the value.
     * @return Entry's value; may be {@code null}.
     */
    @Nullable public V value(CacheObjectValueContext ctx);

    /**
     * Gets entry's TTL (time-to-live). Units are not specified here —
     * presumably milliseconds; verify against implementations.
     *
     * @return Entry's TTL.
     */
    public long ttl();

    /**
     * Gets entry's expire time.
     *
     * @return Entry's expire time.
     */
    public long expireTime();

    /**
     * Gets ID of initiator data center.
     *
     * @return ID of initiator data center.
     */
    public byte dataCenterId();

    /**
     * Gets entry's topology version in initiator data center.
     *
     * @return Entry's topology version in initiator data center.
     */
    public int topologyVersion();

    /**
     * Gets entry's order in initiator data center.
     *
     * @return Entry's order in initiator data center.
     */
    public long order();
}
| apache-2.0 |
siyuanh/apex-malhar | contrib/src/test/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysMapTest.java | 3776 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.apex.malhar.contrib.misc.algo;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import com.datatorrent.lib.testbench.CollectorTestSink;
/**
 * Functional tests for {@link FilterKeysMap}: checks that only tuples whose
 * keys were registered on the operator are emitted, and that inverse mode
 * emits the complement instead.
 *
 * @deprecated retained only to exercise the deprecated operator.
 */
@Deprecated
public class FilterKeysMapTest
{
  /**
   * Sums the integer values of an emitted map tuple.
   *
   * @param o an emitted tuple, expected to be a {@code HashMap<String, Number>}
   * @return the sum of all values in the map
   */
  @SuppressWarnings("unchecked")
  int getTotal(Object o)
  {
    int sum = 0;
    for (Number value : ((HashMap<String, Number>)o).values()) {
      sum += value.intValue();
    }
    return sum;
  }

  /**
   * Test node logic emits correct results for inclusive filtering and,
   * at the end, for inverse filtering.
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  @Test
  public void testNodeProcessing() throws Exception
  {
    FilterKeysMap<String, Number> filterOp = new FilterKeysMap<String, Number>();
    CollectorTestSink outputSink = new CollectorTestSink();
    filterOp.filter.setSink(outputSink);

    // Register "b", wipe the key set, then register "a" plus {e, f, blah};
    // the effective filter set is therefore {a, e, f, blah}.
    filterOp.setKey("b");
    filterOp.clearKeys();
    String[] keys = { "e", "f", "blah" };
    filterOp.setKey("a");
    filterOp.setKeys(keys);

    filterOp.beginWindow(0);

    HashMap<String, Number> tuple = new HashMap<String, Number>();
    tuple.put("a", 2);
    tuple.put("b", 5);
    tuple.put("c", 7);
    tuple.put("d", 42);
    tuple.put("e", 200);
    tuple.put("f", 2);
    filterOp.data.process(tuple);
    // a + e + f = 2 + 200 + 2
    Assert.assertEquals("number emitted tuples", 1, outputSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 204, getTotal(outputSink.collectedTuples.get(0)));
    outputSink.clear();

    tuple.clear();
    tuple.put("a", 5);
    filterOp.data.process(tuple);
    Assert.assertEquals("number emitted tuples", 1, outputSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 5, getTotal(outputSink.collectedTuples.get(0)));
    outputSink.clear();

    tuple.clear();
    tuple.put("a", 2);
    tuple.put("b", 33);
    tuple.put("f", 2);
    filterOp.data.process(tuple);
    // a + f = 2 + 2; "b" is not in the filter set
    Assert.assertEquals("number emitted tuples", 1, outputSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 4, getTotal(outputSink.collectedTuples.get(0)));
    outputSink.clear();

    tuple.clear();
    tuple.put("b", 6);
    tuple.put("a", 2);
    tuple.put("j", 6);
    tuple.put("e", 2);
    tuple.put("dd", 6);
    tuple.put("blah", 2);
    tuple.put("another", 6);
    tuple.put("notmakingit", 2);
    filterOp.data.process(tuple);
    // a + e + blah = 2 + 2 + 2
    Assert.assertEquals("number emitted tuples", 1, outputSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 6, getTotal(outputSink.collectedTuples.get(0)));
    outputSink.clear();

    // Inverse mode: only keys NOT in the filter set pass through.
    tuple.clear();
    tuple.put("c", 9);
    filterOp.setInverse(true);
    filterOp.data.process(tuple);
    Assert.assertEquals("number emitted tuples", 1, outputSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 9, getTotal(outputSink.collectedTuples.get(0)));

    filterOp.endWindow();
  }
}
| apache-2.0 |
steveloughran/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java | 5399 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
/**
 * Read-only {@link TaskAttempt} implementation backed by the parsed
 * job-history record ({@link TaskAttemptInfo}) of a finished attempt.
 * <p>
 * All getters delegate to the immutable history record; the
 * {@link TaskAttemptReport} is assembled lazily on first request.
 */
public class CompletedTaskAttempt implements TaskAttempt {

  private final TaskAttemptInfo attemptInfo;
  private final TaskAttemptId attemptId;
  private final TaskAttemptState state;
  private final List<String> diagnostics = new ArrayList<String>(2);
  // Assembled lazily by getReport(); guarded by that method's synchronization.
  private TaskAttemptReport report;
  // Non-null only when the history file lacked a final task status.
  private String localDiagMessage;

  CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
    this.attemptInfo = attemptInfo;
    this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
    if (attemptInfo.getTaskStatus() != null) {
      this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
    } else {
      // No final status was recorded in the history file; report the
      // attempt as KILLED and note why in the diagnostics.
      this.state = TaskAttemptState.KILLED;
      localDiagMessage = "Attempt state missing from History : marked as KILLED";
      diagnostics.add(localDiagMessage);
    }
    if (attemptInfo.getError() != null) {
      diagnostics.add(attemptInfo.getError());
    }
  }

  /** Node information is not retained for completed attempts. */
  @Override
  public NodeId getNodeId() throws UnsupportedOperationException{
    throw new UnsupportedOperationException();
  }

  @Override
  public ContainerId getAssignedContainerID() {
    return attemptInfo.getContainerId();
  }

  @Override
  public String getAssignedContainerMgrAddress() {
    return attemptInfo.getHostname() + ":" + attemptInfo.getPort();
  }

  @Override
  public String getNodeHttpAddress() {
    return attemptInfo.getTrackerName() + ":" + attemptInfo.getHttpPort();
  }

  @Override
  public String getNodeRackName() {
    return attemptInfo.getRackname();
  }

  @Override
  public Counters getCounters() {
    return attemptInfo.getCounters();
  }

  @Override
  public TaskAttemptId getID() {
    return attemptId;
  }

  /** A completed attempt is always fully progressed. */
  @Override
  public float getProgress() {
    return 1.0f;
  }

  @Override
  public synchronized TaskAttemptReport getReport() {
    if (report == null) {
      constructTaskAttemptReport();
    }
    return report;
  }

  /** Completed attempts are always reported as being in CLEANUP. */
  @Override
  public Phase getPhase() {
    return Phase.CLEANUP;
  }

  @Override
  public TaskAttemptState getState() {
    return state;
  }

  @Override
  public boolean isFinished() {
    return true;
  }

  @Override
  public List<String> getDiagnostics() {
    return diagnostics;
  }

  @Override
  public long getLaunchTime() {
    return attemptInfo.getStartTime();
  }

  @Override
  public long getFinishTime() {
    return attemptInfo.getFinishTime();
  }

  @Override
  public long getShuffleFinishTime() {
    return attemptInfo.getShuffleFinishTime();
  }

  @Override
  public long getSortFinishTime() {
    return attemptInfo.getSortFinishTime();
  }

  @Override
  public int getShufflePort() {
    return attemptInfo.getShufflePort();
  }

  /**
   * Builds the {@link TaskAttemptReport} from the history record.
   * Called at most once, from the synchronized {@link #getReport()}.
   */
  private void constructTaskAttemptReport() {
    report = Records.newRecord(TaskAttemptReport.class);
    report.setTaskAttemptId(attemptId);
    report.setTaskAttemptState(state);
    report.setProgress(getProgress());
    report.setStartTime(attemptInfo.getStartTime());
    report.setFinishTime(attemptInfo.getFinishTime());
    report.setShuffleFinishTime(attemptInfo.getShuffleFinishTime());
    report.setSortFinishTime(attemptInfo.getSortFinishTime());
    if (localDiagMessage != null) {
      // The history error may be absent here; avoid a "null, " prefix.
      report.setDiagnosticInfo(attemptInfo.getError() == null
          ? localDiagMessage
          : attemptInfo.getError() + ", " + localDiagMessage);
    } else {
      report.setDiagnosticInfo(attemptInfo.getError());
    }
    // report.setPhase(attemptInfo.get); //TODO
    report.setStateString(attemptInfo.getState());
    report.setRawCounters(getCounters());
    report.setContainerId(attemptInfo.getContainerId());
    if (attemptInfo.getHostname() == null) {
      report.setNodeManagerHost("UNKNOWN");
    } else {
      report.setNodeManagerHost(attemptInfo.getHostname());
      report.setNodeManagerPort(attemptInfo.getPort());
    }
    report.setNodeManagerHttpPort(attemptInfo.getHttpPort());
  }
}
| apache-2.0 |
automenta/opennars | nars_java.0/io/Symbols.java | 3958 | /*
* Symbols.java
*
* Copyright (C) 2008 Pei Wang
*
* This file is part of Open-NARS.
*
* Open-NARS is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* Open-NARS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Open-NARS. If not, see <http://www.gnu.org/licenses/>.
*/
package nars.io;
/**
 * The ASCII symbols used in Narsese I/O: sentence punctuation, variable
 * prefixes, value delimiters, compound-term brackets and operators,
 * built-in relations, and display-only markers.
 */
public class Symbols {
    /* sentence types and delimiters */
    public static final char JUDGMENT_MARK = '.';
    public static final char QUESTION_MARK = '?';
    /* variable type prefixes */
    public static final char VAR_INDEPENDENT = '$';
    public static final char VAR_DEPENDENT = '#';
    public static final char VAR_QUERY = '?';
    /* numerical value delimiters, must be different from the Term delimiters */
    public static final char BUDGET_VALUE_MARK = '$';
    public static final char TRUTH_VALUE_MARK = '%';
    public static final char VALUE_SEPARATOR = ';';
    /* CompoundTerm delimiters, must use 4 different pairs */
    public static final char COMPOUND_TERM_OPENER = '(';
    public static final char COMPOUND_TERM_CLOSER = ')';
    public static final char STATEMENT_OPENER = '<';
    public static final char STATEMENT_CLOSER = '>';
    public static final char SET_EXT_OPENER = '{';
    public static final char SET_EXT_CLOSER = '}';
    public static final char SET_INT_OPENER = '[';
    public static final char SET_INT_CLOSER = ']';
    /* special characters in argument lists */
    public static final char ARGUMENT_SEPARATOR = ',';
    public static final char IMAGE_PLACE_HOLDER = '_';
    /* CompoundTerm operators, length = 1 */
    public static final String INTERSECTION_EXT_OPERATOR = "&";
    public static final String INTERSECTION_INT_OPERATOR = "|";
    public static final String DIFFERENCE_EXT_OPERATOR = "-";
    public static final String DIFFERENCE_INT_OPERATOR = "~";
    public static final String PRODUCT_OPERATOR = "*";
    public static final String IMAGE_EXT_OPERATOR = "/";
    public static final String IMAGE_INT_OPERATOR = "\\";
    /* CompoundStatement operators, length = 2 */
    public static final String NEGATION_OPERATOR = "--";
    public static final String DISJUNCTION_OPERATOR = "||";
    public static final String CONJUNCTION_OPERATOR = "&&";
    /* built-in relations, length = 3 */
    public static final String INHERITANCE_RELATION = "-->";
    public static final String SIMILARITY_RELATION = "<->";
    public static final String INSTANCE_RELATION = "{--";
    public static final String PROPERTY_RELATION = "--]";
    public static final String INSTANCE_PROPERTY_RELATION = "{-]";
    public static final String IMPLICATION_RELATION = "==>";
    public static final String EQUIVALENCE_RELATION = "<=>";
    /* experience line prefixes */
    public static final String INPUT_LINE = "IN";
    public static final String OUTPUT_LINE = "OUT";
    public static final char PREFIX_MARK = ':';
    public static final char RESET_MARK = '*';
    public static final char COMMENT_MARK = '/';
    /* Stamp delimiters, display only */
    public static final char STAMP_OPENER = '{';
    public static final char STAMP_CLOSER = '}';
    public static final char STAMP_SEPARATOR = ';';
    public static final char STAMP_STARTER = ':';
    /* TermLink type markers, display only */
    public static final String TO_COMPONENT_1 = " @(";
    public static final String TO_COMPONENT_2 = ")_ ";
    public static final String TO_COMPOUND_1 = " _@(";
    public static final String TO_COMPOUND_2 = ") ";
}
| gpl-2.0 |
MjAbuz/exist | extensions/fluent/test/src/org/exist/fluent/ListenerManagerTest.java | 17288 | package org.exist.fluent;
import static org.junit.Assert.*;
import org.hamcrest.*;
import org.jmock.Expectations;
import org.jmock.Mockery;
import org.jmock.api.Action;
import org.jmock.api.Invocation;
import org.jmock.integration.junit4.*;
import org.junit.*;
import org.junit.runner.RunWith;
/**
 * Tests for {@link ListenerManager}: verifies that {@link Document.Listener}
 * and {@link Folder.Listener} callbacks registered on individual documents,
 * on folders, and on folder document collections fire at the expected
 * trigger points (before/after create, update and remove) with the expected
 * event payloads, and that listeners removed before the action never fire.
 */
@RunWith(JMock.class)
public class ListenerManagerTest extends DatabaseTestCase {
	private Mockery context = new JUnit4Mockery();
	private Document.Listener documentListener;
	private Folder.Listener folderListener;

	// Fresh mock listeners for every test.
	@Before public void prepareMocks() {
		documentListener = context.mock(Document.Listener.class, "documentListener");
		folderListener = context.mock(Folder.Listener.class, "folderListener");
	}

	// Detach the mocks from the global manager so tests stay isolated.
	@After public void unregisterMocks() throws Exception {
		if (documentListener != null) ListenerManager.INSTANCE.remove(documentListener);
		if (folderListener != null) ListenerManager.INSTANCE.remove(folderListener);
		documentListener = null;
		folderListener = null;
	}

	// Matches an event equal to ev except that the expected document is
	// re-resolved from the database at match time (the handle held by ev
	// may be stale or predate the actual stored document).
	private Matcher<Document.Event> eqDelayedDoc(final Document.Event ev) {
		return new BaseMatcher<Document.Event>() {
			public void describeTo(Description desc) {
				desc.appendText("eqDelayedDoc(").appendValue(ev).appendText(")");
			}
			public boolean matches(Object o) {
				return new Document.Event(ev.trigger, ev.path, db.getDocument(ev.path)).equals(o);
			}
		};
	}

	// jMock action asserting, at the moment the listener fires, that the
	// document at 'path' does (or does not) exist in the database.
	private Action checkDocumentExists(final String path, final boolean shouldExist) {
		return new Action() {
			public void describeTo(Description desc) {
				desc.appendText("check that document '" + path + "' " + (shouldExist ? "exists" : "does not exist"));
			}
			public Object invoke(Invocation inv) throws Throwable {
				try {
					db.getDocument(path);
					if (!shouldExist) fail("document '" + path + "' exists but shouldn't");
				} catch (DatabaseException e) {
					if (shouldExist) fail("document '" + path + "' doesn't exist but should");
				}
				return null;
			}
		};
	}

	// jMock action asserting, at the moment the listener fires, that the
	// folder at 'path' does (or does not) exist in the database.
	private Action checkFolderExists(final String path, final boolean shouldExist) {
		return new Action() {
			public void describeTo(Description desc) {
				desc.appendText("check that folder '" + path + "' " + (shouldExist ? "exists" : "does not exist"));
			}
			public Object invoke(Invocation inv) throws Throwable {
				try {
					db.getFolder(path);
					if (!shouldExist) fail("folder '" + path + "' exists but shouldn't");
				} catch (DatabaseException e) {
					if (shouldExist) fail("folder '" + path + "' doesn't exist but should");
				}
				return null;
			}
		};
	}

	// jMock action asserting that the event's document carries the given
	// stamp attribute — used to tell the pre-update content from the
	// post-update content.
	private Action checkDocumentStamp(final String expectedStamp) {
		return new Action() {
			public void describeTo(Description desc) {
				desc.appendText("check that event document is stamped with '" + expectedStamp + "'");
			}
			public Object invoke(Invocation inv) throws Throwable {
				XMLDocument doc = ((Document.Event) inv.getParameter(0)).document.xml();
				assertNotNull("event document is null", doc);
				assertEquals(expectedStamp, doc.query().single("/test/@stamp").value());
				return null;
			}
		};
	}

	// Builds a <test/> document at the given path, creating parent folders
	// as needed (overwrites any existing document).
	private XMLDocument createDocument(String path) {
		return createDocument(path, null);
	}

	// As above, but stamps the root element with the given attribute value
	// when 'stamp' is non-null.
	private XMLDocument createDocument(String path, String stamp) {
		int k = path.lastIndexOf('/');
		assert k > 0;
		Folder folder = db.createFolder(path.substring(0, k));
		return folder.documents().build(Name.overwrite(db, path.substring(k + 1)))
				.elem("test").attrIf(stamp != null, "stamp", stamp).end("test")
				.commit();
	}

	// ---- listeners on folder.documents(): document creation ----

	@Test public void listenDocumentsBeforeCreateDocument1() {
		final String docPath = "/top/test.xml";
		final Document.Event ev = new Document.Event(Trigger.BEFORE_CREATE, docPath, null);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		top.documents().listeners().add(Trigger.BEFORE_CREATE, documentListener);
		createDocument(docPath);
		// creations outside the collection's scope must not fire the listener
		createDocument("/elsewhere/test.xml");
		createDocument("/top/deeper/test.xml");
	}

	@Test public void listenDocumentsBeforeCreateDocument2() {
		final String docPath = "/top/test.xml";
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			never(documentListener).handle(with(any(Document.Event.class)));
		}});
		// a listener removed before the action must not fire
		top.documents().listeners().add(Trigger.BEFORE_STORE, documentListener);
		top.documents().listeners().remove(documentListener);
		createDocument(docPath);
	}

	@Test public void listenDocumentsAfterCreateDocument1() {
		final String docPath = "/top/test.xml";
		// pre-create to capture a document handle for the expected event,
		// then delete so the listener sees a fresh creation
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.AFTER_CREATE, docPath, doc);
		doc.delete();
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(with(eqDelayedDoc(ev))); will(checkDocumentExists(docPath, true));
		}});
		top.documents().listeners().add(Trigger.AFTER_CREATE, documentListener);
		createDocument(docPath);
		createDocument("/elsewhere/test.xml");
		createDocument("/top/deeper/test.xml");
	}

	@Test public void listenDocumentsAfterCreateDocument2() {
		final String docPath = "/top/test.xml";
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			never(documentListener).handle(with(any(Document.Event.class)));
		}});
		top.documents().listeners().add(Trigger.AFTER_STORE, documentListener);
		top.documents().listeners().remove(documentListener);
		createDocument(docPath);
	}

	// ---- listeners on the folder itself: document creation ----

	@Test public void listenFolderBeforeCreateDocument1() {
		final String docPath = "/top/test2.xml";
		final Document.Event ev = new Document.Event(Trigger.BEFORE_CREATE, docPath, null);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		top.listeners().add(Trigger.BEFORE_CREATE, documentListener);
		createDocument(docPath);
		createDocument("/elsewhere/test.xml");
	}

	@Test public void listenFolderBeforeCreateDocument2() {
		final String docPath = "/top/test2.xml";
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			never(documentListener).handle(with(any(Document.Event.class)));
		}});
		top.listeners().add(Trigger.BEFORE_STORE, documentListener);
		top.listeners().remove(documentListener);
		createDocument(docPath);
	}

	@Test public void listenFolderAfterCreateDocument1() {
		final String docPath = "/top/test2.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.AFTER_CREATE, docPath, doc);
		doc.delete();
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(with(eqDelayedDoc(ev))); will(checkDocumentExists(docPath, true));
		}});
		top.listeners().add(Trigger.AFTER_CREATE, documentListener);
		createDocument(docPath);
		createDocument("/elsewhere/test.xml");
	}

	@Test public void listenFolderAfterCreateDocument2() {
		final String docPath = "/top/test2.xml";
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			never(documentListener).handle(with(any(Document.Event.class)));
		}});
		top.listeners().add(Trigger.AFTER_STORE, documentListener);
		top.listeners().remove(documentListener);
		createDocument(docPath);
	}

	// ---- update triggers: documents() scope and folder scope ----

	@Test public void listenDocumentsBeforeUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.BEFORE_UPDATE, docPath, doc);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			// before the update, the listener must still see the old content
			one(documentListener).handle(ev); will(checkDocumentStamp("before"));
		}});
		top.documents().listeners().add(Trigger.BEFORE_UPDATE, documentListener);
		createDocument(docPath, "after");
		createDocument("/elsewhere/test.xml");
		createDocument("/top/deeper/test.xml");
	}

	@Test public void listenDocumentsAfterUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.AFTER_UPDATE, docPath, doc);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			// after the update, the listener must see the new content
			one(documentListener).handle(ev); will(checkDocumentStamp("after"));
		}});
		top.documents().listeners().add(Trigger.AFTER_UPDATE, documentListener);
		createDocument(docPath, "after");
		createDocument("/elsewhere/test.xml");
		createDocument("/top/deeper/test.xml");
	}

	@Test public void listenFolderBeforeUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.BEFORE_UPDATE, docPath, doc);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentStamp("before"));
		}});
		top.listeners().add(Trigger.BEFORE_UPDATE, documentListener);
		createDocument(docPath, "after");
		createDocument("/elsewhere/test.xml");
	}

	@Test public void listenFolderAfterUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.AFTER_UPDATE, docPath, doc);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentStamp("after"));
		}});
		top.listeners().add(Trigger.AFTER_UPDATE, documentListener);
		createDocument(docPath, "after");
		createDocument("/elsewhere/test.xml");
	}

	// folder listeners are deep: a creation in a subfolder also fires
	@Test public void listenFolderDeepBeforeCreateDocument1() {
		final String docPath = "/top/middle/test2.xml";
		final Document.Event ev = new Document.Event(Trigger.BEFORE_CREATE, docPath, null);
		Folder top = db.createFolder("/top");
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		top.listeners().add(Trigger.BEFORE_CREATE, documentListener);
		createDocument(docPath);
		createDocument("/elsewhere/test.xml");
	}

	// ---- listeners on a single document: update triggers ----

	@Test public void listenDocumentBeforeUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.BEFORE_UPDATE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentStamp("before"));
		}});
		doc.listeners().add(Trigger.BEFORE_UPDATE, documentListener);
		createDocument(docPath, "after");
		// updates to other documents must not fire a per-document listener
		createDocument("/elsewhere/test.xml");
		createDocument("/top/test2.xml");
	}

	@Test public void listenDocumentAfterUpdateDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath, "before");
		final Document.Event ev = new Document.Event(Trigger.AFTER_UPDATE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentStamp("after"));
		}});
		doc.listeners().add(Trigger.AFTER_UPDATE, documentListener);
		createDocument(docPath, "after");
		createDocument("/elsewhere/test.xml");
		createDocument("/top/test2.xml");
	}

	// ---- delete triggers: documents() scope ----

	@Test public void listenDocumentsBeforeDeleteDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.BEFORE_REMOVE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, true));
		}});
		Folder top = db.createFolder("/top");
		top.documents().listeners().add(Trigger.BEFORE_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
		createDocument("/top/deeper/test.xml").delete();
	}

	@Test public void listenDocumentsBeforeDeleteDocument2() {
		final String docPath = "/top/test.xml";
		// the expected event holds a handle from an earlier incarnation of
		// the document; eqDelayedDoc re-resolves it at match time
		XMLDocument doc = createDocument(docPath, "before");
		doc.delete();
		final Document.Event ev = new Document.Event(Trigger.BEFORE_REMOVE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(with(eqDelayedDoc(ev))); will(checkDocumentExists(docPath, true));
		}});
		Folder top = db.createFolder("/top");
		top.documents().listeners().add(Trigger.BEFORE_REMOVE, documentListener);
		createDocument(docPath).delete();
		createDocument("/elsewhere/test.xml").delete();
		createDocument("/top/deeper/test.xml").delete();
	}

	@Test public void listenDocumentsAfterDeleteDocument1() {
		final String docPath = "/top/test.xml";
		final Document.Event ev = new Document.Event(Trigger.AFTER_REMOVE, docPath, null);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		Folder top = db.createFolder("/top");
		top.documents().listeners().add(Trigger.AFTER_REMOVE, documentListener);
		createDocument(docPath).delete();
		createDocument("/elsewhere/test.xml").delete();
		createDocument("/top/deeper/test.xml").delete();
	}

	// ---- delete triggers: folder scope (shallow and deep) ----

	@Test public void listenFolderBeforeDeleteDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.BEFORE_REMOVE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, true));
		}});
		Folder top = db.createFolder("/top");
		top.listeners().add(Trigger.BEFORE_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
	}

	@Test public void listenFolderBeforeDeleteDocument2() {
		final String docPath = "/top/deeper/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.BEFORE_REMOVE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, true));
		}});
		Folder top = db.createFolder("/top");
		top.listeners().add(Trigger.BEFORE_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
	}

	@Test public void listenFolderAfterDeleteDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.AFTER_REMOVE, docPath, null);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		Folder top = db.createFolder("/top");
		top.listeners().add(Trigger.AFTER_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
	}

	@Test public void listenFolderAfterDeleteDocument2() {
		final String docPath = "/top/deeper/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.AFTER_REMOVE, docPath, null);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		Folder top = db.createFolder("/top");
		top.listeners().add(Trigger.AFTER_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
	}

	// ---- delete triggers: single-document scope ----

	@Test public void listenDocumentBeforeDeleteDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.BEFORE_REMOVE, docPath, doc);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, true));
		}});
		doc.listeners().add(Trigger.BEFORE_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
		createDocument("/top/deeper/test.xml").delete();
		createDocument("/top/test2.xml").delete();
	}

	@Test public void listenDocumentAfterDeleteDocument1() {
		final String docPath = "/top/test.xml";
		XMLDocument doc = createDocument(docPath);
		final Document.Event ev = new Document.Event(Trigger.AFTER_REMOVE, docPath, null);
		context.checking(new Expectations() {{
			one(documentListener).handle(ev); will(checkDocumentExists(docPath, false));
		}});
		doc.listeners().add(Trigger.AFTER_REMOVE, documentListener);
		doc.delete();
		createDocument("/elsewhere/test.xml").delete();
		createDocument("/top/deeper/test.xml").delete();
		createDocument("/top/test2.xml").delete();
	}

	// ---- folder-creation triggers ----

	@Test public void listenBeforeCreateFolder1() {
		final String folderPath = "/top/child";
		final Folder.Event ev = new Folder.Event(Trigger.BEFORE_CREATE, folderPath, null);
		context.checking(new Expectations() {{
			one(folderListener).handle(ev); will(checkFolderExists(folderPath, false));
		}});
		Folder top = db.createFolder("/top");
		top.listeners().add(Trigger.BEFORE_CREATE, folderListener);
		top.children().create("child");
	}

	@Test public void listenBeforeCreateFolder2() {
		final String folderPath = "/top/middle/child";
		final Folder.Event ev = new Folder.Event(Trigger.BEFORE_CREATE, folderPath, null);
		context.checking(new Expectations() {{
			one(folderListener).handle(ev); will(checkFolderExists(folderPath, false));
		}});
		Folder top = db.createFolder("/top");
		Folder middle = db.createFolder("/top/middle");
		top.listeners().add(Trigger.BEFORE_CREATE, folderListener);
		middle.children().create("child");
	}
}
| lgpl-2.1 |
marsorp/blog | presto166/presto-hive/src/main/java/com/facebook/presto/hive/HiveRecordWriter.java | 6689 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.hive;
import com.facebook.presto.hive.HiveWriteUtils.FieldSetter;
import com.facebook.presto.hive.metastore.StorageFormat;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.block.Block;
import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeManager;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.Serializer;
import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
import org.apache.hadoop.hive.serde2.columnar.OptimizedLazyBinaryColumnarSerde;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.mapred.JobConf;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import static com.facebook.presto.hive.HiveErrorCode.HIVE_WRITER_CLOSE_ERROR;
import static com.facebook.presto.hive.HiveErrorCode.HIVE_WRITER_DATA_ERROR;
import static com.facebook.presto.hive.HiveType.toHiveTypes;
import static com.facebook.presto.hive.HiveWriteUtils.createFieldSetter;
import static com.facebook.presto.hive.HiveWriteUtils.createRecordWriter;
import static com.facebook.presto.hive.HiveWriteUtils.getRowColumnInspectors;
import static com.google.common.base.MoreObjects.toStringHelper;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES;
import static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getStandardStructObjectInspector;
/**
 * Writes rows to a Hive file through a Hive {@link Serializer} and
 * {@link RecordWriter}.
 * <p>
 * Values passed to {@link #addRow} arrive in {@code inputColumnNames} order;
 * they are mapped onto the (possibly differently ordered, possibly larger)
 * set of file columns via the struct fields resolved in the constructor.
 */
public class HiveRecordWriter
{
    private final Path path;

    @SuppressWarnings("deprecation")
    private final Serializer serializer;
    private final RecordWriter recordWriter;
    private final SettableStructObjectInspector tableInspector;
    // Struct fields in *input* column order; a reordered (and possibly
    // reduced) view of the file's columns.
    private final List<StructField> structFields;
    // Reusable row object; file columns never written stay null.
    private final Object row;
    // One setter per input column, parallel to structFields.
    private final FieldSetter[] setters;

    public HiveRecordWriter(
            Path path,
            List<String> inputColumnNames,
            StorageFormat storageFormat,
            Properties schema,
            TypeManager typeManager,
            JobConf conf)
    {
        this.path = requireNonNull(path, "path is null");

        // existing tables may have columns in a different order
        List<String> fileColumnNames = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(schema.getProperty(META_TABLE_COLUMNS, ""));
        List<Type> fileColumnTypes = toHiveTypes(schema.getProperty(META_TABLE_COLUMN_TYPES, "")).stream()
                .map(hiveType -> hiveType.getType(typeManager))
                .collect(toList());

        String serDe = storageFormat.getSerDe();
        if (serDe.equals(LazyBinaryColumnarSerDe.class.getName())) {
            // transparently substitute the optimized implementation
            serDe = OptimizedLazyBinaryColumnarSerde.class.getName();
        }
        serializer = initializeSerializer(conf, schema, serDe);
        recordWriter = createRecordWriter(path, conf, schema, storageFormat.getOutputFormat());

        List<ObjectInspector> objectInspectors = getRowColumnInspectors(fileColumnTypes);
        tableInspector = getStandardStructObjectInspector(fileColumnNames, objectInspectors);

        // reorder (and possibly reduce) struct fields to match input
        structFields = ImmutableList.copyOf(inputColumnNames.stream()
                .map(tableInspector::getStructFieldRef)
                .collect(toList()));

        row = tableInspector.create();

        setters = new FieldSetter[structFields.size()];
        for (int i = 0; i < setters.length; i++) {
            setters[i] = createFieldSetter(tableInspector, row, structFields.get(i), fileColumnTypes.get(structFields.get(i).getFieldID()));
        }
    }

    /**
     * Appends one row; {@code columns} must be in input-column order.
     *
     * @param columns one block per input column
     * @param position row position within each block
     */
    public void addRow(Block[] columns, int position)
    {
        // Iterate over the input columns (setters/structFields), not the
        // file columns: the file may declare more columns than are being
        // written, and bounding the loop by the file column count would
        // over-run columns[], structFields and setters[].
        for (int field = 0; field < setters.length; field++) {
            if (columns[field].isNull(position)) {
                tableInspector.setStructFieldData(row, structFields.get(field), null);
            }
            else {
                setters[field].setField(columns[field], position);
            }
        }

        try {
            recordWriter.write(serializer.serialize(row, tableInspector));
        }
        catch (SerDeException | IOException e) {
            throw new PrestoException(HIVE_WRITER_DATA_ERROR, e);
        }
    }

    /** Flushes and closes the underlying writer, keeping the output file. */
    public void commit()
    {
        try {
            recordWriter.close(false);
        }
        catch (IOException e) {
            throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error committing write to Hive", e);
        }
    }

    /** Closes the underlying writer in abort mode, discarding the output. */
    public void rollback()
    {
        try {
            recordWriter.close(true);
        }
        catch (IOException e) {
            throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error rolling back write to Hive", e);
        }
    }

    /**
     * Instantiates and initializes the named Hive serde via reflection.
     */
    @SuppressWarnings("deprecation")
    private static Serializer initializeSerializer(Configuration conf, Properties properties, String serializerName)
    {
        try {
            Serializer result = (Serializer) Class.forName(serializerName).getConstructor().newInstance();
            result.initialize(conf, properties);
            return result;
        }
        catch (SerDeException | ReflectiveOperationException e) {
            throw Throwables.propagate(e);
        }
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("path", path)
                .toString();
    }
}
| apache-2.0 |
SourceStudyNotes/log4j2 | src/main/java/org/apache/logging/log4j/core/config/xml/XmlConfigurationFactory.java | 1966 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.config.xml;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.ConfigurationFactory;
import org.apache.logging.log4j.core.config.ConfigurationSource;
import org.apache.logging.log4j.core.config.Order;
import org.apache.logging.log4j.core.config.plugins.Plugin;
/**
 * ConfigurationFactory implementation that builds {@link XmlConfiguration}
 * instances from XML configuration sources. Because its suffix list includes
 * the wildcard "*", it also serves as the catch-all default factory.
 */
@Plugin(name = "XmlConfigurationFactory", category = ConfigurationFactory.CATEGORY)
@Order(5)
public class XmlConfigurationFactory extends ConfigurationFactory {

    /**
     * File extensions handled by this factory; "*" makes it match any file.
     */
    public static final String[] SUFFIXES = {".xml", "*"};

    /**
     * Reports which file suffixes this factory supports.
     *
     * @return An array of File extensions.
     */
    @Override
    public String[] getSupportedTypes() {
        return SUFFIXES;
    }

    /**
     * Builds an XmlConfiguration backed by the given source.
     *
     * @param source The InputSource.
     * @return The Configuration.
     */
    @Override
    public Configuration getConfiguration(final ConfigurationSource source) {
        final Configuration configuration = new XmlConfiguration(source);
        return configuration;
    }
}
| apache-2.0 |
asedunov/intellij-community | platform/core-impl/src/com/intellij/psi/impl/PsiDocumentTransactionListener.java | 1203 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* @author max
*/
package com.intellij.psi.impl;
import com.intellij.openapi.editor.Document;
import com.intellij.psi.PsiFile;
import com.intellij.util.messages.Topic;
import org.jetbrains.annotations.NotNull;
/**
 * Message-bus listener notified when a PSI-document transaction over a
 * particular document/file pair starts and completes. The topic broadcasts
 * TO_PARENT, so events propagate from child message buses up to their parents.
 */
public interface PsiDocumentTransactionListener {

  /** Topic to subscribe to on a message bus to receive transaction events. */
  Topic<PsiDocumentTransactionListener> TOPIC =
    new Topic<>("psi.DocumentTransactionListener", PsiDocumentTransactionListener.class, Topic.BroadcastDirection.TO_PARENT);

  /** Called when a transaction over {@code document} (backed by {@code file}) begins. */
  void transactionStarted(@NotNull Document document, @NotNull PsiFile file);

  /** Called when the transaction over {@code document} has completed. */
  void transactionCompleted(@NotNull Document document, @NotNull PsiFile file);
}
| apache-2.0 |
SOTEDev/Nukkit | src/main/java/cn/nukkit/event/inventory/InventoryCloseEvent.java | 601 | package cn.nukkit.event.inventory;
import cn.nukkit.Player;
import cn.nukkit.event.HandlerList;
import cn.nukkit.inventory.Inventory;
/**
 * Event fired when a player closes an inventory.
 *
 * author: Box
 * Nukkit Project
 */
public class InventoryCloseEvent extends InventoryEvent {

    private static final HandlerList handlers = new HandlerList();

    // the player who closed the inventory
    private final Player player;

    public InventoryCloseEvent(Inventory inventory, Player who) {
        super(inventory);
        this.player = who;
    }

    public static HandlerList getHandlers() {
        return handlers;
    }

    /**
     * @return the player who closed the inventory
     */
    public Player getPlayer() {
        return this.player;
    }
}
| gpl-3.0 |
bihealth/cbioportal | core/src/main/java/org/mskcc/cbio/portal/authentication/googleplus/PortalUserDetailsService.java | 5483 | /*
* Copyright (c) 2015 Memorial Sloan-Kettering Cancer Center.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS
* FOR A PARTICULAR PURPOSE. The software and documentation provided hereunder
* is on an "as is" basis, and Memorial Sloan-Kettering Cancer Center has no
* obligations to provide maintenance, support, updates, enhancements or
* modifications. In no event shall Memorial Sloan-Kettering Cancer Center be
* liable to any party for direct, indirect, special, incidental or
* consequential damages, including lost profits, arising out of the use of this
* software and its documentation, even if Memorial Sloan-Kettering Cancer
* Center has been advised of the possibility of such damage.
*/
/*
* This file is part of cBioPortal.
*
* cBioPortal is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.mskcc.cbio.portal.authentication.googleplus;
import org.mskcc.cbio.portal.model.User;
import org.mskcc.cbio.portal.model.UserAuthorities;
import org.mskcc.cbio.portal.dao.PortalUserDAO;
import org.mskcc.cbio.portal.authentication.PortalUserDetails;
import org.mskcc.cbio.portal.util.DynamicState;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.base.Strings;
import com.google.inject.internal.Preconditions;
import org.springframework.security.core.userdetails.*;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.AuthorityUtils;
import java.util.List;
/**
 * Responsible for verifying that a social site user name has been registered in the
 * portal database. For registered, enabled users with authorities, an instance of
 * PortalUserDetails is completed and returned; a UsernameNotFoundException is thrown
 * for all other users (note: despite older docs, null is never returned).
 *
 * Implementation based on code in OpenIDUserDetailsService
 *
 * @author criscuof
 */
public class PortalUserDetailsService implements UserDetailsService {
    private static final Log log = LogFactory.getLog(PortalUserDetailsService.class);
    // ref to our user dao
    private final PortalUserDAO portalUserDAO;

    /**
     * Constructor.
     *
     * Takes a ref to PortalUserDAO used to authenticate registered users in the
     * database.
     *
     * @param portalUserDAO PortalUserDAO
     */
    public PortalUserDetailsService(PortalUserDAO portalUserDAO) {
        this.portalUserDAO = portalUserDAO;
    }

    /**
     * Looks up the given username in the portal user table and, if the user exists,
     * is enabled, and has authorities, builds the Spring Security user details.
     *
     * @param username the social-login username/email to authenticate
     * @return populated PortalUserDetails for a registered, enabled user
     * @throws UsernameNotFoundException if the user is unknown, disabled, or has no authorities;
     *         the username is carried in the exception message for downstream handlers
     */
    @Override
    public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
        Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "A username is required");
        // set the username into the global state so other components can find out who
        // logged in or tried to log in most recently
        DynamicState.INSTANCE.setCurrentUser(username);
        if (log.isDebugEnabled()) {
            log.debug("loadUserByUsername(), attempting to fetch portal user, email: " + username);
        }
        PortalUserDetails toReturn = null;
        User user = null;
        try {
            user = portalUserDAO.getPortalUser(username);
        } catch (Exception e ){
            // a missing user is expected for first-time visitors; handled by the null check below
            log.debug("User " +username +" was not found in the cbio users table");
        }
        // only enabled accounts may authenticate
        if (user != null && user.isEnabled()) {
            if (log.isDebugEnabled()) {
                log.debug("loadUserByUsername(), attempting to fetch portal user authorities, username: " + username);
            }
            UserAuthorities authorities = portalUserDAO.getPortalUserAuthorities(username);
            if (authorities != null) {
                List<GrantedAuthority> grantedAuthorities
                    = AuthorityUtils.createAuthorityList(
                        authorities.getAuthorities().toArray(new String[authorities.getAuthorities().size()]));
                toReturn = new PortalUserDetails(username, grantedAuthorities);
                toReturn.setEmail(user.getEmail());
                toReturn.setName(user.getName());
            }
        }
        // outta here
        if (toReturn == null) {
            if (log.isDebugEnabled()) {
                log.debug("loadUserByUsername(), user and/or user authorities is null, user name: " +username);
            }
            // set the failedUser & currentUser attributes
            DynamicState.INSTANCE.setCurrentUser("");
            DynamicState.INSTANCE.setFailedUser(username);
            // use the Exception message to attach the username to the request object
            throw new UsernameNotFoundException(username);
        }
        else {
            if (log.isDebugEnabled()) {
                log.debug("loadUserByUsername(), successfully authenticated user, user name: " + username);
            }
            return toReturn;
        }
    }
}
| agpl-3.0 |
lwriemen/bridgepoint | src/org.xtuml.bp.ui.canvas/src/org/xtuml/bp/ui/canvas/util/ConnectorUtil.java | 2191 | //========================================================================
//
//File: $RCSfile: ConnectorUtil.java,v $
//Version: $Revision: 1.11 $
//Modified: $Date: 2013/01/10 23:19:03 $
//
//(c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//========================================================================
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
//========================================================================
package org.xtuml.bp.ui.canvas.util;
import org.xtuml.bp.core.Association_c;
import org.xtuml.bp.core.common.ClassQueryInterface_c;
import org.xtuml.bp.ui.canvas.Connector_c;
import org.xtuml.bp.ui.canvas.FloatingText_c;
import org.xtuml.bp.ui.canvas.GraphicalElement_c;
import org.xtuml.bp.ui.canvas.Ooaofgraphics;
/**
 * Contains utility methods applicable to connector instances.
 */
public class ConnectorUtil
{
    /** Static-only utility class; prevent instantiation (Effective Java, Item 4). */
    private ConnectorUtil()
    {
    }

    /**
     * Returns the connector-text of the given association's given end,
     * under the given graphics-root.
     *
     * @param graphicsRoot graphics root to search under
     * @param assoc association whose representing connector is looked up
     * @param end which end of the connector the floating text must belong to
     * @return the floating text whose end matches {@code end} (result of the R8 query)
     */
    public static FloatingText_c getText(
        Ooaofgraphics graphicsRoot, final Association_c assoc, final int end)
    {
        GraphicalElement_c element =
            GraphicalElementUtil.getRepresentingElement(graphicsRoot, assoc);
        Connector_c connector = Connector_c.getOneGD_CONOnR2(element);
        // filter the connector's texts down to the one anchored at the requested end
        return FloatingText_c.getOneGD_CTXTOnR8(connector,
            new ClassQueryInterface_c() {
                public boolean evaluate(Object candidate) {
                    return ((FloatingText_c)candidate).getEnd() == end;
                }
            });
    }
}
| apache-2.0 |
smgoller/geode | geode-core/src/main/java/org/apache/geode/IncompatibleSystemException.java | 1567 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode;
/**
 * An <code>IncompatibleSystemException</code> is thrown when a new GemFire process tries to connect
 * to an existing distributed system and its version is not the same as that of the distributed
 * system. In this case the new member is not allowed to connect to the distributed system.
 * <p>
 * As of GemFire 5.0 this exception should be named IncompatibleDistributedSystemException
 */
public class IncompatibleSystemException extends GemFireException {
  private static final long serialVersionUID = -6852188720149734350L;

  ////////////////////// Constructors //////////////////////

  /**
   * Creates a new <code>IncompatibleSystemException</code>.
   *
   * @param message description of the detected version incompatibility
   */
  public IncompatibleSystemException(String message) {
    super(message);
  }
}
| apache-2.0 |
ltangvald/mysql | storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java | 2008 | /*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package com.mysql.clusterj.tie;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import com.mysql.clusterj.core.store.Table;
/**
 * NdbRecord-based delete operation. A delete only needs the primary key
 * columns, so this class allocates and fills just the key buffer; the
 * buffers and operation handles themselves are fields inherited from
 * NdbRecordOperationImpl.
 */
public class NdbRecordDeleteOperationImpl extends NdbRecordOperationImpl {

    /**
     * @param clusterTransaction owning transaction; supplies the cached key NdbRecord
     * @param storeTable table from which rows will be deleted
     */
    public NdbRecordDeleteOperationImpl(
            ClusterTransactionImpl clusterTransaction, Table storeTable) {
        super(clusterTransaction, storeTable);
        this.ndbRecordKeys = clusterTransaction.getCachedNdbRecordImpl(storeTable);
        this.keyBufferSize = ndbRecordKeys.getBufferSize();
        this.numberOfColumns = ndbRecordKeys.getNumberOfColumns();
        resetMask();
    }

    /** Begins defining the operation: allocates the key buffer for subsequent key values. */
    public void beginDefinition() {
        // allocate a buffer for the operation data
        keyBuffer = ndbRecordKeys.newBuffer();
        // use platform's native byte ordering
        keyBuffer.order(ByteOrder.nativeOrder());
    }

    /** Ends the definition: creates the delete operation and schedules buffer cleanup. */
    public void endDefinition() {
        // create the delete operation
        ndbOperation = delete(clusterTransaction);
        // release the buffers once the transaction has executed this operation
        clusterTransaction.postExecuteCallback(new Runnable() {
            public void run() {
                freeResourcesAfterExecute();
            }
        });
    }

    @Override
    public String toString() {
        return " delete " + tableName;
    }
}
| gpl-2.0 |
sonamuthu/rice-1 | rice-middleware/impl/src/main/java/org/kuali/rice/kew/engine/node/var/schemes/LiteralScheme.java | 1384 | /**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.kew.engine.node.var.schemes;
import org.kuali.rice.kew.engine.RouteContext;
import org.kuali.rice.kew.engine.node.var.Property;
import org.kuali.rice.kew.engine.node.var.PropertyScheme;
/**
 * A property scheme whose value is simply the literal text of the property's
 * locator portion; the route context is never consulted.
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 */
public final class LiteralScheme implements PropertyScheme {

    private static final String SCHEME_NAME = "literal";
    private static final String SCHEME_SHORT_NAME = "lit";

    public String getName() {
        return SCHEME_NAME;
    }

    public String getShortName() {
        return SCHEME_SHORT_NAME;
    }

    /**
     * Resolves the property by returning its raw locator text unchanged.
     */
    public Object load(Property property, RouteContext context) {
        return property.locator;
    }

    public String toString() {
        return "[LiteralScheme]";
    }
}
| apache-2.0 |
jdahlstrom/vaadin.react | uitest/src/main/java/com/vaadin/tests/components/grid/GridInTabSheet.java | 3553 | /*
* Copyright 2000-2014 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.tests.components.grid;
import com.vaadin.server.VaadinRequest;
import com.vaadin.tests.components.AbstractTestUI;
import com.vaadin.ui.Button;
import com.vaadin.ui.Button.ClickEvent;
import com.vaadin.ui.Grid;
import com.vaadin.ui.Grid.CellReference;
import com.vaadin.ui.Grid.CellStyleGenerator;
import com.vaadin.ui.Grid.SelectionMode;
import com.vaadin.ui.Label;
import com.vaadin.ui.TabSheet;
/**
 * Manual-test UI: a Grid inside a TabSheet, with buttons to add/remove rows
 * and to attach a CellStyleGenerator while the Grid may be on a hidden tab.
 */
public class GridInTabSheet extends AbstractTestUI {

    @Override
    protected void setup(VaadinRequest request) {
        TabSheet sheet = new TabSheet();
        final Grid grid = new Grid();
        grid.setSelectionMode(SelectionMode.MULTI);
        // single integer column; the item ids double as the row values 0..2
        grid.addColumn("count", Integer.class);
        for (Integer i = 0; i < 3; ++i) {
            grid.addRow(i);
        }

        sheet.addTab(grid, "Grid");
        sheet.addTab(new Label("Hidden"), "Label");

        addComponent(sheet);
        // rows added later get ids 100, 101, ... so they are distinct from the initial ids
        addComponent(new Button("Add row to Grid", new Button.ClickListener() {
            private Integer k = 0;

            @Override
            public void buttonClick(ClickEvent event) {
                grid.addRow(100 + (k++));
            }
        }));
        addComponent(new Button("Remove row from Grid",
                new Button.ClickListener() {
                    // NOTE(review): this counter is never used in this listener
                    private Integer k = 0;

                    @Override
                    public void buttonClick(ClickEvent event) {
                        // always removes the first remaining row, if any
                        Object firstItemId = grid.getContainerDataSource()
                                .firstItemId();
                        if (firstItemId != null) {
                            grid.getContainerDataSource().removeItem(
                                    firstItemId);
                        }
                    }
                }));
        addComponent(new Button("Add CellStyleGenerator",
                new Button.ClickListener() {
                    @Override
                    public void buttonClick(ClickEvent event) {
                        grid.setCellStyleGenerator(new CellStyleGenerator() {
                            @Override
                            public String getStyle(CellReference cellReference) {
                                // the item id IS the row's integer value (see setup above)
                                int rowIndex = ((Integer) cellReference
                                        .getItemId()).intValue();
                                Object propertyId = cellReference
                                        .getPropertyId();
                                // rows with id % 4 == 1 get no style
                                if (rowIndex % 4 == 1) {
                                    return null;
                                } else if (rowIndex % 4 == 3
                                        && "Column 1".equals(propertyId)) {
                                    // NOTE(review): the only column here is "count", so this
                                    // branch never matches — presumably copied from another
                                    // test; TODO confirm whether it is intentional
                                    return null;
                                }
                                // style name is the property id with spaces replaced
                                return propertyId.toString().replace(' ', '_');
                            }
                        });
                    }
                }));
    }
}
| apache-2.0 |
zstackorg/zstack | header/src/main/java/org/zstack/header/configuration/PythonApiBindingWriter.java | 134 | package org.zstack.header.configuration;
/**
 * Implemented by configuration/API types that can render themselves as
 * Python binding source code.
 */
public interface PythonApiBindingWriter {
    /**
     * Appends this object's Python binding code to the given buffer.
     *
     * @param sb buffer the Python source is appended to
     */
    void writePython(StringBuilder sb);
}
| apache-2.0 |
azhagiripanneerselvam/jenkins-demo | target/greenhouse-1.0.0.BUILD-SNAPSHOT/WEB-INF/classes/com/springsource/greenhouse/invite/InviteAcceptAction.java | 1730 | /*
* Copyright 2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.springsource.greenhouse.invite;
import org.joda.time.DateTime;
import com.springsource.greenhouse.account.Account;
import com.springsource.greenhouse.activity.action.Action;
import com.springsource.greenhouse.utils.Location;
/**
 * Activity action recording that an invitee accepted his or her invite by
 * signing up as a member of our community.
 * @author Keith Donald
 */
public final class InviteAcceptAction extends Action {

    // id of the member account that sent the invite
    private final Long inviterAccountId;

    // when the invite was originally sent
    private final DateTime inviteSentTime;

    public InviteAcceptAction(Long id, DateTime time, Account account, Location location, Long sentBy, DateTime sentTime) {
        super(id, time, account, location);
        this.inviterAccountId = sentBy;
        this.inviteSentTime = sentTime;
    }

    /**
     * The id of the member account that sent the invite.
     */
    public Long getSentBy() {
        return inviterAccountId;
    }

    /**
     * The time the invite was originally sent.
     */
    public DateTime getSentTime() {
        return inviteSentTime;
    }

    public String toString() {
        // TODO Account.gender needs to be preserved so his/her can be rendered correctly
        return getAccount().getFullName() + " accepted his Greenhouse invitation";
    }
}
| apache-2.0 |
nwnpallewela/devstudio-tooling-esb | plugins/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/MergeNodeSecondInputConnector.java | 487 | /**
* <copyright>
* </copyright>
*
* $Id$
*/
package org.wso2.developerstudio.eclipse.gmf.esb;
/**
 * <!-- begin-user-doc -->
 * A representation of the model object '<em><b>Merge Node Second Input Connector</b></em>'.
 * Marker interface (presumably the second input terminal of a merge node);
 * it adds no members beyond {@link InputConnector}. EMF-generated — do not
 * hand-edit outside the user-doc region.
 * <!-- end-user-doc -->
 *
 *
 * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getMergeNodeSecondInputConnector()
 * @model
 * @generated
 */
public interface MergeNodeSecondInputConnector extends InputConnector {
} // MergeNodeSecondInputConnector
| apache-2.0 |
izzizz/pinot | pinot-transport/src/main/java/com/linkedin/pinot/transport/common/ReplicaSelectionGranularity.java | 1456 | /**
* Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.pinot.transport.common;
/**
 * Determines at what level the selection for nodes (replica) has to happen.
 */
public enum ReplicaSelectionGranularity {

  /**
   * For each segmentId in the request, the replica-selection policy is applied to get the node.
   * If the selection policy is random or round-robin, then this granularity would likely increase
   * the fan-out of the scatter request since there is a greater chance all replicas will be queried.
   */
  SEGMENT_ID,

  /**
   * For each segmentId-group in the request, the replica-selection policy is applied to get the node.
   * This will likely reduce the fan-out as all segmentIds in the segmentId group go to the same service,
   * assuming the services hosting each segmentId-group are disjoint.
   */
  SEGMENT_ID_SET
}
| apache-2.0 |
apache/flink | flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java | 14368 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.kinesis.manualtests;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.runtime.testutils.MiniClusterResource;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
import org.apache.flink.streaming.connectors.kinesis.testutils.ExactlyOnceValidatingConsumerThread;
import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.model.DescribeStreamResult;
import com.amazonaws.services.kinesis.model.LimitExceededException;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
import com.amazonaws.services.kinesis.model.PutRecordsResult;
import org.apache.commons.lang3.RandomStringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
/**
 * This test first starts a data generator, producing data into kinesis. Then, it starts a consuming
 * topology, ensuring that all records up to a certain point have been seen. While the data
 * generator and consuming topology is running, the kinesis stream is resharded two times.
 *
 * <p>Invocation: --region eu-central-1 --accessKey X --secretKey X
 */
public class ManualExactlyOnceWithStreamReshardingTest {

    private static final Logger LOG =
            LoggerFactory.getLogger(ManualExactlyOnceWithStreamReshardingTest.class);

    // a large enough record count so we can test resharding while data is still flowing
    static final int TOTAL_EVENT_COUNT = 20000;

    public static void main(String[] args) throws Exception {
        final ParameterTool pt = ParameterTool.fromArgs(args);
        LOG.info("Starting exactly once with stream resharding test");

        // every run writes into a freshly created, uniquely named stream
        final String streamName = "flink-test-" + UUID.randomUUID().toString();
        final String accessKey = pt.getRequired("accessKey");
        final String secretKey = pt.getRequired("secretKey");
        final String region = pt.getRequired("region");

        final Properties configProps = new Properties();
        configProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
        configProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
        configProps.setProperty(ConsumerConfigConstants.AWS_REGION, region);
        // discover new shards immediately so the consumer notices the resharding
        configProps.setProperty(ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS, "0");
        final AmazonKinesis client = AWSUtil.createKinesisClient(configProps);

        // the stream is first created with 1 shard
        client.createStream(streamName, 1);

        // wait until stream has been created
        DescribeStreamResult status = client.describeStream(streamName);
        LOG.info("status {}", status);
        while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
            status = client.describeStream(streamName);
            LOG.info("Status of stream {}", status);
            Thread.sleep(1000);
        }

        final Configuration flinkConfig = new Configuration();
        flinkConfig.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.parse("16m"));
        // restart immediately on failure
        flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

        MiniClusterResource flink =
                new MiniClusterResource(
                        new MiniClusterResourceConfiguration.Builder()
                                .setNumberTaskManagers(1)
                                .setNumberSlotsPerTaskManager(8)
                                .setConfiguration(flinkConfig)
                                .build());
        flink.before();

        final int flinkPort = flink.getRestAddres().getPort();

        try {
            // we have to use a manual generator here instead of the FlinkKinesisProducer
            // because the FlinkKinesisProducer currently has a problem where records will be resent
            // to a shard
            // when resharding happens; this affects the consumer exactly-once validation test and
            // will never pass
            final AtomicReference<Throwable> producerError = new AtomicReference<>();
            Runnable manualGenerate =
                    new Runnable() {
                        @Override
                        public void run() {
                            // intentionally shadows the outer 'client': the producer thread
                            // uses its own Kinesis client instance
                            AmazonKinesis client = AWSUtil.createKinesisClient(configProps);
                            int count = 0;
                            final int batchSize = 30;
                            while (true) {
                                try {
                                    Thread.sleep(10);
                                    Set<PutRecordsRequestEntry> batch = new HashSet<>();
                                    for (int i = count; i < count + batchSize; i++) {
                                        if (i >= TOTAL_EVENT_COUNT) {
                                            break;
                                        }
                                        // payload is "<sequence number>-<random letters>" so the
                                        // consumer can validate exactly-once by sequence number
                                        batch.add(
                                                new PutRecordsRequestEntry()
                                                        .withData(
                                                                ByteBuffer.wrap(
                                                                        ((i)
                                                                                        + "-"
                                                                                        + RandomStringUtils
                                                                                                .randomAlphabetic(
                                                                                                        12))
                                                                                .getBytes(
                                                                                        ConfigConstants
                                                                                                .DEFAULT_CHARSET)))
                                                        .withPartitionKey(
                                                                UUID.randomUUID().toString()));
                                    }
                                    count += batchSize;

                                    PutRecordsResult result =
                                            client.putRecords(
                                                    new PutRecordsRequest()
                                                            .withStreamName(streamName)
                                                            .withRecords(batch));

                                    // the putRecords() operation may have failing records; to keep
                                    // this test simple
                                    // instead of retrying on failed records, we simply pass on a
                                    // runtime exception
                                    // and let this test fail
                                    if (result.getFailedRecordCount() > 0) {
                                        producerError.set(
                                                new RuntimeException(
                                                        "The producer has failed records in one of the put batch attempts."));
                                        break;
                                    }

                                    if (count >= TOTAL_EVENT_COUNT) {
                                        break;
                                    }
                                } catch (Exception e) {
                                    // recorded error is noticed by the main thread's wait loop,
                                    // which then interrupts this thread
                                    producerError.set(e);
                                }
                            }
                        }
                    };
            Thread producerThread = new Thread(manualGenerate);
            producerThread.start();

            final AtomicReference<Throwable> consumerError = new AtomicReference<>();
            Thread consumerThread =
                    ExactlyOnceValidatingConsumerThread.create(
                            TOTAL_EVENT_COUNT,
                            10000,
                            2,
                            500,
                            500,
                            accessKey,
                            secretKey,
                            region,
                            streamName,
                            consumerError,
                            flinkPort,
                            flinkConfig);
            consumerThread.start();

            // reshard the Kinesis stream while the producer / and consumers are running
            Runnable splitShard =
                    new Runnable() {
                        @Override
                        public void run() {
                            try {
                                // first, split shard in the middle of the hash range
                                Thread.sleep(5000);
                                LOG.info("Splitting shard ...");
                                client.splitShard(
                                        streamName,
                                        KinesisShardIdGenerator.generateFromShardOrder(0),
                                        "170141183460469231731687303715884105727");

                                // wait until the split shard operation finishes updating ...
                                DescribeStreamResult status;
                                Random rand = new Random();
                                do {
                                    status = null;
                                    while (status == null) {
                                        // retry until we get status
                                        try {
                                            status = client.describeStream(streamName);
                                        } catch (LimitExceededException lee) {
                                            // describeStream is heavily rate limited; back off a
                                            // random amount before retrying
                                            LOG.warn(
                                                    "LimitExceededException while describing stream ... retrying ...");
                                            Thread.sleep(rand.nextInt(1200));
                                        }
                                    }
                                } while (!status.getStreamDescription()
                                        .getStreamStatus()
                                        .equals("ACTIVE"));

                                // then merge again
                                Thread.sleep(7000);
                                LOG.info("Merging shards ...");
                                client.mergeShards(
                                        streamName,
                                        KinesisShardIdGenerator.generateFromShardOrder(1),
                                        KinesisShardIdGenerator.generateFromShardOrder(2));
                            } catch (InterruptedException iex) {
                                // interrupted by the main thread during shutdown; nothing to do
                            }
                        }
                    };
            Thread splitShardThread = new Thread(splitShard);
            splitShardThread.start();

            boolean deadlinePassed = false;
            long deadline =
                    System.currentTimeMillis() + (1000 * 5 * 60); // wait at most for five minutes

            // wait until both producer and consumer finishes, or an unexpected error is thrown
            while ((consumerThread.isAlive() || producerThread.isAlive())
                    && (producerError.get() == null && consumerError.get() == null)) {
                Thread.sleep(1000);
                if (System.currentTimeMillis() >= deadline) {
                    LOG.warn("Deadline passed");
                    deadlinePassed = true;
                    break; // enough waiting
                }
            }

            if (producerThread.isAlive()) {
                producerThread.interrupt();
            }
            if (consumerThread.isAlive()) {
                consumerThread.interrupt();
            }

            if (producerError.get() != null) {
                LOG.info("+++ TEST failed! +++");
                throw new RuntimeException("Producer failed", producerError.get());
            }

            if (consumerError.get() != null) {
                LOG.info("+++ TEST failed! +++");
                throw new RuntimeException("Consumer failed", consumerError.get());
            }

            if (!deadlinePassed) {
                LOG.info("+++ TEST passed! +++");
            } else {
                LOG.info("+++ TEST failed! +++");
            }
        } finally {
            // always clean up the (billed) Kinesis stream and the local Flink cluster
            client.deleteStream(streamName);
            client.shutdown();

            // stopping flink
            flink.after();
        }
    }
}
| apache-2.0 |
jdahlstrom/vaadin.react | shared/src/main/java/com/vaadin/shared/ui/video/VideoState.java | 790 | /*
* Copyright 2000-2014 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.shared.ui.video;
import com.vaadin.shared.ui.AbstractMediaState;
/**
 * Shared state for the Video component. Identical to the common media
 * state except for the video-specific primary style name.
 */
public class VideoState extends AbstractMediaState {

    public VideoState() {
        // Same assignment the instance initializer used to perform.
        primaryStyleName = "v-video";
    }
}
| apache-2.0 |
leehz/BlackLight | blacklight-base/src/main/java/info/papdt/blacklight/model/RepostListModel.java | 2462 | /*
* Copyright (C) 2014 Peter Cai
*
* This file is part of BlackLight
*
* BlackLight is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BlackLight is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BlackLight. If not, see <http://www.gnu.org/licenses/>.
*/
package info.papdt.blacklight.model;
import android.os.Parcel;
import android.os.Parcelable;
import java.util.ArrayList;
import java.util.List;
/*
A list of reposts
*/
public class RepostListModel extends MessageListModel
{
    // Backing storage for the repost messages; the superclass' own list is not used.
    private List<MessageModel> reposts = new ArrayList<MessageModel>();

    @Override
    public int getSize() {
        return reposts.size();
    }

    @Override
    public MessageModel get(int position) {
        return reposts.get(position);
    }

    @Override
    public List<? extends MessageModel> getList() {
        return reposts;
    }

    // Merges the incoming messages into this list, skipping duplicates.
    // When toTop is true, each new message is inserted at the index it had
    // in the incoming list (so fresh items land at the front in order);
    // otherwise new items are appended at the end.
    @Override
    public void addAll(boolean toTop, MessageListModel values) {
        if (values != null && values.getSize() > 0) {
            for (MessageModel msg : values.getList()) {
                if (!reposts.contains(msg)) {
                    reposts.add(toTop ? values.getList().indexOf(msg) : reposts.size(), msg);
                }
            }
            total_number = values.total_number;
        }
    }

    // Reposts are not filtered by friendship; the friendsOnly/myUid arguments
    // are ignored and the plain merge above is used.
    @Override
    public void addAll(boolean toTop, boolean friendsOnly, MessageListModel values, String myUid) {
        addAll(toTop, values);
    }

    // Field order here must stay in sync with createFromParcel() below.
    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(total_number);
        dest.writeString(previous_cursor);
        dest.writeString(next_cursor);
        dest.writeTypedList(reposts);
    }

    public static final Parcelable.Creator<MessageListModel> CREATOR = new Parcelable.Creator<MessageListModel>() {
        // Reads fields in the exact order written by writeToParcel().
        @Override
        public RepostListModel createFromParcel(Parcel in) {
            RepostListModel ret = new RepostListModel();
            ret.total_number = in.readInt();
            ret.previous_cursor = in.readString();
            ret.next_cursor = in.readString();
            in.readTypedList(ret.reposts, MessageModel.CREATOR);
            return ret;
        }

        @Override
        public RepostListModel[] newArray(int size) {
            return new RepostListModel[size];
        }
    };
}
| gpl-3.0 |
nishantmonu51/druid | core/src/test/java/org/apache/druid/guice/ConditionalMultibindTest.java | 13137 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.guice;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Binder;
import com.google.inject.BindingAnnotation;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.Module;
import com.google.inject.TypeLiteral;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
/**
*/
/**
 * Tests for ConditionalMultibind: bindings are contributed to the multibound
 * set only when the predicate registered for the "animal.type" property
 * matches its configured value.
 */
public class ConditionalMultibindTest
{
  private static final String ANIMAL_TYPE = "animal.type";

  private Properties props;

  @Before
  public void setUp()
  {
    props = new Properties();
  }

  // Only the binding whose condition matches the property value is added.
  @Test
  public void testMultiConditionalBind_cat()
  {
    props.setProperty("animal.type", "cat");

    Injector injector = Guice.createInjector(new Module()
    {
      @Override
      public void configure(Binder binder)
      {
        ConditionalMultibind.create(props, binder, Animal.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("cat"), Cat.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("dog"), Dog.class);
      }
    });

    Set<Animal> animalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }));
    Assert.assertEquals(1, animalSet.size());
    Assert.assertEquals(animalSet, ImmutableSet.<Animal>of(new Cat()));
  }

  // Multiple bindings sharing the same matching condition all contribute.
  @Test
  public void testMultiConditionalBind_cat_dog()
  {
    props.setProperty("animal.type", "pets");

    Injector injector = Guice.createInjector(new Module()
    {
      @Override
      public void configure(Binder binder)
      {
        ConditionalMultibind.create(props, binder, Animal.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Cat.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Dog.class);
      }
    });

    Set<Animal> animalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }));
    Assert.assertEquals(2, animalSet.size());
    Assert.assertEquals(animalSet, ImmutableSet.of(new Cat(), new Dog()));
  }

  // Two separate create() calls inside one module accumulate into the same set.
  @Test
  public void testMultiConditionalBind_cat_dog_non_continuous_syntax()
  {
    props.setProperty("animal.type", "pets");

    Injector injector = Guice.createInjector(new Module()
    {
      @Override
      public void configure(Binder binder)
      {
        ConditionalMultibind.create(props, binder, Animal.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Cat.class);

        ConditionalMultibind.create(props, binder, Animal.class)
                            .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Dog.class);
      }
    });

    Set<Animal> animalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }));
    Assert.assertEquals(2, animalSet.size());
    Assert.assertEquals(animalSet, ImmutableSet.of(new Cat(), new Dog()));
  }

  // Bindings spread across multiple modules merge; non-matching ones are dropped.
  @Test
  public void testMultiConditionalBind_multiple_modules()
  {
    props.setProperty("animal.type", "pets");

    Injector injector = Guice.createInjector(
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Cat.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Dog.class);
          }
        },
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("not_match"), Tiger.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Fish.class);
          }
        }
    );

    Set<Animal> animalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }));
    Assert.assertEquals(3, animalSet.size());
    Assert.assertEquals(animalSet, ImmutableSet.of(new Cat(), new Dog(), new Fish()));
  }

  // Sets qualified with different binding annotations are kept independent.
  @Test
  public void testMultiConditionalBind_multiple_modules_with_annotation()
  {
    props.setProperty("animal.type", "pets");

    Injector injector = Guice.createInjector(
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class, SanDiego.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Cat.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Dog.class);
          }
        },
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class, SanDiego.class)
                                .addBinding(new Bird())
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Tiger.class);

            ConditionalMultibind.create(props, binder, Animal.class, SanJose.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Fish.class);
          }
        }
    );

    Set<Animal> animalSet_1 = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }, SanDiego.class));
    Assert.assertEquals(4, animalSet_1.size());
    Assert.assertEquals(animalSet_1, ImmutableSet.of(new Bird(), new Cat(), new Dog(), new Tiger()));

    Set<Animal> animalSet_2 = injector.getInstance(Key.get(new TypeLiteral<Set<Animal>>()
    {
    }, SanJose.class));
    Assert.assertEquals(1, animalSet_2.size());
    Assert.assertEquals(animalSet_2, ImmutableSet.<Animal>of(new Fish()));
  }

  // The multibound set can also be field-injected into a consumer.
  @Test
  public void testMultiConditionalBind_inject()
  {
    props.setProperty("animal.type", "pets");

    Injector injector = Guice.createInjector(
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class)
                                .addBinding(Bird.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Cat.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Dog.class);
          }
        },
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind.create(props, binder, Animal.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("not_match"), Tiger.class)
                                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), Fish.class);
          }
        }
    );

    PetShotAvails shop = new PetShotAvails();
    injector.injectMembers(shop);

    Assert.assertEquals(4, shop.animals.size());
    Assert.assertEquals(shop.animals, ImmutableSet.of(new Bird(), new Cat(), new Dog(), new Fish()));
  }

  // Conditional multibinding also works for generic targets described by TypeLiteral.
  @Test
  public void testMultiConditionalBind_typeLiteral()
  {
    props.setProperty("animal.type", "pets");

    final Set<Animal> set1 = ImmutableSet.of(new Dog(), new Tiger());
    final Set<Animal> set2 = ImmutableSet.of(new Cat(), new Fish());
    final Set<Animal> set3 = ImmutableSet.of(new Cat());
    final Set<Animal> union = new HashSet<>();
    union.addAll(set1);
    union.addAll(set2);

    final Zoo<Animal> zoo1 = new Zoo<>(set1);
    final Zoo<Animal> zoo2 = new Zoo<>();

    Injector injector = Guice.createInjector(
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind
                .create(props, binder, new TypeLiteral<Set<Animal>>() {})
                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), set1)
                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), set2);

            ConditionalMultibind
                .create(props, binder, new TypeLiteral<Zoo<Animal>>() {})
                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), zoo1);
          }
        },
        new Module()
        {
          @Override
          public void configure(Binder binder)
          {
            ConditionalMultibind
                .create(props, binder, new TypeLiteral<Set<Animal>>() {})
                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), set3);

            ConditionalMultibind
                .create(props, binder, new TypeLiteral<Set<Animal>>() {}, SanDiego.class)
                .addConditionBinding(ANIMAL_TYPE, Predicates.equalTo("pets"), union);

            ConditionalMultibind
                .create(props, binder, new TypeLiteral<Zoo<Animal>>() {})
                .addBinding(new TypeLiteral<Zoo<Animal>>() {});
          }
        }
    );

    Set<Set<Animal>> actualAnimalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Set<Animal>>>() {}));
    Assert.assertEquals(3, actualAnimalSet.size());
    Assert.assertEquals(ImmutableSet.of(set1, set2, set3), actualAnimalSet);

    actualAnimalSet = injector.getInstance(Key.get(new TypeLiteral<Set<Set<Animal>>>() {}, SanDiego.class));
    Assert.assertEquals(1, actualAnimalSet.size());
    Assert.assertEquals(ImmutableSet.of(union), actualAnimalSet);

    final Set<Zoo<Animal>> actualZooSet = injector.getInstance(Key.get(new TypeLiteral<Set<Zoo<Animal>>>() {}));
    Assert.assertEquals(2, actualZooSet.size());
    Assert.assertEquals(ImmutableSet.of(zoo1, zoo2), actualZooSet);
  }

  // Test fixture hierarchy: equality is by concrete class plus the type string,
  // so the ImmutableSet comparisons above are well defined.
  abstract static class Animal
  {
    private final String type;

    Animal(String type)
    {
      this.type = type;
    }

    @Override
    public String toString()
    {
      return "Animal{" +
             "type='" + type + '\'' +
             '}';
    }

    @Override
    public boolean equals(Object o)
    {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      Animal animal = (Animal) o;

      return type != null ? type.equals(animal.type) : animal.type == null;
    }

    @Override
    public int hashCode()
    {
      return type != null ? type.hashCode() : 0;
    }
  }

  // Injection target used by testMultiConditionalBind_inject().
  static class PetShotAvails
  {
    @Inject
    Set<Animal> animals;
  }

  static class Dog extends Animal
  {
    Dog()
    {
      super("dog");
    }
  }

  static class Cat extends Animal
  {
    Cat()
    {
      super("cat");
    }
  }

  static class Fish extends Animal
  {
    Fish()
    {
      super("fish");
    }
  }

  static class Tiger extends Animal
  {
    Tiger()
    {
      super("tiger");
    }
  }

  static class Bird extends Animal
  {
    Bird()
    {
      super("bird");
    }
  }

  // Generic fixture used by the TypeLiteral test; equality is by contents.
  static class Zoo<T>
  {
    Set<T> animals;

    public Zoo()
    {
      animals = new HashSet<>();
    }

    public Zoo(Set<T> animals)
    {
      this.animals = animals;
    }

    @Override
    public boolean equals(Object o)
    {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      Zoo<?> zoo = (Zoo<?>) o;

      return animals != null ? animals.equals(zoo.animals) : zoo.animals == null;
    }

    @Override
    public int hashCode()
    {
      return animals != null ? animals.hashCode() : 0;
    }

    @Override
    public String toString()
    {
      return "Zoo{" +
             "animals=" + animals +
             '}';
    }
  }

  // Qualifier annotations used to keep the two multibound sets apart.
  @Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
  @Retention(RetentionPolicy.RUNTIME)
  @BindingAnnotation
  @interface SanDiego
  {
  }

  @Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
  @Retention(RetentionPolicy.RUNTIME)
  @BindingAnnotation
  @interface SanJose
  {
  }
}
| apache-2.0 |
nleite/sling | tooling/support/provisioning-model/src/main/java/org/apache/sling/provisioning/model/KeyValueMap.java | 2707 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.sling.provisioning.model;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
/**
* Helper class to hold key value pairs.
*/
public class KeyValueMap<T>
    extends Commentable
    implements Iterable<Map.Entry<String, T>> {

    /** Backing store; a tree map keeps the keys sorted. */
    private final Map<String, T> entries = new TreeMap<String, T>();

    /**
     * Look up the value stored under a key.
     * @param key The key of the item.
     * @return The stored value, or {@code null} if the key is absent.
     */
    public T get(final String key) {
        return this.entries.get(key);
    }

    /**
     * Store a value under a key, replacing any previous value.
     * @param key The key of the item.
     * @param value The value
     */
    public void put(final String key, final T value) {
        this.entries.put(key, value);
    }

    /**
     * Remove the value stored under a key.
     * @param key The key of the item.
     * @return The value that was removed, or {@code null} if none was stored.
     * @since 1.1
     */
    public T remove(final String key) {
        return this.entries.remove(key);
    }

    /**
     * Copy every entry of the other map into this one.
     * @param map The other map
     */
    public void putAll(final KeyValueMap<T> map) {
        this.entries.putAll(map.entries);
    }

    @Override
    public Iterator<Entry<String, T>> iterator() {
        return this.entries.entrySet().iterator();
    }

    /**
     * Tell whether this map has no entries.
     * @return {@code true} if the map is empty.
     */
    public boolean isEmpty() {
        return this.entries.isEmpty();
    }

    @Override
    public String toString() {
        return entries.toString();
    }

    /**
     * Count the entries of this map.
     * @return The size of the map.
     * @since 1.1
     */
    public int size() {
        return this.entries.size();
    }
}
| apache-2.0 |
logzio/camel | components/camel-jcr/src/test/java/org/apache/camel/component/jcr/JcrProducerTest.java | 2349 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.jcr;
import javax.jcr.Node;
import javax.jcr.Session;
import org.apache.camel.Exchange;
import org.apache.camel.builder.RouteBuilder;
import org.junit.Test;
public class JcrProducerTest extends JcrRouteTestSupport {

    @Test
    public void testJcrProducer() throws Exception {
        // Sending a body to the JCR endpoint returns the identifier of the
        // node that was created; verify the node, its path and its property.
        Exchange exchange = createExchangeWithBody("<hello>world!</hello>");
        Exchange out = template.send("direct:a", exchange);
        assertNotNull(out);
        String uuid = out.getOut().getBody(String.class);
        Session session = openSession();
        try {
            Node node = session.getNodeByIdentifier(uuid);
            assertNotNull(node);
            assertEquals("/home/test/node", node.getPath());
            assertEquals("<hello>world!</hello>", node.getProperty("my.contents.property").getString());
        } finally {
            // Release the JCR session even when an assertion above fails.
            if (session != null && session.isLive()) {
                session.logout();
            }
        }
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // Route under test: node name comes from the JCR_NODE_NAME
                // header, the body is stored in "my.contents.property".
                // START SNIPPET: jcr-create-node
                from("direct:a").setHeader(JcrConstants.JCR_NODE_NAME, constant("node"))
                    .setHeader("my.contents.property", body())
                    .to("jcr://user:pass@repository/home/test");
                // END SNIPPET: jcr-create-node
            }
        };
    }
}
| apache-2.0 |
kuzemchik/presto | presto-hive/src/main/java/com/facebook/presto/hive/orc/OrcPageSourceFactory.java | 7596 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.hive.orc;
import com.facebook.presto.hive.HiveColumnHandle;
import com.facebook.presto.hive.HivePageSourceFactory;
import com.facebook.presto.hive.HivePartitionKey;
import com.facebook.presto.orc.OrcDataSource;
import com.facebook.presto.orc.OrcPredicate;
import com.facebook.presto.orc.OrcReader;
import com.facebook.presto.orc.OrcRecordReader;
import com.facebook.presto.orc.TupleDomainOrcPredicate;
import com.facebook.presto.orc.TupleDomainOrcPredicate.ColumnReference;
import com.facebook.presto.orc.metadata.MetadataReader;
import com.facebook.presto.orc.metadata.OrcMetadataReader;
import com.facebook.presto.spi.ConnectorPageSource;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.TupleDomain;
import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeManager;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.airlift.units.DataSize;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.joda.time.DateTimeZone;
import javax.inject.Inject;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import static com.facebook.presto.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT;
import static com.facebook.presto.hive.HiveErrorCode.HIVE_MISSING_DATA;
import static com.facebook.presto.hive.HiveSessionProperties.getOrcMaxBufferSize;
import static com.facebook.presto.hive.HiveSessionProperties.getOrcMaxMergeDistance;
import static com.facebook.presto.hive.HiveSessionProperties.getOrcStreamBufferSize;
import static com.facebook.presto.hive.HiveSessionProperties.isOptimizedReaderEnabled;
import static com.facebook.presto.hive.HiveUtil.isDeserializerClass;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Strings.nullToEmpty;
import static java.lang.String.format;
public class OrcPageSourceFactory
        implements HivePageSourceFactory
{
    private final TypeManager typeManager;

    @Inject
    public OrcPageSourceFactory(TypeManager typeManager)
    {
        this.typeManager = checkNotNull(typeManager, "typeManager is null");
    }

    /**
     * Creates an ORC page source for the given split, or returns an empty
     * Optional when this factory does not apply (optimized reader disabled
     * via session properties, or the split is not serialized with OrcSerde).
     */
    @Override
    public Optional<? extends ConnectorPageSource> createPageSource(
            Configuration configuration,
            ConnectorSession session,
            Path path,
            long start,
            long length,
            Properties schema,
            List<HiveColumnHandle> columns,
            List<HivePartitionKey> partitionKeys,
            TupleDomain<HiveColumnHandle> effectivePredicate,
            DateTimeZone hiveStorageTimeZone)
    {
        if (!isOptimizedReaderEnabled(session)) {
            return Optional.empty();
        }

        if (!isDeserializerClass(schema, OrcSerde.class)) {
            return Optional.empty();
        }

        return Optional.of(createOrcPageSource(
                new OrcMetadataReader(),
                configuration,
                path,
                start,
                length,
                columns,
                partitionKeys,
                effectivePredicate,
                hiveStorageTimeZone,
                typeManager,
                getOrcMaxMergeDistance(session),
                getOrcMaxBufferSize(session),
                getOrcStreamBufferSize(session)));
    }

    // Opens the file, builds the set of columns to read and the predicate,
    // and wires everything into an OrcPageSource. Any failure is translated
    // into a PrestoException with a split-specific error code and message.
    public static OrcPageSource createOrcPageSource(MetadataReader metadataReader,
            Configuration configuration,
            Path path,
            long start,
            long length,
            List<HiveColumnHandle> columns,
            List<HivePartitionKey> partitionKeys,
            TupleDomain<HiveColumnHandle> effectivePredicate,
            DateTimeZone hiveStorageTimeZone,
            TypeManager typeManager,
            DataSize maxMergeDistance,
            DataSize maxBufferSize,
            DataSize streamBufferSize)
    {
        OrcDataSource orcDataSource;
        try {
            FileSystem fileSystem = path.getFileSystem(configuration);
            long size = fileSystem.getFileStatus(path).getLen();
            FSDataInputStream inputStream = fileSystem.open(path);
            orcDataSource = new HdfsOrcDataSource(path.toString(), size, maxMergeDistance, maxBufferSize, streamBufferSize, inputStream);
        }
        catch (Exception e) {
            // A closed file system or a missing file means the split cannot be
            // opened at all; report it without the split-detail message.
            if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") ||
                    e instanceof FileNotFoundException) {
                throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
            }
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
        }

        // Only non-partition columns are read from the file; partition values
        // are supplied separately to the OrcPageSource.
        ImmutableMap.Builder<Integer, Type> includedColumns = ImmutableMap.builder();
        ImmutableList.Builder<ColumnReference<HiveColumnHandle>> columnReferences = ImmutableList.builder();
        for (HiveColumnHandle column : columns) {
            if (!column.isPartitionKey()) {
                Type type = typeManager.getType(column.getTypeSignature());
                includedColumns.put(column.getHiveColumnIndex(), type);
                columnReferences.add(new ColumnReference<>(column, column.getHiveColumnIndex(), type));
            }
        }

        OrcPredicate predicate = new TupleDomainOrcPredicate<>(effectivePredicate, columnReferences.build());

        try {
            OrcReader reader = new OrcReader(orcDataSource, metadataReader);
            OrcRecordReader recordReader = reader.createRecordReader(
                    includedColumns.build(),
                    predicate,
                    start,
                    length,
                    hiveStorageTimeZone);

            return new OrcPageSource(
                    recordReader,
                    orcDataSource,
                    partitionKeys,
                    columns,
                    hiveStorageTimeZone,
                    typeManager);
        }
        catch (Exception e) {
            // Creation failed, so no page source took ownership of the data
            // source; close it here before translating the failure.
            try {
                orcDataSource.close();
            }
            catch (IOException ignored) {
            }
            if (e instanceof PrestoException) {
                throw (PrestoException) e;
            }
            String message = splitError(e, path, start, length);
            // Compared by simple name to avoid a hard dependency on the HDFS
            // exception class.
            if (e.getClass().getSimpleName().equals("BlockMissingException")) {
                throw new PrestoException(HIVE_MISSING_DATA, message, e);
            }
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
        }
    }

    private static String splitError(Throwable t, Path path, long start, long length)
    {
        return format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, t.getMessage());
    }
}
| apache-2.0 |
liukd/Ninja-1 | Ninja/src/io/github/mthli/Ninja/View/NinjaToast.java | 848 | package io.github.mthli.Ninja.View;
import android.content.Context;
import android.os.Handler;
import android.widget.Toast;
/**
 * Helper that shows a single shared, auto-cancelling Toast.
 *
 * Repeated calls within the 2 second window update the text of the toast
 * already on screen instead of queueing a new one, and the pending
 * auto-cancel is rescheduled on every call.
 */
public class NinjaToast {
    // Single shared toast so repeated messages replace each other.
    private static Toast toast;
    // NOTE(review): Handler() binds to the looper of the thread that first
    // loads this class; callers are expected to invoke show() on the main
    // thread — verify against call sites.
    private static final Handler handler = new Handler();
    private static final Runnable runnable = new Runnable() {
        @Override
        public void run() {
            toast.cancel();
        }
    };

    /** Shows the given string resource as a toast. */
    public static void show(Context context, int stringResId) {
        show(context, context.getString(stringResId));
    }

    /**
     * Shows {@code text} as a toast, replacing the text of a toast that is
     * already visible and postponing the auto-cancel by another 2 seconds.
     */
    public static void show(Context context, String text) {
        handler.removeCallbacks(runnable);
        if (toast != null) {
            // NOTE(review): re-showing a Toast after cancel() is unreliable on
            // some Android versions — consider recreating it instead; verify
            // on the supported API levels.
            toast.setText(text);
        } else {
            // Create the toast with the application context: the instance is
            // kept in a static field, so holding an Activity context here
            // would leak that Activity for the lifetime of the process.
            toast = Toast.makeText(context.getApplicationContext(), text, Toast.LENGTH_SHORT);
        }
        handler.postDelayed(runnable, 2000);
        toast.show();
    }
}
| apache-2.0 |
andyao/dagger | core/src/test/java/dagger/internal/InstanceFactoryTest.java | 1425 | /*
* Copyright (C) 2014 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dagger.internal;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import static com.google.common.truth.Truth.assert_;
@RunWith(JUnit4.class)
public final class InstanceFactoryTest {
  @Rule public final ExpectedException thrown = ExpectedException.none();

  // The factory must hand back the exact instance it was created with,
  // on every call to get().
  @Test public void instanceFactory() {
    Object instance = new Object();
    Factory<Object> factory = InstanceFactory.create(instance);
    assert_().that(factory.get()).isEqualTo(instance);
    assert_().that(factory.get()).isEqualTo(instance);
    assert_().that(factory.get()).isEqualTo(instance);
  }

  // Null instances are rejected eagerly at creation time.
  @Test public void create_throwsNullPointerException() {
    thrown.expect(NullPointerException.class);
    InstanceFactory.create(null);
  }
}
| apache-2.0 |
unforgiven250/charts4j | src/main/java/com/googlecode/charts4j/parameters/package-info.java | 1363 | /**
*
* The MIT License
*
* Copyright (c) 2011 the original author or authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* <b>com.googlecode.charts4j.parameters package for Charts4J internal use only.</b>
*
* @author Julien Chastang (julien.c.chastang at gmail dot com)
*/
package com.googlecode.charts4j.parameters;
| mit |
thklaus/rstudio | src/gwt/src/org/rstudio/studio/client/workbench/views/source/events/CodeBrowserFinishedHandler.java | 845 | /*
* CodeBrowserFinishedHandler.java
*
* Copyright (C) 2009-12 by RStudio, Inc.
*
* Unless you have received this program directly from RStudio pursuant
* to the terms of a commercial license agreement with RStudio, then
* this program is licensed to you under the terms of version 3 of the
* GNU Affero General Public License. This program is distributed WITHOUT
* ANY EXPRESS OR IMPLIED WARRANTY, INCLUDING THOSE OF NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Please refer to the
* AGPL (http://www.gnu.org/licenses/agpl-3.0.txt) for more details.
*
*/
package org.rstudio.studio.client.workbench.views.source.events;
import com.google.gwt.event.shared.EventHandler;
/**
 * GWT event handler contract for {@link CodeBrowserFinishedEvent}.
 */
public interface CodeBrowserFinishedHandler extends EventHandler
{
   // Invoked when a CodeBrowserFinishedEvent fires.
   void onCodeBrowserFinished(CodeBrowserFinishedEvent event);
}
| agpl-3.0 |
lynus/hadoop-over-rdma | src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestCapBasedLoadManager.java | 5621 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskStatus.State;
import junit.framework.TestCase;
/**
 * Exercises the canAssignMap and canAssignReduce methods in
 * CapBasedLoadManager.
 */
public class TestCapBasedLoadManager extends TestCase {

  /** Creates a map task status that reports itself as RUNNING. */
  private TaskStatus newRunningMapStatus() {
    TaskStatus status = new MapTaskStatus();
    status.setRunState(State.RUNNING);
    return status;
  }

  /** Creates a reduce task status that reports itself as RUNNING. */
  private TaskStatus newRunningReduceStatus() {
    TaskStatus status = new ReduceTaskStatus();
    status.setRunState(State.RUNNING);
    return status;
  }

  /**
   * Builds a TaskTrackerStatus advertising the given slot capacities and
   * currently-running task counts.
   *
   * @param mapCap capacity of map slots on the tracker
   * @param reduceCap capacity of reduce slots on the tracker
   * @param runningMap number of running map tasks to report
   * @param runningReduce number of running reduce tasks to report
   */
  private TaskTrackerStatus newTrackerStatus(int mapCap, int reduceCap,
      int runningMap, int runningReduce) {
    List<TaskStatus> statuses = new ArrayList<TaskStatus>();
    // Map statuses first, then reduce statuses.
    for (int m = 0; m < runningMap; m++) {
      statuses.add(newRunningMapStatus());
    }
    for (int r = 0; r < runningReduce; r++) {
      statuses.add(newRunningReduceStatus());
    }
    return new TaskTrackerStatus("tracker", "tracker_host", 1234, statuses,
        0, 0, mapCap, reduceCap);
  }

  /**
   * Repeatedly calls canAssignMap until it refuses, then verifies how many
   * assignments were allowed for the given tracker/cluster statistics.
   */
  private void checkCanAssignMap(float maxDiff, int mapCap, int runningMap,
      int totalMapSlots, int totalRunnableMap, int expectedAssigned) {
    Configuration conf = new Configuration();
    conf.setFloat("mapred.fairscheduler.load.max.diff", maxDiff);
    CapBasedLoadManager manager = new CapBasedLoadManager();
    manager.setConf(conf);
    TaskTrackerStatus tracker = newTrackerStatus(mapCap, 1, runningMap, 1);
    int assigned = 0;
    while (manager.canAssignMap(tracker, totalRunnableMap, totalMapSlots,
        assigned)) {
      assigned++;
    }
    assertEquals("When maxDiff=" + maxDiff + ", with totalRunnableMap="
        + totalRunnableMap + " and totalMapSlots=" + totalMapSlots
        + ", a tracker with runningMap=" + runningMap + " and mapCap="
        + mapCap + " should be able to assign " + expectedAssigned + " maps",
        expectedAssigned, assigned);
  }

  /**
   * Test canAssignMap method.
   */
  public void testCanAssignMap() {
    checkCanAssignMap(0.0f, 5, 0, 50, 1, 1);
    checkCanAssignMap(0.0f, 5, 1, 50, 10, 0);
    // 20% load + 20% diff = 40% of available slots, but rounds
    // up with floating point error: so we get 3/5 slots on TT.
    // 1 already taken, so assigns 2 more
    checkCanAssignMap(0.2f, 5, 1, 50, 10, 2);
    checkCanAssignMap(0.0f, 5, 1, 50, 11, 1);
    checkCanAssignMap(0.0f, 5, 2, 50, 11, 0);
    checkCanAssignMap(0.3f, 5, 2, 50, 6, 1);
    checkCanAssignMap(1.0f, 5, 5, 50, 50, 0);
  }

  /**
   * Repeatedly calls canAssignReduce until it refuses, then verifies how
   * many assignments were allowed.
   */
  private void checkCanAssignReduce(float maxDiff, int reduceCap,
      int runningReduce, int totalReduceSlots, int totalRunnableReduce,
      int expectedAssigned) {
    Configuration conf = new Configuration();
    conf.setFloat("mapred.fairscheduler.load.max.diff", maxDiff);
    CapBasedLoadManager manager = new CapBasedLoadManager();
    manager.setConf(conf);
    TaskTrackerStatus tracker = newTrackerStatus(1, reduceCap, 1,
        runningReduce);
    int assigned = 0;
    while (manager.canAssignReduce(tracker, totalRunnableReduce,
        totalReduceSlots, assigned)) {
      assigned++;
    }
    assertEquals("When maxDiff=" + maxDiff + ", with totalRunnableReduce="
        + totalRunnableReduce + " and totalReduceSlots=" + totalReduceSlots
        + ", a tracker with runningReduce=" + runningReduce + " and reduceCap="
        + reduceCap + " should be able to assign " + expectedAssigned + " reduces",
        expectedAssigned, assigned);
  }

  /**
   * Test canAssignReduce method.
   */
  public void testCanAssignReduce() {
    checkCanAssignReduce(0.0f, 5, 0, 50, 1, 1);
    checkCanAssignReduce(0.0f, 5, 1, 50, 10, 0);
    // 20% load + 20% diff = 40% of available slots, but rounds
    // up with floating point error: so we get 3/5 slots on TT.
    // 1 already taken, so assigns 2 more
    checkCanAssignReduce(0.2f, 5, 1, 50, 10, 2);
    checkCanAssignReduce(0.0f, 5, 1, 50, 11, 1);
    checkCanAssignReduce(0.0f, 5, 2, 50, 11, 0);
    checkCanAssignReduce(0.3f, 5, 2, 50, 6, 1);
    checkCanAssignReduce(1.0f, 5, 5, 50, 50, 0);
  }
}
| apache-2.0 |
mdeinum/spring-boot | spring-boot-tests/spring-boot-smoke-tests/spring-boot-smoke-test-hateoas/src/main/java/smoketest/hateoas/web/CustomerController.java | 2437 | /*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package smoketest.hateoas.web;
import smoketest.hateoas.domain.Customer;
import smoketest.hateoas.domain.CustomerRepository;
import org.springframework.hateoas.CollectionModel;
import org.springframework.hateoas.EntityModel;
import org.springframework.hateoas.server.EntityLinks;
import org.springframework.hateoas.server.ExposesResourceFor;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
/**
 * Exposes {@code Customer} resources over HTTP and decorates each response
 * with hypermedia links via the injected {@code EntityLinks}.
 */
@Controller
@RequestMapping("/customers")
@ExposesResourceFor(Customer.class)
public class CustomerController {

	private final CustomerRepository repository;

	private final EntityLinks entityLinks;

	public CustomerController(CustomerRepository repository, EntityLinks entityLinks) {
		this.repository = repository;
		this.entityLinks = entityLinks;
	}

	/**
	 * Lists all customers, adding the collection-resource link.
	 */
	@GetMapping(produces = MediaType.APPLICATION_JSON_VALUE)
	HttpEntity<CollectionModel<Customer>> showCustomers() {
		CollectionModel<Customer> model = CollectionModel.of(this.repository.findAll());
		model.add(this.entityLinks.linkToCollectionResource(Customer.class));
		return new ResponseEntity<>(model, HttpStatus.OK);
	}

	/**
	 * Shows a single customer, adding the item-resource link.
	 * @param id the customer identifier taken from the request path
	 */
	@GetMapping(path = "/{id}", produces = MediaType.APPLICATION_JSON_VALUE)
	HttpEntity<EntityModel<Customer>> showCustomer(@PathVariable Long id) {
		EntityModel<Customer> model = EntityModel.of(this.repository.findOne(id));
		model.add(this.entityLinks.linkToItemResource(Customer.class, id));
		return new ResponseEntity<>(model, HttpStatus.OK);
	}

}
| apache-2.0 |
UnrememberMe/pants | tests/java/org/pantsbuild/tools/junit/lib/NotATestInterface.java | 542 | // Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.tools.junit.lib;
import org.junit.Test;
/**
* This test is intentionally under a java_library() BUILD target so it will not be run
* on its own. It is run by the ConsoleRunnerTest suite to test ConsoleRunnerImpl.
*/
/**
 * Negative fixture: an interface method carrying {@code @Test}, which a
 * correct runner must never attempt to invoke.
 */
public interface NotATestInterface {
  @Test
  // Even though annotated with @Test, this is an interface so the test shouldn't be invoked
  public void natif1();
}
| apache-2.0 |
roele/sling | bundles/extensions/discovery/commons/src/main/java/org/apache/sling/discovery/commons/providers/spi/ClusterSyncService.java | 3832 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.discovery.commons.providers.spi;
import org.apache.sling.discovery.commons.providers.BaseTopologyView;
/**
* The ClusterSyncService can be used to establish strong
* consistency with the underlying (eventually consistent) repository in use.
* <p>
* The issue is described in length in SLING-4627 - the short
* version is composed of two different factors:
* <ul>
* <li>concurrency of discovery service and its listeners on the
* different instances: upon a change in the topology it is
* important that one listener doesn't do activity based on
* an older incarnation of the topologyView than another listener
* on another instance. they should change from one view to the
* next view based on the same repository state.</li>
* </li>
* <li>when an instance leaves the cluster (eg crashes), then
* depending on the repository it might have left a backlog around
* which would yet have to be processed and which could contain
* relevant topology-dependent data that should be waited for
* to settle before the topology-dependent activity can continue
* </li>
* </ul>
* Both of these two aspects are handled by this ClusterSyncService.
* The former one by introducing a 'sync token' that gets written
* to the repository and on receiving it by the peers they know
* that the writing instance is aware of the ongoing change, that
* the writing instance has sent out TOPOLOGY_CHANGING and that
* the receiving instance has seen all changes that the writing
* instance did prior to sending a TOPOLOGY_CHANGING.
* The latter aspect is achieved by making use of the underlying
* repository: eg on Oak the 'discovery lite' descriptor is
* used to determine if any instance not part of the new view
* is still being deactivated (eg has backlog). So this second
* part is repository dependent.
*/
public interface ClusterSyncService {

    /**
     * Starts the synchronization process and calls the provided
     * callback upon completion.
     * <p>
     * sync() is not thread-safe and should not be invoked
     * concurrently.
     * <p>
     * If sync() gets called before a previous invocation finished,
     * that previous invocation will be discarded, ie the callback
     * of the previous invocation will no longer be called.
     * <p>
     * The synchronization process consists of making sure that
     * the repository has processed any potential backlog of instances
     * that are no longer part of the provided, new view. Plus
     * it writes a 'sync-token' to a well-defined location, with
     * all peers doing the same, and upon seeing all other sync-tokens
     * declares successful completion - at which point it calls the
     * callback.run().
     * @param view the view which all instances in the local cluster
     * should agree on having seen
     * @param callback the runnable which should be called after
     * successful syncing
     */
    void sync(BaseTopologyView view, Runnable callback);

    /**
     * Cancels an ongoing sync(), if any is in progress — presumably the
     * callback of the cancelled invocation will then no longer be called,
     * mirroring the discard behavior documented on sync(); implementations
     * should confirm.
     */
    void cancelSync();
}
| apache-2.0 |
adrian-galbenus/gateway | management/src/main/java/org/kaazing/gateway/management/jmx/ServiceDefaultsConfigurationMXBean.java | 844 | /**
* Copyright 2007-2016, Kaazing Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaazing.gateway.management.jmx;
/**
 * JMX MXBean view of the gateway's service-defaults configuration.
 * NOTE(review): attribute semantics are inferred from the accessor names;
 * confirm against the implementing class.
 */
public interface ServiceDefaultsConfigurationMXBean {
    /** @return an identifier for this configuration bean */
    int getId();
    /** @return the default accept options, in string form */
    String getAcceptOptions();
    /** @return the default connect options, in string form */
    String getConnectOptions();
    /** @return the default MIME-type mappings, in string form */
    String getMimeMappings();
}
| apache-2.0 |
HappyYang/java-design-patterns | model-view-controller/src/main/java/com/iluwatar/modelviewcontroller/GiantModel.java | 904 | package com.iluwatar.modelviewcontroller;
/**
 * Model part of the MVC triad: a mutable holder for the giant's state
 * (health, fatigue and nourishment).
 */
public class GiantModel {

  private Health health;
  private Fatigue fatigue;
  private Nourishment nourishment;

  GiantModel(Health health, Fatigue fatigue, Nourishment nourishment) {
    this.nourishment = nourishment;
    this.fatigue = fatigue;
    this.health = health;
  }

  /** @return the giant's current health */
  public Health getHealth() {
    return this.health;
  }

  /** @param health the new health value */
  public void setHealth(Health health) {
    this.health = health;
  }

  /** @return the giant's current fatigue */
  public Fatigue getFatigue() {
    return this.fatigue;
  }

  /** @param fatigue the new fatigue value */
  public void setFatigue(Fatigue fatigue) {
    this.fatigue = fatigue;
  }

  /** @return the giant's current nourishment */
  public Nourishment getNourishment() {
    return this.nourishment;
  }

  /** @param nourishment the new nourishment value */
  public void setNourishment(Nourishment nourishment) {
    this.nourishment = nourishment;
  }

  /** Human-readable summary of all three state components. */
  @Override
  public String toString() {
    return String.format("The giant looks %s, %s and %s.", health, fatigue, nourishment);
  }
}
| mit |
fred84/elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java | 9440 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.bucketselector;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.TreeMap;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.GAP_POLICY;
/**
 * Builder for the {@code bucket_selector} pipeline aggregation. Carries a
 * script, a map from script variable name (e.g. {@code _value0}) to the
 * buckets path the variable is resolved from, and a {@link GapPolicy}
 * (defaults to SKIP). Evaluation itself happens in
 * {@link BucketSelectorPipelineAggregator}, created by
 * {@link #createInternal(Map)}.
 */
public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAggregationBuilder<BucketSelectorPipelineAggregationBuilder> {
    public static final String NAME = "bucket_selector";

    // script variable name -> buckets path
    private final Map<String, String> bucketsPathsMap;
    private Script script;
    private GapPolicy gapPolicy = GapPolicy.SKIP;

    public BucketSelectorPipelineAggregationBuilder(String name, Map<String, String> bucketsPathsMap, Script script) {
        // Sort paths via TreeMap so the buckets-path array handed to the parent is deterministic.
        super(name, NAME, new TreeMap<>(bucketsPathsMap).values().toArray(new String[bucketsPathsMap.size()]));
        this.bucketsPathsMap = bucketsPathsMap;
        this.script = script;
    }

    public BucketSelectorPipelineAggregationBuilder(String name, Script script, String... bucketsPaths) {
        this(name, convertToBucketsPathMap(bucketsPaths), script);
    }

    /**
     * Read from a stream. Wire order: map size, key/value pairs, script, gap policy
     * (must stay in sync with {@link #doWriteTo(StreamOutput)}).
     */
    public BucketSelectorPipelineAggregationBuilder(StreamInput in) throws IOException {
        super(in, NAME);
        int mapSize = in.readVInt();
        bucketsPathsMap = new HashMap<>(mapSize);
        for (int i = 0; i < mapSize; i++) {
            bucketsPathsMap.put(in.readString(), in.readString());
        }
        script = new Script(in);
        gapPolicy = GapPolicy.readFrom(in);
    }

    @Override
    protected void doWriteTo(StreamOutput out) throws IOException {
        // Mirrors the StreamInput constructor above.
        out.writeVInt(bucketsPathsMap.size());
        for (Entry<String, String> e : bucketsPathsMap.entrySet()) {
            out.writeString(e.getKey());
            out.writeString(e.getValue());
        }
        script.writeTo(out);
        gapPolicy.writeTo(out);
    }

    // Names the paths "_value0", "_value1", ... in array order.
    private static Map<String, String> convertToBucketsPathMap(String[] bucketsPaths) {
        Map<String, String> bucketsPathsMap = new HashMap<>();
        for (int i = 0; i < bucketsPaths.length; i++) {
            bucketsPathsMap.put("_value" + i, bucketsPaths[i]);
        }
        return bucketsPathsMap;
    }

    /**
     * Sets the gap policy to use for this aggregation.
     */
    public BucketSelectorPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) {
        if (gapPolicy == null) {
            throw new IllegalArgumentException("[gapPolicy] must not be null: [" + name + "]");
        }
        this.gapPolicy = gapPolicy;
        return this;
    }

    /**
     * Gets the gap policy to use for this aggregation.
     */
    public GapPolicy gapPolicy() {
        return gapPolicy;
    }

    @Override
    protected PipelineAggregator createInternal(Map<String, Object> metaData) throws IOException {
        return new BucketSelectorPipelineAggregator(name, bucketsPathsMap, script, gapPolicy, metaData);
    }

    @Override
    protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(BUCKETS_PATH.getPreferredName(), bucketsPathsMap);
        builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script);
        builder.field(GAP_POLICY.getPreferredName(), gapPolicy.getName());
        return builder;
    }

    /**
     * Parses a bucket_selector definition from XContent. buckets_path may be
     * given as a single string, an array of strings (named _value0, _value1,
     * ...) or an object mapping variable names to paths; script may be a
     * string or an object. Both buckets_path and script are required.
     */
    public static BucketSelectorPipelineAggregationBuilder parse(String reducerName, XContentParser parser) throws IOException {
        XContentParser.Token token;
        Script script = null;
        String currentFieldName = null;
        Map<String, String> bucketsPathsMap = null;
        GapPolicy gapPolicy = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_STRING) {
                // scalar forms: single buckets_path, gap_policy name, or inline script
                if (BUCKETS_PATH.match(currentFieldName)) {
                    bucketsPathsMap = new HashMap<>();
                    bucketsPathsMap.put("_value", parser.text());
                } else if (GAP_POLICY.match(currentFieldName)) {
                    gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation());
                } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) {
                    script = Script.parse(parser);
                } else {
                    throw new ParsingException(parser.getTokenLocation(),
                            "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                // array form of buckets_path: auto-named _value0, _value1, ...
                if (BUCKETS_PATH.match(currentFieldName)) {
                    List<String> paths = new ArrayList<>();
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        String path = parser.text();
                        paths.add(path);
                    }
                    bucketsPathsMap = new HashMap<>();
                    for (int i = 0; i < paths.size(); i++) {
                        bucketsPathsMap.put("_value" + i, paths.get(i));
                    }
                } else {
                    throw new ParsingException(parser.getTokenLocation(),
                            "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
                }
            } else if (token == XContentParser.Token.START_OBJECT) {
                // object forms: a structured script, or an explicit variable->path map
                if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) {
                    script = Script.parse(parser);
                } else if (BUCKETS_PATH.match(currentFieldName)) {
                    Map<String, Object> map = parser.map();
                    bucketsPathsMap = new HashMap<>();
                    for (Map.Entry<String, Object> entry : map.entrySet()) {
                        bucketsPathsMap.put(entry.getKey(), String.valueOf(entry.getValue()));
                    }
                } else {
                    throw new ParsingException(parser.getTokenLocation(),
                            "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
                }
            } else {
                throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " in [" + reducerName + "].");
            }
        }

        if (bucketsPathsMap == null) {
            throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + BUCKETS_PATH.getPreferredName()
                    + "] for bucket_selector aggregation [" + reducerName + "]");
        }

        if (script == null) {
            throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + Script.SCRIPT_PARSE_FIELD.getPreferredName()
                    + "] for bucket_selector aggregation [" + reducerName + "]");
        }

        BucketSelectorPipelineAggregationBuilder factory =
                new BucketSelectorPipelineAggregationBuilder(reducerName, bucketsPathsMap, script);
        if (gapPolicy != null) {
            factory.gapPolicy(gapPolicy);
        }
        return factory;
    }

    @Override
    protected boolean overrideBucketsPath() {
        // buckets_path is handled by this builder's own parse() (see above),
        // so the base class must not consume it itself.
        return true;
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(bucketsPathsMap, script, gapPolicy);
    }

    @Override
    protected boolean doEquals(Object obj) {
        BucketSelectorPipelineAggregationBuilder other = (BucketSelectorPipelineAggregationBuilder) obj;
        return Objects.equals(bucketsPathsMap, other.bucketsPathsMap) && Objects.equals(script, other.script)
                && Objects.equals(gapPolicy, other.gapPolicy);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }
}
| apache-2.0 |
goodwinnk/intellij-community | java/java-tests/testData/inspection/deadCode/localVariables/src/A.java | 118 | class A {
public static void main(String[] args) {
int i = 0;
@SuppressWarnings("unused") int j = 0;
}
}
| apache-2.0 |
cooldoger/cassandra | src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java | 24266 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.compaction;
import java.util.*;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.*;
import com.google.common.primitives.Doubles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
import org.apache.cassandra.schema.CompactionParams;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.config.Config;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.db.rows.UnfilteredRowIterator;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.ISSTableScanner;
import org.apache.cassandra.io.sstable.format.SSTableReader;
public class LeveledCompactionStrategy extends AbstractCompactionStrategy
{
private static final Logger logger = LoggerFactory.getLogger(LeveledCompactionStrategy.class);
private static final String SSTABLE_SIZE_OPTION = "sstable_size_in_mb";
private static final boolean tolerateSstableSize = Boolean.getBoolean(Config.PROPERTY_PREFIX + "tolerate_sstable_size");
private static final String LEVEL_FANOUT_SIZE_OPTION = "fanout_size";
private static final String SINGLE_SSTABLE_UPLEVEL_OPTION = "single_sstable_uplevel";
public static final int DEFAULT_LEVEL_FANOUT_SIZE = 10;
@VisibleForTesting
final LeveledManifest manifest;
private final int maxSSTableSizeInMB;
private final int levelFanoutSize;
private final boolean singleSSTableUplevel;
    /**
     * Builds the strategy from the table's compaction options. Recognized
     * options: sstable_size_in_mb (default 160), fanout_size and
     * single_sstable_uplevel; the SizeTieredCompactionStrategyOptions parsed
     * here are passed through to the LeveledManifest.
     */
    public LeveledCompactionStrategy(ColumnFamilyStore cfs, Map<String, String> options)
    {
        super(cfs, options);
        int configuredMaxSSTableSize = 160;
        int configuredLevelFanoutSize = DEFAULT_LEVEL_FANOUT_SIZE;
        boolean configuredSingleSSTableUplevel = false;
        SizeTieredCompactionStrategyOptions localOptions = new SizeTieredCompactionStrategyOptions(options);
        if (options != null)
        {
            if (options.containsKey(SSTABLE_SIZE_OPTION))
            {
                configuredMaxSSTableSize = Integer.parseInt(options.get(SSTABLE_SIZE_OPTION));
                // Warn (but accept) extreme sizes unless suppressed via the
                // cassandra.tolerate_sstable_size system property.
                if (!tolerateSstableSize)
                {
                    if (configuredMaxSSTableSize >= 1000)
                        logger.warn("Max sstable size of {}MB is configured for {}.{}; having a unit of compaction this large is probably a bad idea",
                                configuredMaxSSTableSize, cfs.name, cfs.getTableName());
                    if (configuredMaxSSTableSize < 50)
                        logger.warn("Max sstable size of {}MB is configured for {}.{}. Testing done for CASSANDRA-5727 indicates that performance improves up to 160MB",
                                configuredMaxSSTableSize, cfs.name, cfs.getTableName());
                }
            }
            if (options.containsKey(LEVEL_FANOUT_SIZE_OPTION))
            {
                configuredLevelFanoutSize = Integer.parseInt(options.get(LEVEL_FANOUT_SIZE_OPTION));
            }
            if (options.containsKey(SINGLE_SSTABLE_UPLEVEL_OPTION))
            {
                configuredSingleSSTableUplevel = Boolean.parseBoolean(options.get(SINGLE_SSTABLE_UPLEVEL_OPTION));
            }
        }
        maxSSTableSizeInMB = configuredMaxSSTableSize;
        levelFanoutSize = configuredLevelFanoutSize;
        singleSSTableUplevel = configuredSingleSSTableUplevel;

        manifest = new LeveledManifest(cfs, this.maxSSTableSizeInMB, this.levelFanoutSize, localOptions);
        logger.trace("Created {}", manifest);
    }
    /** Delegates to the manifest: number of sstables currently on level {@code i}. */
    public int getLevelSize(int i)
    {
        return manifest.getLevelSize(i);
    }
    /** Delegates to the manifest: per-level sstable counts, indexed by level. */
    public int[] getAllLevelSize()
    {
        return manifest.getAllLevelSize();
    }
    @Override
    public void startup()
    {
        // Prime the manifest's last-compacted-key bookkeeping before the
        // base class starts accepting compaction work.
        manifest.calculateLastCompactedKeys();
        super.startup();
    }
    /**
     * the only difference between background and maximal in LCS is that maximal is still allowed
     * (by explicit user request) even when compaction is disabled.
     * <p>
     * Returns the next compaction task chosen by the manifest, falling back to
     * a tombstone compaction of a single droppable sstable when the manifest
     * has nothing to do; returns null when there is no work or references
     * cannot be acquired.
     */
    @SuppressWarnings("resource") // transaction is closed by AbstractCompactionTask::execute
    public AbstractCompactionTask getNextBackgroundTask(int gcBefore)
    {
        Collection<SSTableReader> previousCandidate = null;
        while (true)
        {
            OperationType op;
            LeveledManifest.CompactionCandidate candidate = manifest.getCompactionCandidates();
            if (candidate == null)
            {
                // if there is no sstable to compact in standard way, try compacting based on droppable tombstone ratio
                SSTableReader sstable = findDroppableSSTable(gcBefore);
                if (sstable == null)
                {
                    logger.trace("No compaction necessary for {}", this);
                    return null;
                }
                candidate = new LeveledManifest.CompactionCandidate(Collections.singleton(sstable),
                                                                    sstable.getSSTableLevel(),
                                                                    getMaxSSTableBytes());
                op = OperationType.TOMBSTONE_COMPACTION;
            }
            else
            {
                op = OperationType.COMPACTION;
            }

            // Already tried acquiring references without success. It means there is a race with
            // the tracker but candidate SSTables were not yet replaced in the compaction strategy manager
            if (candidate.sstables.equals(previousCandidate))
            {
                logger.warn("Could not acquire references for compacting SSTables {} which is not a problem per se," +
                            "unless it happens frequently, in which case it must be reported. Will retry later.",
                            candidate.sstables);
                return null;
            }

            LifecycleTransaction txn = cfs.getTracker().tryModify(candidate.sstables, OperationType.COMPACTION);
            if (txn != null)
            {
                AbstractCompactionTask newTask;
                // Single-sstable uplevel only applies to ordinary compactions moving one sstable.
                if (!singleSSTableUplevel || op == OperationType.TOMBSTONE_COMPACTION || txn.originals().size() > 1)
                    newTask = new LeveledCompactionTask(cfs, txn, candidate.level, gcBefore, candidate.maxSSTableBytes, false);
                else
                    newTask = new SingleSSTableLCSTask(cfs, txn, candidate.level);
                newTask.setCompactionType(op);
                return newTask;
            }
            // tryModify failed: remember this candidate so a repeat selection exits the loop above.
            previousCandidate = candidate.sstables;
        }
    }
    /**
     * Builds one task compacting every non-suspect sstable known to the
     * manifest; returns null when there are no sstables or references cannot
     * be acquired.
     */
    @SuppressWarnings("resource") // transaction is closed by AbstractCompactionTask::execute
    public synchronized Collection<AbstractCompactionTask> getMaximalTask(int gcBefore, boolean splitOutput)
    {
        Iterable<SSTableReader> sstables = manifest.getAllSSTables();

        Iterable<SSTableReader> filteredSSTables = filterSuspectSSTables(sstables);
        if (Iterables.isEmpty(sstables))
            return null;
        LifecycleTransaction txn = cfs.getTracker().tryModify(filteredSSTables, OperationType.COMPACTION);
        if (txn == null)
            return null;
        // Level 0 with unbounded task scope: the task re-levels the output.
        return Arrays.<AbstractCompactionTask>asList(new LeveledCompactionTask(cfs, txn, 0, gcBefore, getMaxSSTableBytes(), true));
    }
    /**
     * Builds a task over an explicit, user-supplied sstable set. A single
     * sstable keeps its own level; a mixed set is compacted as level 0 with
     * no size cap.
     */
    @Override
    @SuppressWarnings("resource") // transaction is closed by AbstractCompactionTask::execute
    public AbstractCompactionTask getUserDefinedTask(Collection<SSTableReader> sstables, int gcBefore)
    {
        if (sstables.isEmpty())
            return null;

        LifecycleTransaction transaction = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION);
        if (transaction == null)
        {
            logger.trace("Unable to mark {} for compaction; probably a background compaction got to it first.  You can disable background compactions temporarily if this is a problem", sstables);
            return null;
        }
        int level = sstables.size() > 1 ? 0 : sstables.iterator().next().getSSTableLevel();
        return new LeveledCompactionTask(cfs, transaction, level, gcBefore, level == 0 ? Long.MAX_VALUE : getMaxSSTableBytes(), false);
    }
@Override
public AbstractCompactionTask getCompactionTask(LifecycleTransaction txn, int gcBefore, long maxSSTableBytes)
{
assert txn.originals().size() > 0;
int level = -1;
// if all sstables are in the same level, we can set that level:
for (SSTableReader sstable : txn.originals())
{
if (level == -1)
level = sstable.getSSTableLevel();
if (level != sstable.getSSTableLevel())
level = 0;
}
return new LeveledCompactionTask(cfs, txn, level, gcBefore, maxSSTableBytes, false);
}
/**
* Leveled compaction strategy has guarantees on the data contained within each level so we
* have to make sure we only create groups of SSTables with members from the same level.
* This way we won't end up creating invalid sstables during anti-compaction.
* @param ssTablesToGroup
* @return Groups of sstables from the same level
*/
@Override
public Collection<Collection<SSTableReader>> groupSSTablesForAntiCompaction(Collection<SSTableReader> ssTablesToGroup)
{
int groupSize = 2;
Map<Integer, Collection<SSTableReader>> sstablesByLevel = new HashMap<>();
for (SSTableReader sstable : ssTablesToGroup)
{
Integer level = sstable.getSSTableLevel();
Collection<SSTableReader> sstablesForLevel = sstablesByLevel.get(level);
if (sstablesForLevel == null)
{
sstablesForLevel = new ArrayList<SSTableReader>();
sstablesByLevel.put(level, sstablesForLevel);
}
sstablesForLevel.add(sstable);
}
Collection<Collection<SSTableReader>> groupedSSTables = new ArrayList<>();
for (Collection<SSTableReader> levelOfSSTables : sstablesByLevel.values())
{
Collection<SSTableReader> currGroup = new ArrayList<>(groupSize);
for (SSTableReader sstable : levelOfSSTables)
{
currGroup.add(sstable);
if (currGroup.size() == groupSize)
{
groupedSSTables.add(currGroup);
currGroup = new ArrayList<>(groupSize);
}
}
if (currGroup.size() != 0)
groupedSSTables.add(currGroup);
}
return groupedSSTables;
}
    /**
     * Number of pending tasks as estimated by the manifest; also records the
     * figure with the compaction logger as a side effect.
     */
    public int getEstimatedRemainingTasks()
    {
        int n = manifest.getEstimatedTasks();
        cfs.getCompactionStrategyManager().compactionLogger.pending(this, n);
        return n;
    }
    /** Configured target sstable size converted from MB to bytes. */
    public long getMaxSSTableBytes()
    {
        return maxSSTableSizeInMB * 1024L * 1024L;
    }
    /** Configured fanout between consecutive levels (default {@link #DEFAULT_LEVEL_FANOUT_SIZE}). */
    public int getLevelFanoutSize()
    {
        return levelFanoutSize;
    }
    /**
     * Creates scanners for the given sstables restricted to {@code ranges}.
     * Sstables on a proper level (>= 1 on the manifest snapshot) share one
     * LeveledScanner per level, which opens a single sstable at a time in
     * sorted order; L0 sstables and sstables not found on the manifest get a
     * direct scanner each.
     */
    public ScannerList getScanners(Collection<SSTableReader> sstables, Collection<Range<Token>> ranges)
    {
        Set<SSTableReader>[] sstablesPerLevel = manifest.getSStablesPerLevelSnapshot();

        Multimap<Integer, SSTableReader> byLevel = ArrayListMultimap.create();
        for (SSTableReader sstable : sstables)
        {
            int level = sstable.getSSTableLevel();
            // if an sstable is not on the manifest, it was recently added or removed
            // so we add it to level -1 and create exclusive scanners for it - see below (#9935)
            if (level >= sstablesPerLevel.length || !sstablesPerLevel[level].contains(sstable))
            {
                logger.warn("Live sstable {} from level {} is not on corresponding level in the leveled manifest." +
                            " This is not a problem per se, but may indicate an orphaned sstable due to a failed" +
                            " compaction not cleaned up properly.",
                             sstable.getFilename(), level);
                level = -1;
            }
            byLevel.get(level).add(sstable);
        }

        List<ISSTableScanner> scanners = new ArrayList<ISSTableScanner>(sstables.size());
        try
        {
            for (Integer level : byLevel.keySet())
            {
                // level can be -1 when sstables are added to Tracker but not to LeveledManifest
                // since we don't know which level those sstable belong yet, we simply do the same as L0 sstables.
                if (level <= 0)
                {
                    // L0 makes no guarantees about overlapping-ness.  Just create a direct scanner for each
                    for (SSTableReader sstable : byLevel.get(level))
                        scanners.add(sstable.getScanner(ranges));
                }
                else
                {
                    // Create a LeveledScanner that only opens one sstable at a time, in sorted order
                    Collection<SSTableReader> intersecting = LeveledScanner.intersecting(byLevel.get(level), ranges);
                    if (!intersecting.isEmpty())
                    {
                        @SuppressWarnings("resource") // The ScannerList will be in charge of closing (and we close properly on errors)
                        ISSTableScanner scanner = new LeveledScanner(cfs.metadata(), intersecting, ranges);
                        scanners.add(scanner);
                    }
                }
            }
        }
        catch (Throwable t)
        {
            // Close any scanner opened so far before propagating.
            ISSTableScanner.closeAllAndPropagate(scanners, t);
        }

        return new ScannerList(scanners);
    }
    /**
     * Atomically swaps {@code removed} for {@code added} in the leveled
     * manifest (e.g. after a compaction finishes). Delegates entirely to
     * the manifest.
     */
    @Override
    public void replaceSSTables(Collection<SSTableReader> removed, Collection<SSTableReader> added)
    {
        manifest.replace(removed, added);
    }
    /**
     * Reacts to an sstable's metadata being rewritten: if its level changed,
     * tell the manifest to move it out of the old level and into the new one.
     */
    @Override
    public void metadataChanged(StatsMetadata oldMetadata, SSTableReader sstable)
    {
        if (sstable.getSSTableLevel() != oldMetadata.sstableLevel)
            manifest.newLevel(sstable, oldMetadata.sstableLevel);
    }
    /** Registers a newly tracked sstable with the leveled manifest. */
    @Override
    public void addSSTable(SSTableReader added)
    {
        manifest.add(added);
    }
    /** Removes an sstable from the leveled manifest when it leaves the tracker. */
    @Override
    public void removeSSTable(SSTableReader sstable)
    {
        manifest.remove(sstable);
    }
    /** Returns all sstables currently tracked by the leveled manifest. */
    @Override
    protected Set<SSTableReader> getSSTables()
    {
        return manifest.getSSTables();
    }
    // Lazily creates SSTableBoundedScanner for sstable that are assumed to be from the
    // same level (e.g. non overlapping) - see #4142
    private static class LeveledScanner extends AbstractIterator<UnfilteredRowIterator> implements ISSTableScanner
    {
        private final TableMetadata metadata;
        // token ranges to restrict each per-sstable scanner to (may be null = all)
        private final Collection<Range<Token>> ranges;
        // sstables of this level, sorted by token order in the constructor
        private final List<SSTableReader> sstables;
        private final Iterator<SSTableReader> sstableIterator;
        // estimated uncompressed / on-disk bytes covered by this scanner
        private final long totalLength;
        private final long compressedLength;
        // scanner over the sstable currently being read; null once exhausted
        private ISSTableScanner currentScanner;
        // bytes accounted for by already-closed scanners (for getCurrentPosition)
        private long positionOffset;
        private long totalBytesScanned = 0;

        /**
         * Builds a scanner that walks the given (assumed non-overlapping)
         * sstables one at a time, in sorted token order, opening each
         * underlying scanner only when the previous one is exhausted.
         * Length estimates are scaled by the fraction of keys expected to
         * fall inside {@code ranges}.
         */
        public LeveledScanner(TableMetadata metadata, Collection<SSTableReader> sstables, Collection<Range<Token>> ranges)
        {
            this.metadata = metadata;
            this.ranges = ranges;

            // add only sstables that intersect our range, and estimate how much data that involves
            this.sstables = new ArrayList<>(sstables.size());
            long length = 0;
            long cLength = 0;
            for (SSTableReader sstable : sstables)
            {
                this.sstables.add(sstable);
                long estimatedKeys = sstable.estimatedKeys();
                double estKeysInRangeRatio = 1.0;

                if (estimatedKeys > 0 && ranges != null)
                    estKeysInRangeRatio = ((double) sstable.estimatedKeysForRanges(ranges)) / estimatedKeys;

                length += sstable.uncompressedLength() * estKeysInRangeRatio;
                cLength += sstable.onDiskLength() * estKeysInRangeRatio;
            }

            totalLength = length;
            compressedLength = cLength;
            Collections.sort(this.sstables, SSTableReader.sstableComparator);
            sstableIterator = this.sstables.iterator();
            assert sstableIterator.hasNext(); // caller should check intersecting first
            SSTableReader currentSSTable = sstableIterator.next();
            currentScanner = currentSSTable.getScanner(ranges);
        }

        /**
         * Filters {@code sstables} down to those whose token span intersects
         * at least one of {@code ranges}. A null {@code ranges} means "no
         * restriction" and returns every sstable.
         */
        public static Collection<SSTableReader> intersecting(Collection<SSTableReader> sstables, Collection<Range<Token>> ranges)
        {
            if (ranges == null)
                return Lists.newArrayList(sstables);

            Set<SSTableReader> filtered = new HashSet<>();
            for (Range<Token> range : ranges)
            {
                for (SSTableReader sstable : sstables)
                {
                    Range<Token> sstableRange = new Range<>(sstable.first.getToken(), sstable.last.getToken());
                    if (range == null || sstableRange.intersects(range))
                        filtered.add(sstable);
                }
            }
            return filtered;
        }

        public TableMetadata metadata()
        {
            return metadata;
        }

        /**
         * Yields the next partition, transparently closing an exhausted
         * per-sstable scanner and opening the next one. Byte counters are
         * folded into the running totals as each scanner is retired.
         */
        protected UnfilteredRowIterator computeNext()
        {
            if (currentScanner == null)
                return endOfData();

            while (true)
            {
                if (currentScanner.hasNext())
                    return currentScanner.next();

                positionOffset += currentScanner.getLengthInBytes();
                totalBytesScanned += currentScanner.getBytesScanned();

                currentScanner.close();
                if (!sstableIterator.hasNext())
                {
                    // reset to null so getCurrentPosition does not return wrong value
                    currentScanner = null;
                    return endOfData();
                }
                SSTableReader currentSSTable = sstableIterator.next();
                currentScanner = currentSSTable.getScanner(ranges);
            }
        }

        public void close()
        {
            if (currentScanner != null)
                currentScanner.close();
        }

        public long getLengthInBytes()
        {
            return totalLength;
        }

        public long getCurrentPosition()
        {
            // closed scanners' lengths plus the live scanner's position
            return positionOffset + (currentScanner == null ? 0L : currentScanner.getCurrentPosition());
        }

        public long getCompressedLengthInBytes()
        {
            return compressedLength;
        }

        public long getBytesScanned()
        {
            return currentScanner == null ? totalBytesScanned : totalBytesScanned + currentScanner.getBytesScanned();
        }

        public Set<SSTableReader> getBackingSSTables()
        {
            return ImmutableSet.copyOf(sstables);
        }
    }
@Override
public String toString()
{
return String.format("LCS@%d(%s)", hashCode(), cfs.name);
}
private SSTableReader findDroppableSSTable(final int gcBefore)
{
level:
for (int i = manifest.getLevelCount(); i >= 0; i--)
{
// sort sstables by droppable ratio in descending order
SortedSet<SSTableReader> sstables = manifest.getLevelSorted(i, new Comparator<SSTableReader>()
{
public int compare(SSTableReader o1, SSTableReader o2)
{
double r1 = o1.getEstimatedDroppableTombstoneRatio(gcBefore);
double r2 = o2.getEstimatedDroppableTombstoneRatio(gcBefore);
return -1 * Doubles.compare(r1, r2);
}
});
if (sstables.isEmpty())
continue;
Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
for (SSTableReader sstable : sstables)
{
if (sstable.getEstimatedDroppableTombstoneRatio(gcBefore) <= tombstoneThreshold)
continue level;
else if (!compacting.contains(sstable) && !sstable.isMarkedSuspect() && worthDroppingTombstones(sstable, gcBefore))
return sstable;
}
}
return null;
}
    /**
     * Supplies the compaction logger with LCS-specific JSON serialization:
     * each sstable is rendered with its level and its first/last token.
     * This strategy exposes no extra options, hence {@code options()} is null.
     */
    public CompactionLogger.Strategy strategyLogger()
    {
        return new CompactionLogger.Strategy()
        {
            public JsonNode sstable(SSTableReader sstable)
            {
                ObjectNode node = JsonNodeFactory.instance.objectNode();
                node.put("level", sstable.getSSTableLevel());
                node.put("min_token", sstable.first.getToken().toString());
                node.put("max_token", sstable.last.getToken().toString());
                return node;
            }

            public JsonNode options()
            {
                return null;
            }
        };
    }
public static Map<String, String> validateOptions(Map<String, String> options) throws ConfigurationException
{
Map<String, String> uncheckedOptions = AbstractCompactionStrategy.validateOptions(options);
String size = options.containsKey(SSTABLE_SIZE_OPTION) ? options.get(SSTABLE_SIZE_OPTION) : "1";
try
{
int ssSize = Integer.parseInt(size);
if (ssSize < 1)
{
throw new ConfigurationException(String.format("%s must be larger than 0, but was %s", SSTABLE_SIZE_OPTION, ssSize));
}
}
catch (NumberFormatException ex)
{
throw new ConfigurationException(String.format("%s is not a parsable int (base10) for %s", size, SSTABLE_SIZE_OPTION), ex);
}
uncheckedOptions.remove(SSTABLE_SIZE_OPTION);
// Validate the fanout_size option
String levelFanoutSize = options.containsKey(LEVEL_FANOUT_SIZE_OPTION) ? options.get(LEVEL_FANOUT_SIZE_OPTION) : String.valueOf(DEFAULT_LEVEL_FANOUT_SIZE);
try
{
int fanoutSize = Integer.parseInt(levelFanoutSize);
if (fanoutSize < 1)
{
throw new ConfigurationException(String.format("%s must be larger than 0, but was %s", LEVEL_FANOUT_SIZE_OPTION, fanoutSize));
}
}
catch (NumberFormatException ex)
{
throw new ConfigurationException(String.format("%s is not a parsable int (base10) for %s", size, LEVEL_FANOUT_SIZE_OPTION), ex);
}
uncheckedOptions.remove(LEVEL_FANOUT_SIZE_OPTION);
uncheckedOptions.remove(SINGLE_SSTABLE_UPLEVEL_OPTION);
uncheckedOptions.remove(CompactionParams.Option.MIN_THRESHOLD.toString());
uncheckedOptions.remove(CompactionParams.Option.MAX_THRESHOLD.toString());
uncheckedOptions = SizeTieredCompactionStrategyOptions.validateOptions(options, uncheckedOptions);
return uncheckedOptions;
}
}
| apache-2.0 |
rokn/Count_Words_2015 | testing/openjdk/jdk/test/java/nio/channels/Selector/KeysReady.java | 2373 | /*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test
* @bug 4530007
* @summary Test if keys reported ready multiple times
* @library ..
*/
import java.net.*;
import java.io.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.charset.*;
import java.nio.channels.spi.SelectorProvider;
/**
 * Regression test for bug 4530007: a selection key must not be reported as
 * newly ready by two consecutive select() calls when readiness has not
 * changed in between.
 */
public class KeysReady {
    static final int DAYTIME_PORT = 13;
    static final String DAYTIME_HOST = TestUtil.HOST;

    static void test() throws Exception {
        InetSocketAddress isa
            = new InetSocketAddress(InetAddress.getByName(DAYTIME_HOST),
                                    DAYTIME_PORT);
        // start a non-blocking connect so OP_CONNECT becomes ready later
        SocketChannel sc = SocketChannel.open();
        sc.configureBlocking(false);
        sc.connect(isa);

        // Prepare a selector
        Selector selector = SelectorProvider.provider().openSelector();
        try {
            SelectionKey key = sc.register(selector, SelectionKey.OP_CONNECT);

            // first select() blocks until the connect is ready and reports the key
            int keysAdded = selector.select();
            if (keysAdded > 0) {
                // the key was not removed from the selected-key set, so a second
                // select must NOT count it as newly added again
                keysAdded = selector.select(1000);
                if (keysAdded > 0)
                    throw new Exception("Same key reported added twice");
            }
        } finally {
            selector.close();
            sc.close();
        }
    }

    public static void main(String[] args) throws Exception {
        test();
    }
}
| mit |