repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
vesense/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
37562
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.datanode; import java.io.File; import java.io.FileDescriptor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import javax.management.NotCompliantMBeanException; import javax.management.ObjectName; import javax.management.StandardMBean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DiskChecker.DiskErrorException; /** * This class implements a simulated FSDataset. * * Blocks that are created are recorded but their data (plus their CRCs) are * discarded. * Fixed data is returned when blocks are read; a null CRC meta file is * created for such data. * * This FSDataset does not remember any block information across its * restarts; it does however offer an operation to inject blocks * (See the TestInectionForSImulatedStorage() * for a usage example of injection. * * Note the synchronization is coarse grained - it is at each method. 
*/ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> { static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> { @Override public SimulatedFSDataset newInstance(DataNode datanode, DataStorage storage, Configuration conf) throws IOException { return new SimulatedFSDataset(storage, conf); } @Override public boolean isSimulated() { return true; } } public static void setFactory(Configuration conf) { conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, Factory.class.getName()); } public static final String CONFIG_PROPERTY_CAPACITY = "dfs.datanode.simulateddatastorage.capacity"; public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte public static final byte DEFAULT_DATABYTE = 9; public static final String CONFIG_PROPERTY_STATE = "dfs.datanode.simulateddatastorage.state"; private static final DatanodeStorage.State DEFAULT_STATE = DatanodeStorage.State.NORMAL; static final byte[] nullCrcFileData; static { DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.Type.NULL, 16*1024 ); byte[] nullCrcHeader = checksum.getHeader(); nullCrcFileData = new byte[2 + nullCrcHeader.length]; nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff); nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff); for (int i = 0; i < nullCrcHeader.length; i++) { nullCrcFileData[i+2] = nullCrcHeader[i]; } } // information about a single block private class BInfo implements ReplicaInPipelineInterface { final Block theBlock; private boolean finalized = false; // if not finalized => ongoing creation SimulatedOutputStream oStream = null; private long bytesAcked; private long bytesRcvd; BInfo(String bpid, Block b, boolean forWriting) throws IOException { theBlock = new Block(b); if (theBlock.getNumBytes() < 0) { theBlock.setNumBytes(0); } if (!storage.alloc(bpid, theBlock.getNumBytes())) { // expected length - actual length may // be more - we find out at finalize DataNode.LOG.warn("Lack of free storage on a block 
alloc"); throw new IOException("Creating block, no free space available"); } if (forWriting) { finalized = false; oStream = new SimulatedOutputStream(); } else { finalized = true; oStream = null; } } @Override public String getStorageUuid() { return storage.getStorageUuid(); } @Override synchronized public long getGenerationStamp() { return theBlock.getGenerationStamp(); } @Override synchronized public long getNumBytes() { if (!finalized) { return bytesRcvd; } else { return theBlock.getNumBytes(); } } @Override synchronized public void setNumBytes(long length) { if (!finalized) { bytesRcvd = length; } else { theBlock.setNumBytes(length); } } synchronized SimulatedInputStream getIStream() { if (!finalized) { // throw new IOException("Trying to read an unfinalized block"); return new SimulatedInputStream(oStream.getLength(), DEFAULT_DATABYTE); } else { return new SimulatedInputStream(theBlock.getNumBytes(), DEFAULT_DATABYTE); } } synchronized void finalizeBlock(String bpid, long finalSize) throws IOException { if (finalized) { throw new IOException( "Finalizing a block that has already been finalized" + theBlock.getBlockId()); } if (oStream == null) { DataNode.LOG.error("Null oStream on unfinalized block - bug"); throw new IOException("Unexpected error on finalize"); } if (oStream.getLength() != finalSize) { DataNode.LOG.warn("Size passed to finalize (" + finalSize + ")does not match what was written:" + oStream.getLength()); throw new IOException( "Size passed to finalize does not match the amount of data written"); } // We had allocated the expected length when block was created; // adjust if necessary long extraLen = finalSize - theBlock.getNumBytes(); if (extraLen > 0) { if (!storage.alloc(bpid,extraLen)) { DataNode.LOG.warn("Lack of free storage on a block alloc"); throw new IOException("Creating block, no free space available"); } } else { storage.free(bpid, -extraLen); } theBlock.setNumBytes(finalSize); finalized = true; oStream = null; return; } synchronized 
void unfinalizeBlock() throws IOException { if (!finalized) { throw new IOException("Unfinalized a block that's not finalized " + theBlock); } finalized = false; oStream = new SimulatedOutputStream(); long blockLen = theBlock.getNumBytes(); oStream.setLength(blockLen); bytesRcvd = blockLen; bytesAcked = blockLen; } SimulatedInputStream getMetaIStream() { return new SimulatedInputStream(nullCrcFileData); } synchronized boolean isFinalized() { return finalized; } @Override synchronized public ReplicaOutputStreams createStreams(boolean isCreate, DataChecksum requestedChecksum) throws IOException { if (finalized) { throw new IOException("Trying to write to a finalized replica " + theBlock); } else { SimulatedOutputStream crcStream = new SimulatedOutputStream(); return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum, volume.isTransientStorage()); } } @Override synchronized public long getBlockId() { return theBlock.getBlockId(); } @Override synchronized public long getVisibleLength() { return getBytesAcked(); } @Override public ReplicaState getState() { return null; } @Override synchronized public long getBytesAcked() { if (finalized) { return theBlock.getNumBytes(); } else { return bytesAcked; } } @Override synchronized public void setBytesAcked(long bytesAcked) { if (!finalized) { this.bytesAcked = bytesAcked; } } @Override synchronized public long getBytesOnDisk() { if (finalized) { return theBlock.getNumBytes(); } else { return oStream.getLength(); } } @Override public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) { oStream.setLength(dataLength); } @Override public ChunkChecksum getLastChecksumAndDataLen() { return new ChunkChecksum(oStream.getLength(), null); } @Override public boolean isOnTransientStorage() { return false; } } /** * Class is used for tracking block pool storage utilization similar * to {@link BlockPoolSlice} */ private static class SimulatedBPStorage { private long used; // in bytes long getUsed() { return 
used; } void alloc(long amount) { used += amount; } void free(long amount) { used -= amount; } SimulatedBPStorage() { used = 0; } } /** * Class used for tracking datanode level storage utilization similar * to {@link FSVolumeSet} */ private static class SimulatedStorage { private final Map<String, SimulatedBPStorage> map = new HashMap<String, SimulatedBPStorage>(); private final long capacity; // in bytes private final DatanodeStorage dnStorage; synchronized long getFree() { return capacity - getUsed(); } long getCapacity() { return capacity; } synchronized long getUsed() { long used = 0; for (SimulatedBPStorage bpStorage : map.values()) { used += bpStorage.getUsed(); } return used; } synchronized long getBlockPoolUsed(String bpid) throws IOException { return getBPStorage(bpid).getUsed(); } int getNumFailedVolumes() { return 0; } synchronized boolean alloc(String bpid, long amount) throws IOException { if (getFree() >= amount) { getBPStorage(bpid).alloc(amount); return true; } return false; } synchronized void free(String bpid, long amount) throws IOException { getBPStorage(bpid).free(amount); } SimulatedStorage(long cap, DatanodeStorage.State state) { capacity = cap; dnStorage = new DatanodeStorage( "SimulatedStorage-" + DatanodeStorage.generateUuid(), state, StorageType.DEFAULT); } synchronized void addBlockPool(String bpid) { SimulatedBPStorage bpStorage = map.get(bpid); if (bpStorage != null) { return; } map.put(bpid, new SimulatedBPStorage()); } synchronized void removeBlockPool(String bpid) { map.remove(bpid); } private SimulatedBPStorage getBPStorage(String bpid) throws IOException { SimulatedBPStorage bpStorage = map.get(bpid); if (bpStorage == null) { throw new IOException("block pool " + bpid + " not found"); } return bpStorage; } String getStorageUuid() { return dnStorage.getStorageID(); } DatanodeStorage getDnStorage() { return dnStorage; } synchronized StorageReport getStorageReport(String bpid) { return new StorageReport(dnStorage, false, 
getCapacity(), getUsed(), getFree(), map.get(bpid).getUsed()); } } static class SimulatedVolume implements FsVolumeSpi { private final SimulatedStorage storage; SimulatedVolume(final SimulatedStorage storage) { this.storage = storage; } @Override public FsVolumeReference obtainReference() throws ClosedChannelException { return null; } @Override public String getStorageID() { return storage.getStorageUuid(); } @Override public String[] getBlockPoolList() { return new String[0]; } @Override public long getAvailable() throws IOException { return storage.getCapacity() - storage.getUsed(); } @Override public String getBasePath() { return null; } @Override public String getPath(String bpid) throws IOException { return null; } @Override public File getFinalizedDir(String bpid) throws IOException { return null; } @Override public StorageType getStorageType() { return null; } @Override public boolean isTransientStorage() { return false; } @Override public void reserveSpaceForRbw(long bytesToReserve) { } @Override public void releaseReservedSpace(long bytesToRelease) { } @Override public BlockIterator newBlockIterator(String bpid, String name) { throw new UnsupportedOperationException(); } @Override public BlockIterator loadBlockIterator(String bpid, String name) throws IOException { throw new UnsupportedOperationException(); } @Override public FsDatasetSpi getDataset() { throw new UnsupportedOperationException(); } } private final Map<String, Map<Block, BInfo>> blockMap = new HashMap<String, Map<Block,BInfo>>(); private final SimulatedStorage storage; private final SimulatedVolume volume; private final String datanodeUuid; public SimulatedFSDataset(DataStorage storage, Configuration conf) { if (storage != null) { for (int i = 0; i < storage.getNumStorageDirs(); ++i) { storage.createStorageID(storage.getStorageDir(i), false); } this.datanodeUuid = storage.getDatanodeUuid(); } else { this.datanodeUuid = "SimulatedDatanode-" + DataNode.generateUuid(); } 
registerMBean(datanodeUuid); this.storage = new SimulatedStorage( conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY), conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE)); this.volume = new SimulatedVolume(this.storage); } public synchronized void injectBlocks(String bpid, Iterable<Block> injectBlocks) throws IOException { ExtendedBlock blk = new ExtendedBlock(); if (injectBlocks != null) { for (Block b: injectBlocks) { // if any blocks in list is bad, reject list if (b == null) { throw new NullPointerException("Null blocks in block list"); } blk.set(bpid, b); if (isValidBlock(blk)) { throw new IOException("Block already exists in block list"); } } Map<Block, BInfo> map = blockMap.get(bpid); if (map == null) { map = new HashMap<Block, BInfo>(); blockMap.put(bpid, map); } for (Block b: injectBlocks) { BInfo binfo = new BInfo(bpid, b, false); map.put(binfo.theBlock, binfo); } } } /** Get a map for a given block pool Id */ private Map<Block, BInfo> getMap(String bpid) throws IOException { final Map<Block, BInfo> map = blockMap.get(bpid); if (map == null) { throw new IOException("Non existent blockpool " + bpid); } return map; } @Override // FsDatasetSpi public synchronized void finalizeBlock(ExtendedBlock b) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new IOException("Finalizing a non existing block " + b); } binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes()); } @Override // FsDatasetSpi public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException{ if (isValidRbw(b)) { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); map.remove(b.getLocalBlock()); } } synchronized BlockListAsLongs getBlockReport(String bpid) { final List<Replica> blocks = new ArrayList<Replica>(); final Map<Block, BInfo> map = blockMap.get(bpid); if (map != null) { for (BInfo b : map.values()) { if (b.isFinalized()) { blocks.add(b); } } } return new 
BlockListAsLongs(blocks, null); } @Override public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports( String bpid) { return Collections.singletonMap(storage.getDnStorage(), getBlockReport(bpid)); } @Override // FsDatasetSpi public List<Long> getCacheReport(String bpid) { return new LinkedList<Long>(); } @Override // FSDatasetMBean public long getCapacity() { return storage.getCapacity(); } @Override // FSDatasetMBean public long getDfsUsed() { return storage.getUsed(); } @Override // FSDatasetMBean public long getBlockPoolUsed(String bpid) throws IOException { return storage.getBlockPoolUsed(bpid); } @Override // FSDatasetMBean public long getRemaining() { return storage.getFree(); } @Override // FSDatasetMBean public int getNumFailedVolumes() { return storage.getNumFailedVolumes(); } @Override // FSDatasetMBean public long getCacheUsed() { return 0l; } @Override // FSDatasetMBean public long getCacheCapacity() { return 0l; } @Override // FSDatasetMBean public long getNumBlocksCached() { return 0l; } @Override public long getNumBlocksFailedToCache() { return 0l; } @Override public long getNumBlocksFailedToUncache() { return 0l; } @Override // FsDatasetSpi public synchronized long getLength(ExtendedBlock b) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new IOException("Finalizing a non existing block " + b); } return binfo.getNumBytes(); } @Override @Deprecated public Replica getReplica(String bpid, long blockId) { final Map<Block, BInfo> map = blockMap.get(bpid); if (map != null) { return map.get(new Block(blockId)); } return null; } @Override public synchronized String getReplicaString(String bpid, long blockId) { Replica r = null; final Map<Block, BInfo> map = blockMap.get(bpid); if (map != null) { r = map.get(new Block(blockId)); } return r == null? 
"null": r.toString(); } @Override // FsDatasetSpi public Block getStoredBlock(String bpid, long blkid) throws IOException { final Map<Block, BInfo> map = blockMap.get(bpid); if (map != null) { BInfo binfo = map.get(new Block(blkid)); if (binfo == null) { return null; } return new Block(blkid, binfo.getGenerationStamp(), binfo.getNumBytes()); } return null; } @Override // FsDatasetSpi public synchronized void invalidate(String bpid, Block[] invalidBlks) throws IOException { boolean error = false; if (invalidBlks == null) { return; } final Map<Block, BInfo> map = getMap(bpid); for (Block b: invalidBlks) { if (b == null) { continue; } BInfo binfo = map.get(b); if (binfo == null) { error = true; DataNode.LOG.warn("Invalidate: Missing block"); continue; } storage.free(bpid, binfo.getNumBytes()); map.remove(b); } if (error) { throw new IOException("Invalidate: Missing blocks."); } } @Override // FSDatasetSpi public void cache(String bpid, long[] cacheBlks) { throw new UnsupportedOperationException( "SimulatedFSDataset does not support cache operation!"); } @Override // FSDatasetSpi public void uncache(String bpid, long[] uncacheBlks) { throw new UnsupportedOperationException( "SimulatedFSDataset does not support uncache operation!"); } @Override // FSDatasetSpi public boolean isCached(String bpid, long blockId) { return false; } private BInfo getBInfo(final ExtendedBlock b) { final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId()); return map == null? null: map.get(b.getLocalBlock()); } @Override // {@link FsDatasetSpi} public boolean contains(ExtendedBlock block) { return getBInfo(block) != null; } /** * Check if a block is valid. * * @param b The block to check. * @param minLength The minimum length that the block must have. May be 0. * @param state If this is null, it is ignored. If it is non-null, we * will check that the replica has this state. 
* * @throws ReplicaNotFoundException If the replica is not found * * @throws UnexpectedReplicaStateException If the replica is not in the * expected state. */ @Override // {@link FsDatasetSpi} public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state) throws ReplicaNotFoundException, UnexpectedReplicaStateException { final BInfo binfo = getBInfo(b); if (binfo == null) { throw new ReplicaNotFoundException(b); } if ((state == ReplicaState.FINALIZED && !binfo.isFinalized()) || (state != ReplicaState.FINALIZED && binfo.isFinalized())) { throw new UnexpectedReplicaStateException(b,state); } } @Override // FsDatasetSpi public synchronized boolean isValidBlock(ExtendedBlock b) { try { checkBlock(b, 0, ReplicaState.FINALIZED); } catch (IOException e) { return false; } return true; } /* check if a block is created but not finalized */ @Override public synchronized boolean isValidRbw(ExtendedBlock b) { try { checkBlock(b, 0, ReplicaState.RBW); } catch (IOException e) { return false; } return true; } @Override public String toString() { return getStorageInfo(); } @Override // FsDatasetSpi public synchronized ReplicaHandler append( ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null || !binfo.isFinalized()) { throw new ReplicaNotFoundException("Block " + b + " is not valid, and cannot be appended to."); } binfo.unfinalizeBlock(); return new ReplicaHandler(binfo, null); } @Override // FsDatasetSpi public synchronized ReplicaHandler recoverAppend( ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new ReplicaNotFoundException("Block " + b + " is not valid, and cannot be appended to."); } if (binfo.isFinalized()) { binfo.unfinalizeBlock(); } map.remove(b); 
binfo.theBlock.setGenerationStamp(newGS); map.put(binfo.theBlock, binfo); return new ReplicaHandler(binfo, null); } @Override // FsDatasetSpi public String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new ReplicaNotFoundException("Block " + b + " is not valid, and cannot be appended to."); } if (!binfo.isFinalized()) { binfo.finalizeBlock(b.getBlockPoolId(), binfo.getNumBytes()); } map.remove(b.getLocalBlock()); binfo.theBlock.setGenerationStamp(newGS); map.put(binfo.theBlock, binfo); return binfo.getStorageUuid(); } @Override // FsDatasetSpi public synchronized ReplicaHandler recoverRbw( ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if ( binfo == null) { throw new ReplicaNotFoundException("Block " + b + " does not exist, and cannot be appended to."); } if (binfo.isFinalized()) { throw new ReplicaAlreadyExistsException("Block " + b + " is valid, and cannot be written to."); } map.remove(b); binfo.theBlock.setGenerationStamp(newGS); map.put(binfo.theBlock, binfo); return new ReplicaHandler(binfo, null); } @Override // FsDatasetSpi public synchronized ReplicaHandler createRbw( StorageType storageType, ExtendedBlock b, boolean allowLazyPersist) throws IOException { return createTemporary(storageType, b); } @Override // FsDatasetSpi public synchronized ReplicaHandler createTemporary( StorageType storageType, ExtendedBlock b) throws IOException { if (isValidBlock(b)) { throw new ReplicaAlreadyExistsException("Block " + b + " is valid, and cannot be written to."); } if (isValidRbw(b)) { throw new ReplicaAlreadyExistsException("Block " + b + " is being written, and cannot be written to."); } final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = new 
BInfo(b.getBlockPoolId(), b.getLocalBlock(), true); map.put(binfo.theBlock, binfo); return new ReplicaHandler(binfo, null); } synchronized InputStream getBlockInputStream(ExtendedBlock b ) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new IOException("No such Block " + b ); } return binfo.getIStream(); } @Override // FsDatasetSpi public synchronized InputStream getBlockInputStream(ExtendedBlock b, long seekOffset) throws IOException { InputStream result = getBlockInputStream(b); IOUtils.skipFully(result, seekOffset); return result; } /** Not supported */ @Override // FsDatasetSpi public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff, long ckoff) throws IOException { throw new IOException("Not supported"); } @Override // FsDatasetSpi public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b ) throws IOException { final Map<Block, BInfo> map = getMap(b.getBlockPoolId()); BInfo binfo = map.get(b.getLocalBlock()); if (binfo == null) { throw new IOException("No such Block " + b ); } if (!binfo.finalized) { throw new IOException("Block " + b + " is being written, its meta cannot be read"); } final SimulatedInputStream sin = binfo.getMetaIStream(); return new LengthInputStream(sin, sin.getLength()); } @Override public void checkDataDir() throws DiskErrorException { // nothing to check for simulated data set } @Override // FsDatasetSpi public synchronized void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams stream, int checksumSize) throws IOException { } /** * Simulated input and output streams * */ static private class SimulatedInputStream extends java.io.InputStream { byte theRepeatedData = 7; final long length; // bytes int currentPos = 0; byte[] data = null; /** * An input stream of size l with repeated bytes * @param l size of the stream * @param iRepeatedData byte that is repeated in the stream */ 
SimulatedInputStream(long l, byte iRepeatedData) { length = l; theRepeatedData = iRepeatedData; } /** * An input stream of of the supplied data * @param iData data to construct the stream */ SimulatedInputStream(byte[] iData) { data = iData; length = data.length; } /** * @return the lenght of the input stream */ long getLength() { return length; } @Override public int read() throws IOException { if (currentPos >= length) return -1; if (data !=null) { return data[currentPos++]; } else { currentPos++; return theRepeatedData; } } @Override public int read(byte[] b) throws IOException { if (b == null) { throw new NullPointerException(); } if (b.length == 0) { return 0; } if (currentPos >= length) { // EOF return -1; } int bytesRead = (int) Math.min(b.length, length-currentPos); if (data != null) { System.arraycopy(data, currentPos, b, 0, bytesRead); } else { // all data is zero for (int i : b) { b[i] = theRepeatedData; } } currentPos += bytesRead; return bytesRead; } } /** * This class implements an output stream that merely throws its data away, but records its * length. * */ static private class SimulatedOutputStream extends OutputStream { long length = 0; /** * constructor for Simulated Output Steram */ SimulatedOutputStream() { } /** * * @return the length of the data created so far. */ long getLength() { return length; } /** */ void setLength(long length) { this.length = length; } @Override public void write(int arg0) throws IOException { length++; } @Override public void write(byte[] b) throws IOException { length += b.length; } @Override public void write(byte[] b, int off, int len) throws IOException { length += len; } } private ObjectName mbeanName; /** * Register the FSDataset MBean using the name * "hadoop:service=DataNode,name=FSDatasetState-<storageid>" * We use storage id for MBean name since a minicluster within a single * Java VM may have multiple Simulated Datanodes. 
*/
  /** Registers this simulated dataset as an FSDatasetState MBean keyed by storage id. */
  void registerMBean(final String storageId) {
    // We wrap to bypass standard mbean naming convention.
    // This wrapping can be removed in java 6 as it is more flexible in
    // package naming for mbeans and their impl.
    StandardMBean bean;
    try {
      bean = new StandardMBean(this, FSDatasetMBean.class);
      mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageId, bean);
    } catch (NotCompliantMBeanException e) {
      // Non-fatal for a simulated dataset; log and continue.
      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
    }
    DataNode.LOG.info("Registered FSDatasetState MBean");
  }

  @Override
  public void shutdown() {
    // Only cleanup needed: undo the registration done in registerMBean().
    if (mbeanName != null) MBeans.unregister(mbeanName);
  }

  @Override
  public String getStorageInfo() {
    return "Simulated FSDataset-" + datanodeUuid;
  }

  @Override
  public boolean hasEnoughResource() {
    // The simulated dataset never reports resource exhaustion.
    return true;
  }

  @Override
  public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
      throws IOException {
    ExtendedBlock b = rBlock.getBlock();
    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
    BInfo binfo = map.get(b.getLocalBlock());
    if (binfo == null) {
      throw new IOException("No such Block " + b );
    }

    // Non-finalized replicas are reported as RBW (replica being written).
    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
        binfo.getGenerationStamp(),
        binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
  }

  @Override // FsDatasetSpi
  public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
      long recoveryId,
      long newBlockId,
      long newlength) {
    // Caller does not care about the exact Storage UUID returned.
    return datanodeUuid;
  }

  @Override // FsDatasetSpi
  public long getReplicaVisibleLength(ExtendedBlock block) {
    return block.getNumBytes();
  }

  @Override // FsDatasetSpi
  public void addBlockPool(String bpid, Configuration conf) {
    // One in-memory block map per block pool id.
    Map<Block, BInfo> map = new HashMap<Block, BInfo>();
    blockMap.put(bpid, map);
    storage.addBlockPool(bpid);
  }

  @Override // FsDatasetSpi
  public void shutdownBlockPool(String bpid) {
    blockMap.remove(bpid);
    storage.removeBlockPool(bpid);
  }

  @Override // FsDatasetSpi
  public void deleteBlockPool(String bpid, boolean force) {
    // Nothing on disk to delete in the simulated dataset.
    return;
  }

  @Override
  public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary)
      throws IOException {
    final Map<Block, BInfo> map = blockMap.get(temporary.getBlockPoolId());
    if (map == null) {
      throw new IOException("Block pool not found, temporary=" + temporary);
    }
    final BInfo r = map.get(temporary.getLocalBlock());
    if (r == null) {
      throw new IOException("Block not found, temporary=" + temporary);
    } else if (r.isFinalized()) {
      throw new IOException("Replica already finalized, temporary="
          + temporary + ", r=" + r);
    }
    return r;
  }

  @Override
  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
    throw new UnsupportedOperationException();
  }

  @Override
  public HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid, long[] blockIds)
      throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void enableTrash(String bpid) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void restoreTrash(String bpid) {
    // No trash support in the simulated dataset; deliberate no-op.
  }

  @Override
  public boolean trashEnabled(String bpid) {
    return false;
  }

  @Override
  public void setRollingUpgradeMarker(String bpid) {
    // Deliberate no-op: rolling upgrade markers are not simulated.
  }

  @Override
  public void clearRollingUpgradeMarker(String bpid) {
    // Deliberate no-op: rolling upgrade markers are not simulated.
  }

  @Override
  public void checkAndUpdate(String bpid, long blockId, File diskFile,
      File diskMetaFile, FsVolumeSpi vol) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public List<FsVolumeSpi> getVolumes() {
    throw new UnsupportedOperationException();
  }

  @Override
  public void addVolume(
      final StorageLocation location,
      final List<NamespaceInfo> nsInfos) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public DatanodeStorage getStorage(final String storageUuid) {
    // Only a single simulated storage exists; any other uuid is unknown.
    return storageUuid.equals(storage.getStorageUuid()) ?
        storage.dnStorage :
        null;
  }

  @Override
  public StorageReport[] getStorageReports(String bpid) {
    return new StorageReport[] {storage.getStorageReport(bpid)};
  }

  @Override
  public List<FinalizedReplica> getFinalizedBlocks(String bpid) {
    throw new UnsupportedOperationException();
  }

  @Override
  public List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Map<String, Object> getVolumeInfoMap() {
    throw new UnsupportedOperationException();
  }

  @Override
  public FsVolumeSpi getVolume(ExtendedBlock b) {
    // The single simulated volume backs every block.
    return volume;
  }

  @Override
  public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
      FileDescriptor fd, long offset, long nbytes, int flags) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void onCompleteLazyPersist(String bpId, long blockId,
      long creationTime, File[] savedFiles, FsVolumeSpi targetVolume) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void onFailLazyPersist(String bpId, long blockId) {
    throw new UnsupportedOperationException();
  }

  @Override
  public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
      StorageType targetStorageType) throws IOException {
    // TODO Auto-generated method stub
    return null;
  }
}
apache-2.0
tiffchou/odo
proxyui/src/test/java/com/groupon/odo/tests/TestServer/TestHttpsProxyContainer.java
3589
/*
 Copyright 2014 Groupon, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/
package com.groupon.odo.tests.TestServer;

import org.apache.catalina.Context;
import org.apache.catalina.connector.Connector;
import org.apache.coyote.http11.Http11NioProtocol;
import org.apache.tomcat.JarScanner;
import org.apache.tomcat.JarScannerCallback;
import org.springframework.boot.context.embedded.EmbeddedServletContainerFactory;
import org.springframework.boot.context.embedded.tomcat.TomcatConnectorCustomizer;
import org.springframework.boot.context.embedded.tomcat.TomcatContextCustomizer;
import org.springframework.boot.context.embedded.tomcat.TomcatEmbeddedServletContainerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.servlet.ServletContext;
import java.io.File;
import java.util.Set;
import java.util.concurrent.TimeUnit;

/**
 * Spring configuration for an embedded Tomcat test server that serves the
 * proxy over HTTPS. The port is taken from {@code MockService} and the TLS
 * keystore is unpacked from the classpath at startup.
 */
@Configuration
public class TestHttpsProxyContainer extends GenericTestProxyContainer {

    /**
     * Builds the embedded servlet container factory: disables JAR scanning
     * (speeds up test startup) and configures the connector for TLS.
     *
     * @return the configured Tomcat factory
     * @throws Exception if the keystore cannot be extracted
     */
    @Bean
    public EmbeddedServletContainerFactory servletContainer() throws Exception {
        TomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();
        MockService config = MockService.getInstance();
        factory.setPort(config.getPort());
        factory.setSessionTimeout(10, TimeUnit.MINUTES);
        factory.addContextCustomizers(new TomcatContextCustomizer() {
            @Override
            public void customize(Context context) {
                // No-op scanner: skip JAR/TLD scanning entirely for the test container.
                JarScanner jarScanner = new JarScanner() {
                    @Override
                    public void scan(ServletContext arg0, ClassLoader arg1, JarScannerCallback arg2, Set<String> arg3) {
                    }
                };
                context.setJarScanner(jarScanner);
            }
        });

        // extract keystores to temp file
        // the keystore needs to be in the filesystem and not just on the classpath
        // this ensures that it gets unpacked from the jar/war
        final File keyStore = com.groupon.odo.proxylib.Utils.copyResourceToLocalFile("tomcat.ks", "tomcat.ks");

        // Add HTTPS customization to connector
        factory.addConnectorCustomizers(new TomcatConnectorCustomizer() {
            @Override
            public void customize(Connector connector) {
                MockService config = MockService.getInstance();
                connector.setPort(config.getPort());
                connector.setSecure(true);
                Http11NioProtocol proto = (Http11NioProtocol) connector.getProtocolHandler();
                proto.setSSLEnabled(true);
                connector.setScheme("https");
                // Tomcat SSL connector attributes; the keystore was unpacked above.
                connector.setAttribute("keystorePass", "changeit");
                connector.setAttribute("keystoreFile", keyStore.getAbsolutePath());
                connector.setAttribute("clientAuth", "false");
                connector.setAttribute("sslProtocol", "TLS");
                connector.setAttribute("sslEnabled", true);
            }
        });
        return factory;
    }
}
apache-2.0
israeldb27/blg
src/main/java/com/busqueumlugar/model/Paramservico.java
4347
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package com.busqueumlugar.model; import java.io.Serializable; import java.util.Date; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; import com.busqueumlugar.util.DateUtil; @Entity @Table(name = "paramservico") public class Paramservico implements Serializable { private static final long serialVersionUID = 1L; @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Column(name = "id") private Long id; @Column(name = "labelServico") private String labelServico; @Column(name = "valueServico") private String valueServico; @Column(name = "descricao") private String descricao; @Column(name = "dataCriacao") @Temporal(TemporalType.DATE) private Date dataCriacao; @Column(name = "statusServico") private String statusServico; @Column(name = "tipoParamServico") private String tipoParamServico; @Column(name = "cobranca") private String cobranca; // indica se o servico sera cobrado ou nao @Transient private String dataCriacaoFmt; public Paramservico() { } public Paramservico(Long id) { this.id = id; } public Paramservico(Long id, String labelServico, String valueServico, Date dataCriacao) { this.id = id; this.labelServico = labelServico; this.valueServico = valueServico; this.dataCriacao = dataCriacao; } public Long getId() { return id; } public void setId(Long id) { this.id = id; } public String getLabelServico() { return labelServico; } public void setLabelServico(String labelServico) { this.labelServico = labelServico; } public String getValueServico() { return valueServico; } public void setValueServico(String valueServico) { this.valueServico = valueServico; } public String getDescricao() { return descricao; } 
public void setDescricao(String descricao) { this.descricao = descricao; } public Date getDataCriacao() { return dataCriacao; } public void setDataCriacao(Date dataCriacao) { this.dataCriacao = dataCriacao; } public String getStatusServico() { return statusServico; } public void setStatusServico(String statusServico) { this.statusServico = statusServico; } @Override public int hashCode() { int hash = 0; hash += (id != null ? id.hashCode() : 0); return hash; } @Override public boolean equals(Object object) { // TODO: Warning - this method won't work in the case the id fields are not set if (!(object instanceof Paramservico)) { return false; } Paramservico other = (Paramservico) object; if ((this.id == null && other.id != null) || (this.id != null && !this.id.equals(other.id))) { return false; } return true; } @Override public String toString() { return "br.app.paramServico.Paramservico[ id=" + id + " ]"; } /** * @return the tipoParamServico */ public String getTipoParamServico() { return tipoParamServico; } /** * @param tipoParamServico the tipoParamServico to set */ public void setTipoParamServico(String tipoParamServico) { this.tipoParamServico = tipoParamServico; } /** * @return the cobranca */ public String getCobranca() { return cobranca; } /** * @param cobranca the cobranca to set */ public void setCobranca(String cobranca) { this.cobranca = cobranca; } public String getDataCriacaoFmt() { if ( dataCriacao != null) return DateUtil.formataData(this.dataCriacao); else return dataCriacaoFmt; } public void setDataCriacaoFmt(String dataCriacaoFmt) { this.dataCriacaoFmt = dataCriacaoFmt; } }
apache-2.0
AntonVasilyuk/Aduma
chapter_001/src/main/java/ru/job4j/Triangle.java
759
package ru.job4j;

/**
 * Computes the area of a triangle given by three points.
 *
 * @author Anton Vasilyuk
 * @version 1.0
 * @since 0.1
 */
public class Triangle {

    /**
     * Computes the triangle area using Heron's formula.
     *
     * @param a first vertex
     * @param b second vertex
     * @param c third vertex
     * @return the area; 0 when the three points are collinear
     */
    public double area(Point a, Point b, Point c) {
        // Cache each side length once instead of recomputing every distance twice.
        double ab = a.distanceTo(b);
        double bc = b.distanceTo(c);
        double ca = c.distanceTo(a);
        // Heron's formula with semi-perimeter p.
        double p = (ca + ab + bc) / 2;
        return Math.sqrt(p * (p - ca) * (p - ab) * (p - bc));
    }
}
apache-2.0
cipous/spring-data-solr
src/main/java/org/springframework/data/solr/core/query/QueryParameterImpl.java
1848
/* * Copyright 2012 - 2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.data.solr.core.query; import org.springframework.lang.Nullable; import org.springframework.util.Assert; /** * Trivial implementation of {@see QueryParameter}. * * @author Christoph Strobl */ public class QueryParameterImpl implements QueryParameter { private final String name; private @Nullable Object value; /** * @param name must not be null * @param value */ public QueryParameterImpl(String name, @Nullable Object value) { super(); Assert.notNull(name, "Name must not be null!"); this.name = name; this.value = value; } @Override public String getName() { return this.name; } @Nullable @Override public Object getValue() { return this.value; } @Override public int hashCode() { return ((this.name == null) ? 0 : this.name.hashCode()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } QueryParameterImpl other = (QueryParameterImpl) obj; if (this.name == null) { if (other.name != null) { return false; } } else if (!this.name.equals(other.name)) { return false; } return true; } }
apache-2.0
pioto/radio-thermostat
radio-thermostat-api/src/main/java/org/pioto/radiothermostat/api/package-info.java
1054
/** * Copyright 2014 Mike Kelly * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This package contains interfaces for a public API for the * <a href="http://www.radiothermostat.com/latestnews.html#advanced">Radio Thermostat</a>. * <p/> * The main interface for interacting with the thermostat is the * {@link org.pioto.radiothermostat.api.Thermostat} interface. Implementations * should register as an OSGi service. * * @author Mike Kelly (pioto@pioto.org) */ package org.pioto.radiothermostat.api;
apache-2.0
eBaoTech/pinpoint
plugins/arcus/src/main/java/com/navercorp/pinpoint/plugin/arcus/ArcusPlugin.java
14868
/*
 * Copyright 2014 NAVER Corp.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.navercorp.pinpoint.plugin.arcus;

import java.security.ProtectionDomain;

import com.navercorp.pinpoint.bootstrap.async.AsyncTraceIdAccessor;
import com.navercorp.pinpoint.bootstrap.instrument.InstrumentException;
import com.navercorp.pinpoint.bootstrap.instrument.InstrumentClass;
import com.navercorp.pinpoint.bootstrap.instrument.InstrumentMethod;
import com.navercorp.pinpoint.bootstrap.instrument.MethodFilters;
import com.navercorp.pinpoint.bootstrap.instrument.Instrumentor;
import com.navercorp.pinpoint.bootstrap.instrument.transformer.TransformCallback;
import com.navercorp.pinpoint.bootstrap.instrument.transformer.TransformTemplate;
import com.navercorp.pinpoint.bootstrap.instrument.transformer.TransformTemplateAware;
import com.navercorp.pinpoint.bootstrap.logging.PLogger;
import com.navercorp.pinpoint.bootstrap.logging.PLoggerFactory;
import com.navercorp.pinpoint.bootstrap.plugin.ProfilerPlugin;
import com.navercorp.pinpoint.bootstrap.plugin.ProfilerPluginSetupContext;
import com.navercorp.pinpoint.bootstrap.plugin.util.InstrumentUtils;
import com.navercorp.pinpoint.plugin.arcus.filter.ArcusMethodFilter;
import com.navercorp.pinpoint.plugin.arcus.filter.FrontCacheMemcachedMethodFilter;

import static com.navercorp.pinpoint.common.util.VarArgs.va;

/**
 * Pinpoint profiler plugin that instruments Arcus / spymemcached client
 * classes with tracing interceptors at class-load time.
 *
 * @author jaehong.kim
 *
 */
public class ArcusPlugin implements ProfilerPlugin, TransformTemplateAware {

    private final PLogger logger = PLoggerFactory.getLogger(this.getClass());

    // Injected by the profiler via setTransformTemplate() before setup() is called.
    private TransformTemplate transformTemplate;

    /**
     * Registers class transformers according to configuration: Arcus-specific
     * editors when Arcus tracing is enabled, plus the shared memcached editors
     * when either Arcus or plain memcached tracing is enabled.
     */
    @Override
    public void setup(ProfilerPluginSetupContext context) {
        ArcusPluginConfig config = new ArcusPluginConfig(context.getConfig());

        boolean arcus = config.isArcus();
        boolean memcached = config.isMemcached();

        if (arcus) {
            addArcusClientEditor(config);
            addCollectionFutureEditor();
            addFrontCacheGetFutureEditor();
            addFrontCacheMemcachedClientEditor(config);
            addCacheManagerEditor();

            // add none operation future. over 1.5.4
            addBTreeStoreGetFutureEditor();
            addCollectionGetBulkFutureEditor();
            addSMGetFutureFutureEditor();
        }

        if (arcus || memcached) {
            addMemcachedClientEditor(config);

            addBaseOperationImplEditor();
            addGetFutureEditor();
            addOperationFutureEditor();
            // add none operation future.
            addImmediateFutureEditor();
            addBulkGetFutureEditor();
        }
    }

    // Instruments ArcusClient: hooks setCacheManager and wraps the Arcus API methods.
    private void addArcusClientEditor(final ArcusPluginConfig config) {
        transformTemplate.transform("net.spy.memcached.ArcusClient", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);

                // Only instrument versions that actually expose addOp(String, Operation).
                if (target.hasMethod("addOp", "java.lang.String", "net.spy.memcached.ops.Operation")) {
                    boolean traceKey = config.isArcusKeyTrace();

                    final InstrumentMethod setCacheManagerMethod = InstrumentUtils.findMethod(target, "setCacheManager", "net.spy.memcached.CacheManager");
                    setCacheManagerMethod.addInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.SetCacheManagerInterceptor");

                    for (InstrumentMethod m : target.getDeclaredMethods(new ArcusMethodFilter())) {
                        try {
                            m.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.ApiInterceptor", va(traceKey), ArcusConstants.ARCUS_SCOPE);
                        } catch (Exception e) {
                            // An individual method failing to instrument is not fatal.
                            if (logger.isWarnEnabled()) {
                                logger.warn("Unsupported method " + className + "." + m.getName(), e);
                            }
                        }
                    }
                    return target.toBytecode();
                } else {
                    // Unsupported client version: leave the class untouched.
                    return null;
                }
            }
        });
    }

    // Instruments CacheManager's constructor to capture the Arcus service code.
    private void addCacheManagerEditor() {
        transformTemplate.transform("net.spy.memcached.CacheManager", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);
                target.addField("com.navercorp.pinpoint.plugin.arcus.ServiceCodeAccessor");

                final InstrumentMethod constructorMethod = InstrumentUtils.findConstructor(target, "java.lang.String", "java.lang.String", "net.spy.memcached.ConnectionFactoryBuilder", "java.util.concurrent.CountDownLatch", "int", "int");
                constructorMethod.addInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.CacheManagerConstructInterceptor");

                return target.toBytecode();
            }
        });
    }

    // Adds a service-code field to BaseOperationImpl so interceptors can tag operations.
    private void addBaseOperationImplEditor() {
        transformTemplate.transform("net.spy.memcached.protocol.BaseOperationImpl", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);
                target.addField("com.navercorp.pinpoint.plugin.arcus.ServiceCodeAccessor");
                return target.toBytecode();
            }
        });
    }

    // Instruments the ehcache-backed front-cache future: constructor plus both get() overloads.
    private void addFrontCacheGetFutureEditor() {
        transformTemplate.transform("net.spy.memcached.plugin.FrontCacheGetFuture", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);
                target.addField("com.navercorp.pinpoint.plugin.arcus.CacheNameAccessor");
                target.addField("com.navercorp.pinpoint.plugin.arcus.CacheKeyAccessor");

                final InstrumentMethod constructorMethod = InstrumentUtils.findConstructor(target, "net.sf.ehcache.Element");
                constructorMethod.addInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FrontCacheGetFutureConstructInterceptor");

                final InstrumentMethod get0 = InstrumentUtils.findMethod(target, "get", new String[]{"long", "java.util.concurrent.TimeUnit"});
                get0.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FrontCacheGetFutureGetInterceptor", ArcusConstants.ARCUS_SCOPE);

                final InstrumentMethod get1 = InstrumentUtils.findMethod(target, "get", new String[0]);
                get1.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FrontCacheGetFutureGetInterceptor", ArcusConstants.ARCUS_SCOPE);

                return target.toBytecode();
            }
        });
    }

    // Wraps FrontCacheMemcachedClient API methods with the tracing interceptor.
    private void addFrontCacheMemcachedClientEditor(final ArcusPluginConfig config) {
        transformTemplate.transform("net.spy.memcached.plugin.FrontCacheMemcachedClient", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);

                boolean traceKey = config.isMemcachedKeyTrace();

                for (InstrumentMethod m : target.getDeclaredMethods(new FrontCacheMemcachedMethodFilter())) {
                    try {
                        m.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.ApiInterceptor", va(traceKey), ArcusConstants.ARCUS_SCOPE);
                    } catch (Exception e) {
                        // An individual method failing to instrument is not fatal.
                        if (logger.isWarnEnabled()) {
                            logger.warn("Unsupported method " + className + "." + m.getName(), e);
                        }
                    }
                }

                return target.toBytecode();
            }
        });
    }

    // Instruments the plain MemcachedClient: addOp hook (when present) plus API methods.
    private void addMemcachedClientEditor(final ArcusPluginConfig config) {
        transformTemplate.transform("net.spy.memcached.MemcachedClient", new TransformCallback() {
            @Override
            public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
                InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);

                if (target.hasDeclaredMethod("addOp", new String[]{"java.lang.String", "net.spy.memcached.ops.Operation"})) {
                    target.addField("com.navercorp.pinpoint.plugin.arcus.ServiceCodeAccessor");
                    final InstrumentMethod addOpMethod = InstrumentUtils.findMethod(target, "addOp", "java.lang.String", "net.spy.memcached.ops.Operation");
                    addOpMethod.addInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.AddOpInterceptor");
                }

                boolean traceKey = config.isMemcachedKeyTrace();

                for (InstrumentMethod m : target.getDeclaredMethods(new FrontCacheMemcachedMethodFilter())) {
                    try {
                        m.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.ApiInterceptor", va(traceKey), ArcusConstants.ARCUS_SCOPE);
                    } catch (Exception e) {
                        // An individual method failing to instrument is not fatal.
                        if (logger.isWarnEnabled()) {
                            logger.warn("Unsupported method " + className + "." + m.getName(), e);
                        }
                    }
                }

                return target.toBytecode();
            }
        });
    }

    // Shared transformer for operation-backed futures: tracks the operation and
    // intercepts cancel/get/set/signalComplete within the future scope.
    private static final TransformCallback FUTURE_TRANSFORMER = new TransformCallback() {
        @Override
        public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
            InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);
            target.addField("com.navercorp.pinpoint.plugin.arcus.OperationAccessor");
            target.addField(AsyncTraceIdAccessor.class.getName());

            // setOperation
            InstrumentMethod setOperation = target.getDeclaredMethod("setOperation", new String[] { "net.spy.memcached.ops.Operation" });
            if (setOperation != null) {
                setOperation.addInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FutureSetOperationInterceptor");
            }

            // cancel, get, set
            for (InstrumentMethod m : target.getDeclaredMethods(MethodFilters.name("cancel", "get", "set", "signalComplete"))) {
                m.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FutureGetInterceptor", ArcusConstants.ARCUS_FUTURE_SCOPE);
            }

            return target.toBytecode();
        }
    };

    // Shared transformer for futures with no operation to track (cancel/get only).
    private static final TransformCallback INTERNAL_FUTURE_TRANSFORMER = new TransformCallback() {
        @Override
        public byte[] doInTransform(Instrumentor instrumentor, ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws InstrumentException {
            InstrumentClass target = instrumentor.getInstrumentClass(loader, className, classfileBuffer);
            target.addField(AsyncTraceIdAccessor.class.getName());

            // cancel, get, set
            for (InstrumentMethod m : target.getDeclaredMethods(MethodFilters.name("cancel", "get"))) {
                m.addScopedInterceptor("com.navercorp.pinpoint.plugin.arcus.interceptor.FutureInternalMethodInterceptor", ArcusConstants.ARCUS_FUTURE_SCOPE);
            }

            return target.toBytecode();
        }
    };

    private void addCollectionFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.CollectionFuture", FUTURE_TRANSFORMER);
    }

    private void addGetFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.GetFuture", FUTURE_TRANSFORMER);
    }

    private void addOperationFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.OperationFuture", FUTURE_TRANSFORMER);
    }

    private void addImmediateFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.ImmediateFuture", INTERNAL_FUTURE_TRANSFORMER);
    }

    private void addBulkGetFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.BulkGetFuture", INTERNAL_FUTURE_TRANSFORMER);
    }

    private void addBTreeStoreGetFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.BTreeStoreAndGetFuture", INTERNAL_FUTURE_TRANSFORMER);
    }

    private void addCollectionGetBulkFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.CollectionGetBulkFuture", INTERNAL_FUTURE_TRANSFORMER);
    }

    private void addSMGetFutureFutureEditor() {
        transformTemplate.transform("net.spy.memcached.internal.SMGetFuture", INTERNAL_FUTURE_TRANSFORMER);
    }

    @Override
    public void setTransformTemplate(TransformTemplate transformTemplate) {
        this.transformTemplate = transformTemplate;
    }
}
apache-2.0
nwnpallewela/developer-studio
datamapper-tool/org.wso2.developerstudio.visualdatamapper.diagram/src/org/wso2/developerstudio/datamapper/diagram/edit/parts/ConcatEditPart.java
6440
package org.wso2.developerstudio.datamapper.diagram.edit.parts; import org.eclipse.draw2d.CompoundBorder; import org.eclipse.draw2d.Graphics; import org.eclipse.draw2d.IFigure; import org.eclipse.draw2d.Label; import org.eclipse.draw2d.LineBorder; import org.eclipse.draw2d.RectangleFigure; import org.eclipse.draw2d.RoundedRectangle; import org.eclipse.draw2d.Shape; import org.eclipse.draw2d.StackLayout; import org.eclipse.draw2d.TitleBarBorder; import org.eclipse.draw2d.geometry.Dimension; import org.eclipse.gef.EditPart; import org.eclipse.gef.EditPolicy; import org.eclipse.gef.Request; import org.eclipse.gef.commands.Command; import org.eclipse.gef.editpolicies.LayoutEditPolicy; import org.eclipse.gef.editpolicies.NonResizableEditPolicy; import org.eclipse.gef.requests.CreateRequest; import org.eclipse.gmf.runtime.diagram.ui.editpolicies.DragDropEditPolicy; import org.eclipse.gmf.runtime.diagram.ui.editpolicies.EditPolicyRoles; import org.eclipse.gmf.runtime.draw2d.ui.figures.ConstrainedToolbarLayout; import org.eclipse.gmf.runtime.draw2d.ui.figures.RoundedRectangleBorder; import org.eclipse.gmf.runtime.gef.ui.figures.DefaultSizeNodeFigure; import org.eclipse.gmf.runtime.gef.ui.figures.NodeFigure; import org.eclipse.gmf.runtime.notation.View; import org.eclipse.gmf.tooling.runtime.edit.policies.reparent.CreationEditPolicyWithCustomReparent; import org.eclipse.swt.SWT; import org.eclipse.swt.graphics.Color; import org.eclipse.swt.graphics.Font; import org.wso2.developerstudio.datamapper.diagram.custom.edit.part.AbstractOperatorEditPart; import org.wso2.developerstudio.datamapper.diagram.edit.parts.custom.CustomNonResizableEditPolicyEx; /** * @generated NOT */ public class ConcatEditPart extends AbstractOperatorEditPart { /** * @generated */ public static final int VISUAL_ID = 2006; /** * @generated */ protected IFigure contentPane; /** * @generated */ protected IFigure primaryShape; /** * @generated */ public ConcatEditPart(View view) { super(view); } /** * 
@generated NOT */ protected void createDefaultEditPolicies() { installEditPolicy( EditPolicyRoles.CREATION_ROLE, new CreationEditPolicyWithCustomReparent( org.wso2.developerstudio.datamapper.diagram.part.DataMapperVisualIDRegistry.TYPED_INSTANCE)); super.createDefaultEditPolicies(); installEditPolicy( EditPolicyRoles.SEMANTIC_ROLE, new org.wso2.developerstudio.datamapper.diagram.edit.policies.ConcatItemSemanticEditPolicy()); installEditPolicy(EditPolicyRoles.DRAG_DROP_ROLE, new DragDropEditPolicy()); //installEditPolicy(EditPolicyRoles.DRAG_DROP_ROLE, new DMCustomDragDropEditPolicy()); installEditPolicy( EditPolicyRoles.CANONICAL_ROLE, new org.wso2.developerstudio.datamapper.diagram.edit.policies.ConcatCanonicalEditPolicy()); installEditPolicy(EditPolicy.LAYOUT_ROLE, createLayoutEditPolicy()); installEditPolicy(EditPolicy.PRIMARY_DRAG_ROLE, new CustomNonResizableEditPolicyEx()); // XXX need an SCR to runtime to have another abstract superclass that would let children add reasonable editpolicies // removeEditPolicy(org.eclipse.gmf.runtime.diagram.ui.editpolicies.EditPolicyRoles.CONNECTION_HANDLES_ROLE); } /** * @generated */ protected LayoutEditPolicy createLayoutEditPolicy() { org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy lep = new org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy() { protected EditPolicy createChildEditPolicy(EditPart child) { EditPolicy result = child.getEditPolicy(EditPolicy.PRIMARY_DRAG_ROLE); if (result == null) { result = new NonResizableEditPolicy(); } return result; } protected Command getMoveChildrenCommand(Request request) { return null; } protected Command getCreateCommand(CreateRequest request) { return null; } }; return lep; } /** * @generated NOT */ protected IFigure createNodeShape() { return primaryShape = new ConcatFigure(); } /** * @generated */ public RectangleFigure getPrimaryShape() { return (RectangleFigure) primaryShape; } /** * @generated */ protected NodeFigure createNodePlate() { 
DefaultSizeNodeFigure result = new DefaultSizeNodeFigure(40, 40); return result; } /** * Creates figure for this edit part. * * Body of this method does not depend on settings in generation model * so you may safely remove <i>generated</i> tag and modify it. * * @generated */ protected NodeFigure createNodeFigure() { NodeFigure figure = createNodePlate(); figure.setLayoutManager(new StackLayout()); IFigure shape = createNodeShape(); figure.add(shape); contentPane = setupContentPane(shape); return figure; } /** * Default implementation treats passed figure as content pane. * Respects layout one may have set for generated figure. * @param nodeShape instance of generated figure class * @generated */ protected IFigure setupContentPane(IFigure nodeShape) { if (nodeShape.getLayoutManager() == null) { ConstrainedToolbarLayout layout = new ConstrainedToolbarLayout(); layout.setSpacing(5); nodeShape.setLayoutManager(layout); } return nodeShape; // use nodeShape itself as contentPane } /** * @generated */ public IFigure getContentPane() { if (contentPane != null) { return contentPane; } return super.getContentPane(); } /** * @generated */ protected void setForegroundColor(Color color) { if (primaryShape != null) { primaryShape.setForegroundColor(color); } } /** * @generated */ protected void setBackgroundColor(Color color) { if (primaryShape != null) { primaryShape.setBackgroundColor(color); } } /** * @generated */ protected void setLineWidth(int width) { if (primaryShape instanceof Shape) { ((Shape) primaryShape).setLineWidth(width); } } /** * @generated */ protected void setLineType(int style) { if (primaryShape instanceof Shape) { ((Shape) primaryShape).setLineStyle(style); } } public class ConcatFigure extends OperatorRectangle { public ConcatFigure() { super("Concat"); this.setBackgroundColor(THIS_BACK); } public String getIconPath() { return "icons/ico20/log-mediator.gif"; } public String getNodeName() { return "Equal"; } public IFigure getToolTip() { return new 
Label("Equal Operation"); } } static final Color THIS_BACK = new Color(null, 230, 230, 230); }
apache-2.0
bitsetd4d/word-playground
WordPlaygroundSpeakingTool/src/d3bug/tool/phonetic/SpellingScoreToolButton.java
2109
package d3bug.tool.phonetic;

import java.awt.Color;
import java.awt.Rectangle;
import java.util.HashSet;
import java.util.Set;

import d3bug.kidswing.FontManager;
import d3bug.poc.SpriteButtonUniverse;
import d3bug.poc.sounds.SoundFx;
import d3bug.poc.tasks.ShakeButtonsTask;
import d3bug.poc.threads.UniverseRunner;
import d3bug.poc.tools.ToolButton;
import d3bug.util.FireIfNotInterruptedTimer;
import d3bug.util.FireIfNotInterruptedTimer.TimerFiredListener;
import d3bug.words.dictionary.DictionaryLookup;
import d3bug.words.dictionary.DictionaryWord;

/**
 * Tool button that keeps a running score of distinct, correctly spelled words.
 *
 * Candidate words arrive via {@link #onWordCreated(String)}; a debounce timer delays
 * the dictionary lookup so rapid edits interrupt a pending check. Double-clicking
 * toggles scoring off (shown as "X"); a horizontal shake resets the score.
 */
public class SpellingScoreToolButton extends ToolButton implements TimerFiredListener {

	// True while scoring is switched off (toggled by double-click); rendered as "X".
	private boolean disabled = false;
	// Count of distinct dictionary words credited so far.
	private int score = 0;
	// Words already credited, so a repeated spelling never scores twice.
	private Set<String> words = new HashSet<String>();
	// Debounce timer: the lookup only runs if no newer word arrives within the delay.
	private FireIfNotInterruptedTimer fireTimer = new FireIfNotInterruptedTimer(this);
	// Most recent candidate word; examined when the debounce timer fires.
	private String wordToCheck;

	public SpellingScoreToolButton(SpriteButtonUniverse universe) {
		super(universe);
		showState();
		setForeground(Color.DARK_GRAY);
		getComponent().setBounds(new Rectangle(80, 60));
		setFont(FontManager.getInstance().getComicFont(20));
		setVisible(true);
	}

	@Override
	public void onWordCreated(String word) {
		// Only bother checking words of at least two letters, and only while enabled.
		boolean worthChecking = !disabled && word.length() >= 2;
		if (worthChecking) {
			wordToCheck = word;
			fireTimer.fireIfNotInterrupted(750);
		}
	}

	public void onTimerFired() {
		checkWordSpelledCorrectly();
	}

	/** Looks up the pending word and, if it is a new dictionary word, scores it. */
	private void checkWordSpelledCorrectly() {
		String candidate = wordToCheck;
		if (candidate == null) {
			return;
		}
		DictionaryWord entry = DictionaryLookup.lookup(candidate);
		if (entry == null || words.contains(entry.getWord())) {
			// Not a dictionary word, or already credited once before.
			return;
		}
		words.add(entry.getWord());
		score++;
		showState();
		SoundFx.getInstance().soundCorrect();
		UniverseRunner.getRunner().execute(new ShakeButtonsTask(this));
	}

	@Override
	public void onToolHorizontalShake() {
		// Shaking the tool resets the game: forget scored words and zero the score.
		score = 0;
		words.clear();
		showState();
	}

	@Override
	public void onToolDoubleClick() {
		disabled = !disabled;
		showState();
		UniverseRunner.getRunner().execute(new ShakeButtonsTask(this));
	}

	/** Renders either the disabled marker or the current score on the button. */
	private void showState() {
		setText(disabled ? "X" : String.valueOf(score));
	}
}
apache-2.0
mesosphere/dcos-commons
sdk/scheduler/src/test/java/com/mesosphere/sdk/scheduler/plan/DefaultStepFactoryTest.java
16677
package com.mesosphere.sdk.scheduler.plan;

import com.google.common.collect.ImmutableList;
import com.mesosphere.sdk.offer.CommonIdUtils;
import com.mesosphere.sdk.offer.taskdata.TaskLabelWriter;
import com.mesosphere.sdk.specification.*;
import com.mesosphere.sdk.state.ConfigStore;
import com.mesosphere.sdk.state.StateStore;
import com.mesosphere.sdk.storage.MemPersister;
import com.mesosphere.sdk.storage.Persister;
import com.mesosphere.sdk.testutils.TestConstants;
import com.mesosphere.sdk.testutils.TestPodFactory;
import org.apache.mesos.Protos;
import org.junit.Assert;
import org.junit.Test;

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;

/**
 * This class tests the {@link DefaultStepFactory} class.
 *
 * The previously copy-pasted store/spec bootstrap and TaskInfo/TaskStatus boilerplate
 * is factored into private helpers; each {@code @Test} method is unchanged in name
 * and observable behavior.
 */
public class DefaultStepFactoryTest {
    private StepFactory stepFactory;
    private ConfigStore<ServiceSpec> configStore;
    private StateStore stateStore;

    @Test
    public void testGetStepFailsOnMultipleResourceSetReferences() throws Exception {
        PodInstance podInstance = getPodInstanceWithSameResourceSets();
        Step step = stepFactory.getStep(podInstance, taskNames(podInstance));
        Assert.assertEquals(Status.ERROR, step.getStatus());
    }

    @Test
    public void testGetStepFailsOnDuplicateDNSNames() throws Exception {
        PodInstance podInstance = getPodInstanceWithSameDnsPrefixes();
        Step step = stepFactory.getStep(podInstance, taskNames(podInstance));
        Assert.assertEquals(Status.ERROR, step.getStatus());
    }

    @Test
    public void testInitialStateForRunningTaskOnDefaultExecutorDependsOnReadinessCheck() throws Exception {
        PodInstance podInstance = getPodInstanceWithASingleTask();
        List<String> tasksToLaunch = taskNames(podInstance);

        UUID configId = UUID.randomUUID();
        configStore.setTargetConfig(configId);

        String taskName = podInstance.getName() + '-' + tasksToLaunch.get(0);
        Protos.TaskInfo taskInfo = storeTask(taskName, configId, true);

        // While the readiness check has not passed, a RUNNING task has not reached its goal.
        storeRunningStatusWithReadiness(taskName, taskInfo, false);
        assertThat(((DefaultStepFactory) stepFactory)
                .hasReachedGoalState(stateStore.fetchTask(taskName).get(), GoalState.RUNNING, configId), is(false));

        Step step = stepFactory.getStep(podInstance, tasksToLaunch);
        assertThat(step.isComplete(), is(false));
        assertThat(step.isPending(), is(true));

        // Once the readiness check passes, the same task counts as having reached RUNNING.
        storeRunningStatusWithReadiness(taskName, taskInfo, true);
        assertThat(((DefaultStepFactory) stepFactory)
                .hasReachedGoalState(stateStore.fetchTask(taskName).get(), GoalState.RUNNING, configId), is(true));

        step = stepFactory.getStep(podInstance, tasksToLaunch);
        assertThat(step.isComplete(), is(true));
        assertThat(step.isPending(), is(false));
    }

    @Test
    public void testTaskWithFinishedGoalStateCanReachGoalState() throws Exception {
        assertGoalStateReachedOnFinish(GoalState.ONCE);
    }

    @Test
    public void testTaskWithFinishGoalStateCanReachGoalState() throws Exception {
        assertGoalStateReachedOnFinish(GoalState.FINISH);
    }

    @Test
    public void testTaskWithOnceGoalStateCanReachGoalState() throws Exception {
        assertGoalStateReachedOnFinish(GoalState.ONCE);
    }

    /**
     * Shared body for the terminal-goal-state tests: while the task is TASK_RUNNING it has
     * not reached its goal; once TASK_FINISHED is reported, it has.
     */
    private void assertGoalStateReachedOnFinish(GoalState goalState) throws Exception {
        PodInstance podInstance = getPodInstanceWithGoalState(goalState);
        List<String> tasksToLaunch = taskNames(podInstance);

        UUID configId = UUID.randomUUID();
        configStore.setTargetConfig(configId);

        String taskName = podInstance.getName() + '-' + tasksToLaunch.get(0);
        Protos.TaskInfo taskInfo = storeTask(taskName, configId, false);

        storeStatus(taskName, taskInfo, Protos.TaskState.TASK_RUNNING);
        assertThat(((DefaultStepFactory) stepFactory)
                .hasReachedGoalState(stateStore.fetchTask(taskName).get(), goalState, configId), is(false));

        storeStatus(taskName, taskInfo, Protos.TaskState.TASK_FINISHED);
        assertThat(((DefaultStepFactory) stepFactory)
                .hasReachedGoalState(stateStore.fetchTask(taskName).get(), goalState, configId), is(true));
    }

    /** Returns the names of all tasks in the pod, in spec order. */
    private static List<String> taskNames(PodInstance podInstance) {
        return podInstance.getPod().getTasks().stream()
                .map(taskSpec -> taskSpec.getName())
                .collect(Collectors.toList());
    }

    /**
     * Stores a minimal TaskInfo labeled with the given target config and returns the stored copy.
     *
     * @param withReadinessCheck whether to also attach an (empty) readiness check label
     */
    private Protos.TaskInfo storeTask(String taskName, UUID configId, boolean withReadinessCheck) throws Exception {
        TaskLabelWriter labelWriter = new TaskLabelWriter(TestConstants.TASK_INFO)
                .setTargetConfiguration(configId);
        if (withReadinessCheck) {
            labelWriter.setReadinessCheck(Protos.HealthCheck.newBuilder().build());
        }
        stateStore.storeTasks(ImmutableList.of(
                Protos.TaskInfo.newBuilder()
                        .setName(taskName)
                        .setTaskId(CommonIdUtils.toTaskId(TestConstants.SERVICE_NAME, taskName))
                        // SlaveID is required by the proto but irrelevant to these tests.
                        .setSlaveId(Protos.SlaveID.newBuilder()
                                .setValue("proto-field-required")
                        )
                        .setLabels(labelWriter.toProto())
                        .build()));
        return stateStore.fetchTask(taskName).get();
    }

    /** Stores a bare TaskStatus with the given state for the task. */
    private void storeStatus(String taskName, Protos.TaskInfo taskInfo, Protos.TaskState state) {
        stateStore.storeStatus(taskName, Protos.TaskStatus.newBuilder()
                .setState(state)
                .setTaskId(taskInfo.getTaskId())
                .build());
    }

    /** Stores a TASK_RUNNING status carrying the "readiness_check_passed" label. */
    private void storeRunningStatusWithReadiness(String taskName, Protos.TaskInfo taskInfo, boolean passed) {
        stateStore.storeStatus(taskName, Protos.TaskStatus.newBuilder()
                .setState(Protos.TaskState.TASK_RUNNING)
                .setTaskId(taskInfo.getTaskId())
                .setLabels(Protos.Labels.newBuilder().addLabels(Protos.Label.newBuilder()
                        .setKey("readiness_check_passed")
                        .setValue(Boolean.toString(passed))
                        .build()).build())
                .build());
    }

    /**
     * Builds a service around the given pod, (re)initializes the state/config stores and the
     * step factory against a fresh in-memory persister, and returns pod instance 0.
     */
    private PodInstance initializeWithPod(PodSpec podSpec) throws Exception {
        ServiceSpec serviceSpec = DefaultServiceSpec.newBuilder()
                .name(TestConstants.SERVICE_NAME)
                .role(TestConstants.ROLE)
                .principal(TestConstants.PRINCIPAL)
                .zookeeperConnection("foo.bar.com")
                .pods(Arrays.asList(podSpec))
                .build();
        Persister persister = MemPersister.newBuilder().build();
        stateStore = new StateStore(persister);
        configStore = new ConfigStore<>(DefaultServiceSpec.getConfigurationFactory(serviceSpec), persister);
        UUID configId = configStore.store(serviceSpec);
        configStore.setTargetConfig(configId);
        stepFactory = new DefaultStepFactory(configStore, stateStore, Optional.empty());
        return new DefaultPodInstance(podSpec, 0);
    }

    private PodInstance getPodInstanceWithASingleTask() throws Exception {
        TaskSpec taskSpec0 = TestPodFactory.getTaskSpec(TestConstants.TASK_NAME + 0, TestConstants.RESOURCE_SET_ID);
        return initializeWithPod(DefaultPodSpec.newBuilder(
                TestConstants.POD_TYPE,
                1,
                Arrays.asList(taskSpec0)).build());
    }

    /** Two tasks sharing the same resource set: step creation must fail. */
    private PodInstance getPodInstanceWithSameResourceSets() throws Exception {
        TaskSpec taskSpec0 = TestPodFactory.getTaskSpec(TestConstants.TASK_NAME + 0, TestConstants.RESOURCE_SET_ID);
        TaskSpec taskSpec1 = TestPodFactory.getTaskSpec(TestConstants.TASK_NAME + 1, TestConstants.RESOURCE_SET_ID);
        return initializeWithPod(DefaultPodSpec.newBuilder(
                TestConstants.POD_TYPE,
                1,
                Arrays.asList(taskSpec0, taskSpec1)).build());
    }

    private PodInstance getPodInstanceWithGoalState(GoalState goalState) throws Exception {
        TaskSpec taskSpec = TestPodFactory.getTaskSpecWithGoalState(
                TestConstants.TASK_NAME,
                TestConstants.RESOURCE_SET_ID,
                goalState);
        return initializeWithPod(DefaultPodSpec.newBuilder(
                TestConstants.POD_TYPE,
                1,
                Arrays.asList(taskSpec)).build());
    }

    /** Two tasks sharing the same DNS prefix: step creation must fail. */
    private PodInstance getPodInstanceWithSameDnsPrefixes() throws Exception {
        TaskSpec taskSpec0 = TestPodFactory.getTaskSpec(
                TestConstants.TASK_NAME + 0,
                TestConstants.RESOURCE_SET_ID + 0,
                TestConstants.TASK_DNS_PREFIX);
        TaskSpec taskSpec1 = TestPodFactory.getTaskSpec(
                TestConstants.TASK_NAME + 1,
                TestConstants.RESOURCE_SET_ID + 1,
                TestConstants.TASK_DNS_PREFIX);
        return initializeWithPod(DefaultPodSpec.newBuilder(
                TestConstants.POD_TYPE,
                1,
                Arrays.asList(taskSpec0, taskSpec1)).build());
    }
}
apache-2.0
flitte/Gaffer
accumulo-store/src/main/java/gaffer/accumulostore/operation/hdfs/handler/ImportAccumuloKeyValueFilesHandler.java
2078
/* * Copyright 2016 Crown Copyright * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package gaffer.accumulostore.operation.hdfs.handler; import gaffer.accumulostore.AccumuloStore; import gaffer.accumulostore.operation.hdfs.handler.tool.ImportElementsToAccumulo; import gaffer.accumulostore.operation.hdfs.impl.ImportAccumuloKeyValueFiles; import gaffer.operation.OperationException; import gaffer.store.Context; import gaffer.store.Store; import gaffer.store.operation.handler.OperationHandler; import org.apache.hadoop.util.ToolRunner; public class ImportAccumuloKeyValueFilesHandler implements OperationHandler<ImportAccumuloKeyValueFiles, Void> { @Override public Void doOperation(final ImportAccumuloKeyValueFiles operation, final Context context, final Store store) throws OperationException { doOperation(operation, (AccumuloStore) store); return null; } public void doOperation(final ImportAccumuloKeyValueFiles operation, final AccumuloStore store) throws OperationException { splitTable(operation, store); } private void splitTable(final ImportAccumuloKeyValueFiles operation, final AccumuloStore store) throws OperationException { final ImportElementsToAccumulo importTool = new ImportElementsToAccumulo(operation.getInputPath(), operation.getFailurePath(), store); try { ToolRunner.run(importTool, new String[0]); } catch (final Exception e) { throw new OperationException(e.getMessage(), e); } } }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-directory/src/main/java/com/amazonaws/services/directory/model/ClientException.java
1842
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.directory.model;

import javax.annotation.Generated;

/**
 * <p>
 * A client exception has occurred.
 * </p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ClientException extends com.amazonaws.services.directory.model.AWSDirectoryServiceException {
    private static final long serialVersionUID = 1L;

    // Request identifier reported by the service; bound to the "RequestId" JSON property below.
    private String requestId;

    /**
     * Constructs a new ClientException with the specified error message.
     *
     * @param message
     *        Describes the error encountered.
     */
    public ClientException(String message) {
        super(message);
    }

    /**
     * Sets the request identifier carried in the service's "RequestId" response property.
     *
     * @param requestId
     *        The request identifier reported by the service.
     */
    @com.fasterxml.jackson.annotation.JsonProperty("RequestId")
    public void setRequestId(String requestId) {
        this.requestId = requestId;
    }

    /**
     * Returns the request identifier carried in the service's "RequestId" response property.
     *
     * @return The request identifier reported by the service, or {@code null} if not set.
     */
    @com.fasterxml.jackson.annotation.JsonProperty("RequestId")
    public String getRequestId() {
        return this.requestId;
    }

    /**
     * Fluent variant of {@link #setRequestId(String)}.
     *
     * @param requestId
     *        The request identifier reported by the service.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ClientException withRequestId(String requestId) {
        setRequestId(requestId);
        return this;
    }
}
apache-2.0
mksmbrtsh/LLRPexplorer
src/maximsblog/blogspot/com/llrpexplorer/ActivityMain.java
9207
package maximsblog.blogspot.com.llrpexplorer;

import java.util.List;
import java.util.concurrent.TimeoutException;

import maximsblog.blogspot.com.llrpexplorer.ReaderService.IntentDo;

import org.llrp.ltk.exceptions.InvalidLLRPMessageException;
import org.llrp.ltk.generated.LLRPMessageFactory;
import org.llrp.ltk.generated.enumerations.AISpecStopTriggerType;
import org.llrp.ltk.generated.enumerations.AccessReportTriggerType;
import org.llrp.ltk.generated.enumerations.AirProtocols;
import org.llrp.ltk.generated.enumerations.GetReaderCapabilitiesRequestedData;
import org.llrp.ltk.generated.enumerations.NotificationEventType;
import org.llrp.ltk.generated.enumerations.ROReportTriggerType;
import org.llrp.ltk.generated.enumerations.ROSpecStartTriggerType;
import org.llrp.ltk.generated.enumerations.ROSpecState;
import org.llrp.ltk.generated.enumerations.ROSpecStopTriggerType;
import org.llrp.ltk.generated.messages.ADD_ROSPEC;
import org.llrp.ltk.generated.messages.DELETE_ROSPEC;
import org.llrp.ltk.generated.messages.DISABLE_ROSPEC;
import org.llrp.ltk.generated.messages.ENABLE_EVENTS_AND_REPORTS;
import org.llrp.ltk.generated.messages.ENABLE_ROSPEC;
import org.llrp.ltk.generated.messages.GET_READER_CAPABILITIES;
import org.llrp.ltk.generated.messages.GET_READER_CAPABILITIES_RESPONSE;
import org.llrp.ltk.generated.messages.GET_READER_CONFIG;
import org.llrp.ltk.generated.messages.READER_EVENT_NOTIFICATION;
import org.llrp.ltk.generated.messages.RO_ACCESS_REPORT;
import org.llrp.ltk.generated.messages.SET_READER_CONFIG;
import org.llrp.ltk.generated.messages.START_ROSPEC;
import org.llrp.ltk.generated.messages.START_ROSPEC_RESPONSE;
import org.llrp.ltk.generated.messages.STOP_ROSPEC;
import org.llrp.ltk.generated.messages.STOP_ROSPEC_RESPONSE;
import org.llrp.ltk.generated.parameters.AISpec;
import org.llrp.ltk.generated.parameters.AISpecStopTrigger;
import org.llrp.ltk.generated.parameters.AccessReportSpec;
import org.llrp.ltk.generated.parameters.C1G2EPCMemorySelector;
import org.llrp.ltk.generated.parameters.ConnectionAttemptEvent;
import org.llrp.ltk.generated.parameters.EventNotificationState;
import org.llrp.ltk.generated.parameters.InventoryParameterSpec;
import org.llrp.ltk.generated.parameters.ROBoundarySpec;
import org.llrp.ltk.generated.parameters.ROReportSpec;
import org.llrp.ltk.generated.parameters.ROSpec;
import org.llrp.ltk.generated.parameters.ROSpecStartTrigger;
import org.llrp.ltk.generated.parameters.ROSpecStopTrigger;
import org.llrp.ltk.generated.parameters.ReaderEventNotificationData;
import org.llrp.ltk.generated.parameters.ReaderEventNotificationSpec;
import org.llrp.ltk.generated.parameters.TagReportContentSelector;
import org.llrp.ltk.generated.parameters.TagReportData;
import org.llrp.ltk.net.LLRPConnection;
import org.llrp.ltk.net.LLRPConnectionAttemptFailedException;
import org.llrp.ltk.net.LLRPConnector;
import org.llrp.ltk.net.LLRPEndpoint;
import org.llrp.ltk.types.Bit;
import org.llrp.ltk.types.LLRPBitList;
import org.llrp.ltk.types.LLRPMessage;
import org.llrp.ltk.types.UnsignedByte;
import org.llrp.ltk.types.UnsignedInteger;
import org.llrp.ltk.types.UnsignedShort;
import org.llrp.ltk.types.UnsignedShortArray;

import android.app.Activity;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Bundle;
import android.os.Handler;
import android.os.StrictMode;
import android.text.Html;
import android.text.method.LinkMovementMethod;
import android.text.method.ScrollingMovementMethod;
import android.text.util.Linkify;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.Switch;
import android.widget.TextView;

/**
 * Main screen of the LLRP explorer: toggles the connection to an LLRP reader via
 * {@link ReaderService}, triggers reads, and renders reader messages broadcast back
 * from the service into a scrolling log view.
 *
 * FIX: onResume()/onPause() previously called super.onStart()/super.onStop(). Android
 * requires each lifecycle override to call through to the SAME super method; the wrong
 * calls cause a SuperNotCalledException at runtime. Corrected below.
 */
public class ActivityMain extends Activity implements OnClickListener {
	// Filter for the broadcast actions the ReaderService emits.
	private IntentFilter mIntentFilter;
	// Scrolling log output.
	private TextView t;
	// Connect/disconnect toggle.
	private Switch mToggleConnectSwitch;
	// "Read" button; only enabled once a connection attempt has succeeded.
	private Button mButton;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		setContentView(R.layout.activity_main);
		mIntentFilter = new IntentFilter();
		mIntentFilter.addAction(ReaderService.IntentDo.connect.toString());
		mIntentFilter.addAction(ReaderService.IntentDo.message.toString());
		mIntentFilter.addAction(ReaderService.IntentDo.error.toString());
		mIntentFilter.addAction(ReaderService.IntentDo.connectionError
				.toString());
		mIntentFilter.addAction(ReaderService.IntentDo.disconnect.toString());
		t = (TextView) findViewById(R.id.t);
		t.setMovementMethod(new ScrollingMovementMethod());
		mButton = (Button) findViewById(R.id.exec);
		mButton.setOnClickListener(this);
		mButton.setEnabled(false);
		mToggleConnectSwitch = (Switch) findViewById(R.id.toggleButton1);
		mToggleConnectSwitch.setOnClickListener(this);
		TextView textAbout = (TextView) findViewById(R.id.textView1);
		textAbout.setText(getResources().getText(R.string.about_text));
		Linkify.addLinks(textAbout, Linkify.ALL);
		textAbout.setMovementMethod(LinkMovementMethod.getInstance());
	}

	@Override
	public void onResume() {
		// Was super.onStart() — must call through to super.onResume().
		super.onResume();
		registerReceiver(mIntentReceiver, mIntentFilter);
	}

	@Override
	public void onPause() {
		// Was super.onStop() — must call through to super.onPause().
		super.onPause();
		if (mIntentReceiver != null)
			unregisterReceiver(mIntentReceiver);
	}

	/** Receives ReaderService broadcasts and appends them to the log view. */
	private BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
		@Override
		public void onReceive(Context context, Intent intent) {
			if (intent != null) {
				if (intent.getAction().equals(
						ReaderService.IntentDo.message.toString())) {
					try {
						// The service ships LLRP messages as bit-list strings; decode here.
						LLRPMessage message = LLRPMessageFactory
								.createLLRPMessage(new LLRPBitList(intent
										.getStringExtra("msg")));
						updateUI(message);
					} catch (InvalidLLRPMessageException e) {
						e.printStackTrace();
					}
				} else if (intent.getAction().equals(
						ReaderService.IntentDo.error.toString())) {
					String message = intent.getStringExtra("error");
					t.setText(t.getText().toString() + "\n" + message);
				} else if (intent.getAction().equals(
						ReaderService.IntentDo.connectionError.toString())) {
					t.setText(t.getText().toString() + "\n"
							+ "connection error");
					mButton.setEnabled(false);
					// Reset the toggle without re-triggering onClick: detach the
					// listener, flip the state, then re-attach.
					mToggleConnectSwitch.setOnClickListener(null);
					mToggleConnectSwitch.setChecked(false);
					mToggleConnectSwitch.setEnabled(true);
					mToggleConnectSwitch.setOnClickListener(ActivityMain.this);
				}
			}
		}
	};

	@Override
	public void onClick(View v) {
		if (v.getId() == R.id.toggleButton1) {
			Intent service;
			service = new Intent(this, ReaderService.class);
			if (!mToggleConnectSwitch.isChecked()) {
				service.putExtra("IntentDo", IntentDo.disconnect);
			} else {
				service.putExtra("IntentDo", IntentDo.connect);
			}
			// Lock the switch until the attempt resolves; the broadcast receiver /
			// updateUI re-enable it once the reader responds.
			mToggleConnectSwitch.setEnabled(false);
			startService(service);
		} else {
			Intent service = new Intent(this, ReaderService.class);
			service.putExtra("IntentDo", IntentDo.read);
			startService(service);
		}
	}

	/** Renders a decoded LLRP message into the log view and updates widget state. */
	private void updateUI(LLRPMessage message) {
		if (message instanceof READER_EVENT_NOTIFICATION) {
			READER_EVENT_NOTIFICATION readerEventNotification = ((READER_EVENT_NOTIFICATION) message);
			ReaderEventNotificationData red = readerEventNotification
					.getReaderEventNotificationData();
			// Only a pure connection-attempt notification (no other event set)
			// is treated as the outcome of our connect request.
			if (red.getAISpecEvent() == null && red.getAntennaEvent() == null
					&& red.getConnectionCloseEvent() == null
					&& red.getGPIEvent() == null
					&& red.getHoppingEvent() == null
					&& red.getReaderExceptionEvent() == null
					&& red.getReportBufferLevelWarningEvent() == null
					&& red.getReportBufferOverflowErrorEvent() == null
					&& red.getRFSurveyEvent() == null
					&& red.getROSpecEvent() == null) {
				if (red.getConnectionAttemptEvent() != null) {
					t.setText(t.getText().toString() + "\n"
							+ "Connection attempt was successful");
					mButton.setEnabled(true);
				} else {
					t.setText(t.getText().toString() + "\n"
							+ "Connection attempt was unsucessful");
					mButton.setEnabled(false);
				}
				mToggleConnectSwitch.setEnabled(true);
			} else {
				//t.setText(t.getText().toString() + "\n"
				//		+ readerEventNotification.toString());
			}
		} else if (message instanceof GET_READER_CAPABILITIES_RESPONSE) {
			GET_READER_CAPABILITIES_RESPONSE getReaderCap = (GET_READER_CAPABILITIES_RESPONSE) message;
			UnsignedShort maxNumberOfAntennaSupported = getReaderCap
					.getGeneralDeviceCapabilities()
					.getMaxNumberOfAntennaSupported();
			t.setText(t.getText().toString()
					+ "\nmaxNumberOfAntennaSupported: "
					+ maxNumberOfAntennaSupported);
		} else if (message instanceof START_ROSPEC_RESPONSE) {
		} else if (message instanceof RO_ACCESS_REPORT) {
			RO_ACCESS_REPORT roAccessReport = (RO_ACCESS_REPORT) message;
			List<TagReportData> l = roAccessReport.getTagReportDataList();
			if (l == null || l.isEmpty()) {
				t.setText(t.getText().toString() + "\n" + "Tag not found");
			}
			for (TagReportData trd : l)
				t.setText(t.getText().toString() + "\n"
						+ trd.getEPCParameter().toString());
		}
		if (message instanceof STOP_ROSPEC_RESPONSE) {
		} else {
		}
	}
}
apache-2.0
snorrees/artemis-odb
artemis-test/src/main/java/com/artemis/system/iterating/IntOptimizedSystemAdditional.java
550
package com.artemis.system.iterating;

import com.artemis.Aspect;
import com.artemis.Entity;
import com.artemis.systems.EntityProcessingSystem;
import com.artemis.systems.IteratingSystem;

/**
 * Test fixture system matching all entities with an empty per-entity process body.
 *
 * NOTE(review): the pass-through overrides and the calls to overridable methods from the
 * constructor below look deliberate — this lives in artemis-test, presumably to exercise
 * the system-optimization/bytecode-weaving machinery — TODO confirm before "cleaning up".
 */
public class IntOptimizedSystemAdditional extends IteratingSystem {

	public IntOptimizedSystemAdditional() {
		// Matches every entity.
		super(Aspect.all());
		// Intentional calls to overridable methods from the constructor (see class note).
		setEnabled(true);
		begin();
	}

	// Trivial override that only delegates to super (see class note).
	@Override
	public void setEnabled(boolean enabled) {
		super.setEnabled(enabled);
	}

	// Trivial override that only delegates to super (see class note).
	@Override
	protected void begin() {
		super.begin();
	}

	// Intentionally empty: this fixture does no per-entity work.
	@Override
	protected void process(int e) {}
}
apache-2.0
eSDK/esdk_tp_native_java
test/demo/eSDK_TP_Demo_BS_Java/src/com/huawei/esdk/demo/autogen/SetBroadcastSiteExResponse.java
1726
package com.huawei.esdk.demo.autogen;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

/**
 * JAXB binding for the {@code setBroadcastSiteExResponse} root element:
 * a single required {@code resultCode} of XML-schema type {@code xs:int}.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType>
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;sequence>
 *         &lt;element name="resultCode" type="{http://www.w3.org/2001/XMLSchema}int"/>
 *       &lt;/sequence>
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = { "resultCode" })
@XmlRootElement(name = "setBroadcastSiteExResponse")
public class SetBroadcastSiteExResponse {

    // Bound as xs:int but (un)marshalled through Adapter2, which converts
    // between the XML string form and Integer (hence type = String.class).
    @XmlElement(required = true, type = String.class)
    @XmlJavaTypeAdapter(Adapter2.class)
    @XmlSchemaType(name = "int")
    protected Integer resultCode;

    /**
     * Gets the value of the resultCode property.
     *
     * @return possible object is {@link Integer }
     *         (the generated Javadoc said {@link String }, but the
     *         property is declared and returned as {@code Integer})
     */
    public Integer getResultCode() {
        return resultCode;
    }

    /**
     * Sets the value of the resultCode property.
     *
     * @param value allowed object is {@link Integer }
     */
    public void setResultCode(Integer value) {
        this.resultCode = value;
    }
}
apache-2.0
trasa/aws-sdk-java
aws-java-sdk-inspector/src/main/java/com/amazonaws/services/inspector/model/AgentsFilter.java
6327
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.inspector.model; import java.io.Serializable; /** * <p> * This data type is used as a response element in the * <a>ListAssessmentAgents</a> action. * </p> */ public class AgentsFilter implements Serializable, Cloneable { /** * <p> * For a record to match a filter, the value specified for this data type * property must be the exact match of the value of the <b>agentHealth</b> * property of the <a>Agent</a> data type. * </p> */ private java.util.List<String> agentHealthList; /** * <p> * For a record to match a filter, the value specified for this data type * property must be the exact match of the value of the <b>agentHealth</b> * property of the <a>Agent</a> data type. * </p> * * @return For a record to match a filter, the value specified for this data * type property must be the exact match of the value of the * <b>agentHealth</b> property of the <a>Agent</a> data type. */ public java.util.List<String> getAgentHealthList() { return agentHealthList; } /** * <p> * For a record to match a filter, the value specified for this data type * property must be the exact match of the value of the <b>agentHealth</b> * property of the <a>Agent</a> data type. * </p> * * @param agentHealthList * For a record to match a filter, the value specified for this data * type property must be the exact match of the value of the * <b>agentHealth</b> property of the <a>Agent</a> data type. 
*/ public void setAgentHealthList(java.util.Collection<String> agentHealthList) { if (agentHealthList == null) { this.agentHealthList = null; return; } this.agentHealthList = new java.util.ArrayList<String>(agentHealthList); } /** * <p> * For a record to match a filter, the value specified for this data type * property must be the exact match of the value of the <b>agentHealth</b> * property of the <a>Agent</a> data type. * </p> * <p> * <b>NOTE:</b> This method appends the values to the existing list (if * any). Use {@link #setAgentHealthList(java.util.Collection)} or * {@link #withAgentHealthList(java.util.Collection)} if you want to * override the existing values. * </p> * * @param agentHealthList * For a record to match a filter, the value specified for this data * type property must be the exact match of the value of the * <b>agentHealth</b> property of the <a>Agent</a> data type. * @return Returns a reference to this object so that method calls can be * chained together. */ public AgentsFilter withAgentHealthList(String... agentHealthList) { if (this.agentHealthList == null) { setAgentHealthList(new java.util.ArrayList<String>( agentHealthList.length)); } for (String ele : agentHealthList) { this.agentHealthList.add(ele); } return this; } /** * <p> * For a record to match a filter, the value specified for this data type * property must be the exact match of the value of the <b>agentHealth</b> * property of the <a>Agent</a> data type. * </p> * * @param agentHealthList * For a record to match a filter, the value specified for this data * type property must be the exact match of the value of the * <b>agentHealth</b> property of the <a>Agent</a> data type. * @return Returns a reference to this object so that method calls can be * chained together. 
*/ public AgentsFilter withAgentHealthList( java.util.Collection<String> agentHealthList) { setAgentHealthList(agentHealthList); return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getAgentHealthList() != null) sb.append("AgentHealthList: " + getAgentHealthList()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof AgentsFilter == false) return false; AgentsFilter other = (AgentsFilter) obj; if (other.getAgentHealthList() == null ^ this.getAgentHealthList() == null) return false; if (other.getAgentHealthList() != null && other.getAgentHealthList().equals(this.getAgentHealthList()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getAgentHealthList() == null) ? 0 : getAgentHealthList() .hashCode()); return hashCode; } @Override public AgentsFilter clone() { try { return (AgentsFilter) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException( "Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
apache-2.0
vpeurala/JRetrofit
src/main/java/org/jretrofit/AbstractMethodLookupHelper.java
3960
/*
 * Copyright 2006 Ville Peurala
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jretrofit;

import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;

/**
 * Base class for helpers which look up, on a target object's class, methods
 * that are compatible with a requested interface method (same name,
 * assignable parameter types, assignable return type).
 *
 * @author Ville Peurala
 */
abstract class AbstractMethodLookupHelper implements Serializable {
    private static final long serialVersionUID = 1977L;

    /** The object on whose class compatible methods are looked up. Never reassigned. */
    private final Object target;

    protected AbstractMethodLookupHelper(Object target) {
        this.target = target;
    }

    /**
     * Finds a method on the target's class which is compatible with the
     * given interface method, making it accessible if necessary.
     *
     * @param interfaceMethod the interface method to satisfy.
     * @return a compatible {@link Method} of the target's class.
     * @throws UnsupportedOperationException if no compatible method exists.
     */
    protected Method findCompatibleMethod(Method interfaceMethod) {
        // Public methods are checked first, preserving the original lookup
        // order; the LinkedHashSet removes the duplicates that the previous
        // implementation scanned twice (public methods appear in both
        // getMethods() and getDeclaredMethods() of the declaring class).
        LinkedHashSet<Method> candidates = new LinkedHashSet<Method>();
        candidates.addAll(Arrays.asList(getTarget().getClass().getMethods()));
        candidates.addAll(Arrays.asList(getTarget().getClass()
                .getDeclaredMethods()));
        for (Method currentMethod : candidates) {
            if (areMethodsCompatible(interfaceMethod, currentMethod)) {
                if (!currentMethod.isAccessible()) {
                    // Allow invocation of non-public declared methods.
                    currentMethod.setAccessible(true);
                }
                return currentMethod;
            }
        }
        throw new UnsupportedOperationException("Target object '" + getTarget()
                + "' does not have a method which is compatible with '"
                + interfaceMethod + "'!");
    }

    // A candidate is compatible when its name, parameters and return type
    // are all compatible with the requested method.
    private boolean areMethodsCompatible(Method requestedMethod,
            Method candidateMethod) {
        return areNamesCompatible(requestedMethod, candidateMethod)
                && areParametersCompatible(requestedMethod, candidateMethod)
                && areReturnValuesCompatible(requestedMethod, candidateMethod);
    }

    private boolean areNamesCompatible(Method requestedMethod,
            Method candidateMethod) {
        return requestedMethod.getName().equals(candidateMethod.getName());
    }

    // Every candidate parameter must accept (be assignable from) the
    // corresponding requested parameter type, and arities must match.
    private boolean areParametersCompatible(Method requestedMethod,
            Method candidateMethod) {
        Class<?>[] requestedParameterTypes = requestedMethod
                .getParameterTypes();
        Class<?>[] candidateParameterTypes = candidateMethod
                .getParameterTypes();
        if (requestedParameterTypes.length != candidateParameterTypes.length) {
            return false;
        }
        for (int i = 0; i < requestedParameterTypes.length; i++) {
            if (!candidateParameterTypes[i]
                    .isAssignableFrom(requestedParameterTypes[i])) {
                return false;
            }
        }
        return true;
    }

    // The candidate's return type must be usable where the requested return
    // type is expected (covariant returns are allowed).
    private boolean areReturnValuesCompatible(Method requestedMethod,
            Method candidateMethod) {
        Class<?> requestedReturnValueType = requestedMethod.getReturnType();
        Class<?> candidateReturnValueType = candidateMethod.getReturnType();
        return requestedReturnValueType
                .isAssignableFrom(candidateReturnValueType);
    }

    /** Subclasses decide how a compatible method is resolved per call. */
    abstract Method findMethodToCall(Method interfaceMethod);

    protected final Object getTarget() {
        return target;
    }
}
apache-2.0
dagix5/backbox
BackBox/src/it/backbox/transaction/CopyTask.java
800
package it.backbox.transaction;

import it.backbox.utility.Utility;

import java.io.File;

/**
 * Task that copies a file from a source path to a destination path,
 * creating the destination's parent directories when needed.
 */
public class CopyTask extends Task {

	/** Absolute or relative path of the file to copy. */
	private String srcPath;
	/** Absolute or relative path of the copy to create. */
	private String destPath;

	/**
	 * Sets the source and destination paths for this task.
	 *
	 * @param srcPath  path of the file to copy.
	 * @param destPath path of the copy to create.
	 */
	public void setInput(String srcPath, String destPath) {
		this.srcPath = srcPath;
		this.destPath = destPath;
	}

	public CopyTask() {
		super();
	}

	public CopyTask(String srcPath, String destPath) {
		super();
		setInput(srcPath, destPath);
	}

	/**
	 * Performs the copy. Parent directories of the destination are created
	 * if missing.
	 *
	 * @throws IllegalStateException if the destination's parent directory
	 *                               cannot be created.
	 * @throws Exception             if the copy itself fails.
	 */
	@Override
	public void run() throws Exception {
		File dest = new File(destPath);
		// Resolve against the absolute path: a bare relative file name
		// ("file.txt") has a null getParentFile() and used to abort the
		// copy with a misleading error even though no directory was needed.
		File parent = dest.getAbsoluteFile().getParentFile();
		// mkdirs() returns false on failure (the old code ignored it);
		// the second exists() check tolerates a concurrent creation.
		if (parent != null && !parent.exists() && !parent.mkdirs()
				&& !parent.exists()) {
			throw new IllegalStateException("Couldn't create dir: " + parent);
		}
		dest.createNewFile();
		Utility.copy(new File(srcPath), dest);
	}
}
apache-2.0
divercraig/GasCalculator
GasCalcLib/src/test/java/uk/co/craigwarren/gascalc/gaslaw/RealGasLawTest.java
1026
/** * */ package uk.co.craigwarren.gascalc.gaslaw; import org.junit.Before; import uk.co.craigwarren.gascalc.gaslaw.IdealGasLaw; import uk.co.craigwarren.gascalc.gaslaw.NewtonRaphson; import uk.co.craigwarren.gascalc.gaslaw.RealGasLaw; import uk.co.craigwarren.gascalc.gaslaw.VanDerWaalsConstantsCalculator; import uk.co.craigwarren.gascalc.gaslaw.VanDerWaalsFunctionFactory; import uk.co.craigwarren.gascalc.model.Gas; /** * @author craig * */ public class RealGasLawTest extends GasLawTest{ @Override public double getTestQuantity() { return 85.40888; } @Before public void setup(){ Gas testGas = new Gas(21, 79); VanDerWaalsConstantsCalculator testConstCalc = new VanDerWaalsConstantsCalculator(); NewtonRaphson testNewtonRaphson = new NewtonRaphson(); VanDerWaalsFunctionFactory testVDWFF = new VanDerWaalsFunctionFactory(); IdealGasLaw testIdealGasLaws = new IdealGasLaw(); gasLawUnderTest = new RealGasLaw(testGas, testConstCalc, testNewtonRaphson, testVDWFF, testIdealGasLaws); } }
apache-2.0
Izakey/Java
Chapter8/TimeThree/TimeThreePackageTest.java
1361
// TimeThreePackageTest.java // TimeThree object used in an application import com.deitel.jhtp.chapter8.TimeThree; public class TimeThreePackageTest { public static void main(String[] args ) { TimeThree time = new TimeThree(); // Output string representations of the time System.out.print("The intitial universal time is: " ); System.out.println( time.toUniversalString()); System.out.print("The intitial standard time is: " ); System.out.println( time.toString()); System.out.println(); // Change time and output updated time time.setTime(13, 27, 6); System.out.print("The intitial universal time is: " ); System.out.println( time.toUniversalString()); System.out.print("The intitial standard time is: " ); System.out.println( time.toString()); System.out.println(); // attempt to set time with invalied values try { time.setTime(99,99,99); } catch( IllegalArgumentException error ) { System.out.printf("Exception: %s\n\n", error.getMessage()); } // Display time after attempting to set invalied values System.out.print("After attempting invalied settings: " ); System.out.print("Universal time: " ); System.out.println( time.toUniversalString()); System.out.print("Standard time: " ); System.out.println( time.toString()); } }
apache-2.0
IvanBelyaev/ibelyaev
chapter_004/src/main/java/ru/job4j/post/package-info.java
134
/** * Combine all user emails. * * @author Ivan Belyaev (gh0st84@mail.ru) * @version $Id$ * @since 1.0 */ package ru.job4j.post;
apache-2.0
GIP-RECIA/esco-grouper-ui
metier/esco-web/src/main/java/org/esco/grouperui/web/controllers/person/PersonSubscriptionsController.java
12667
/**
 * Copyright (C) 2009 GIP RECIA http://www.recia.fr
 * @Author (C) 2009 GIP RECIA <contact@recia.fr>
 * @Contributor (C) 2009 SOPRA http://www.sopragroup.com/
 * @Contributor (C) 2011 Pierre Legay <pierre.legay@recia.fr>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.esco.grouperui.web.controllers.person;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.commons.lang.Validate;
import org.esco.grouperui.domaine.beans.Group;
import org.esco.grouperui.domaine.beans.GroupPrivilegeEnum;
import org.esco.grouperui.domaine.beans.Members;
import org.esco.grouperui.domaine.beans.Person;
import org.esco.grouperui.domaine.beans.Sortable;
import org.esco.grouperui.domaine.beans.Subject;
import org.esco.grouperui.exceptions.ESCOBusinessException;
import org.esco.grouperui.exceptions.business.ESCOGroupNotFoundException;
import org.esco.grouperui.exceptions.business.ESCOGroupNotUniqueException;
import org.esco.grouperui.exceptions.business.ESCOSubjectNotFoundException;
import org.esco.grouperui.exceptions.business.ESCOSubjectNotUniqueException;
import org.esco.grouperui.services.application.IGrouperService;
import org.esco.grouperui.tools.DistinctSortableList;
import org.esco.grouperui.web.ESCOConstantes;
import org.esco.grouperui.web.beans.Status;
import org.esco.grouperui.web.beans.XMLResultString;
import org.esco.grouperui.web.beans.summary.Resume;
import org.esco.grouperui.web.beans.table.TableData;
import org.esco.grouperui.web.beans.table.TableDataFactory;
import org.esco.grouperui.web.controllers.PersonController;
import org.esco.grouperui.web.plugins.AbstractControllerAware;
import org.esco.grouperui.web.utils.I18nExceptionAdapter;
import org.esco.grouperui.web.utils.XmlProducer;

/**
 * It is the controller of the tab subscriptions in the person properties.
 * Requirement(s) : [RECIA-ESCO-L1-002]
 *
 * NOTE(review): this controller relies on several inherited members
 * ({@code data}, {@code storedData}, {@code xmlProducerWrapper},
 * {@code errorClassesNames}, {@code sortableRowDataWrapper},
 * {@code getParam}, {@code handleException}, ...) — presumably declared in
 * {@link AbstractControllerAware}; verify there.
 *
 * @author aChesneau
 */
public class PersonSubscriptionsController extends AbstractControllerAware {

    /**
     * The default message exception.
     */
    private static final String DEFAULT_MESSAGE_EXCEPTION = "DEFAULT_MESSAGE_EXCEPTION";

    /**
     * The web parameter to get the type of subscription(OPTIN or OPTOUT).
     */
    private static final String TYPE_OF_SUBSCRIPTION_PARAM = "typeOfSubscription";

    /**
     * The web parameter to get the group id.
     */
    private static final String GROUP_ID_PARAMS = "groupId";

    /**
     * The default serial id generated.
     */
    private static final long serialVersionUID = -5318226972895150022L;

    /**
     * Default constructor.
     */
    public PersonSubscriptionsController() {
    }

    /**
     * get TabsController for this tab and cast it in appropriate class.
     *
     * @return PersonController.
     */
    public PersonController getPersonController() {
        return (PersonController) super.getTabsController();
    }

    /**
     * The method call by ajax to find subscriptions. Refreshes the data and
     * returns an XML {@code Status} telling whether there are rows to show.
     *
     * @return the producted xml
     * @throws ESCOSubjectNotFoundException
     *             if the person is not found.
     * @throws ESCOSubjectNotUniqueException
     *             if the person is not unique.
     */
    public String findSubscriptions() throws ESCOSubjectNotFoundException,
            ESCOSubjectNotUniqueException {
        this.findSubscriptionsMethod();
        XmlProducer producer = new XmlProducer();
        producer.setTarget(new Status(this.isRowToReturn()));
        producer.setTypesOfTarget(Status.class);
        return this.xmlProducerWrapper.wrap(producer);
    }

    /**
     * Find all the subscriptions of the person. Clears {@code data} and
     * refills it with the subjects of every OPTIN/OPTOUT-able group of the
     * current person.
     *
     * @throws ESCOSubjectNotFoundException
     *             if the person is not found.
     * @throws ESCOSubjectNotUniqueException
     *             if the person is not unique.
     */
    public void findSubscriptionsMethod() throws ESCOSubjectNotFoundException,
            ESCOSubjectNotUniqueException {
        this.data.clear();
        Members member = new Members();
        // The list of groups.
        List < Group > groups = null;
        if (this.getPersonController().getPerson() != null) {
            // The group name from which we want to retrieve the memberships.
            groups = this.getPersonController().getGrouperService()
                    .findGroupsMemberOptinOptout(
                            this.getPersonController().getPerson());
            // Adding the groups to the memberships.
            Iterator < Group > itGroup = groups.iterator();
            while (itGroup.hasNext()) {
                member.addGroup(itGroup.next());
            }
            Iterator < Subject > itSubject = member.getSubjects().iterator();
            while (itSubject.hasNext()) {
                this.data.add(itSubject.next());
            }
        }
    }

    /**
     * Output the data as an XML stream. Reads the jqGrid-style request
     * parameters (rows, page, sidx, sord) and renders the current table page.
     *
     * @return the XML data.
     */
    public String dataResult() {
        final String theRows = this.getParam("rows");
        final String thePage = this.getParam("page");
        final String theSortBy = this.getParam("sidx");
        final String theSortType = this.getParam("sord");
        this.extractItems();
        this.manageRowsDisplaying();
        this.storedData.setIsExistingAddedItem(this.getIsExistAddedItems());
        this.storedData.setNbResultDisplay(theRows);
        this.storedData.setCurrentPage(thePage);
        TableData tableData = TableDataFactory.populate(this.storedData,
                this.sortableRowDataWrapper, theSortBy, theSortType);
        return this.xmlProducerWrapper.wrap(TableDataFactory
                .getProducer(tableData));
    }

    /**
     * Remove all the row that is not necessary to display in the grid.
     * A row is dropped only when the subject is not a member AND cannot
     * opt in (both flags are "false").
     */
    public void manageRowsDisplaying() {
        DistinctSortableList sortablesToAdd = new DistinctSortableList();
        List < Sortable > sortables = this.storedData.getListOfSortable();
        for (Sortable sortable : sortables) {
            String isMember = sortable.getValueFormCol(ESCOConstantes.IS_MEMBER);
            String canOptin = sortable.getValueFormCol(ESCOConstantes.CAN_OPTIN);
            if (!(ESCOConstantes.FALSE.equals(isMember)
                    && ESCOConstantes.FALSE.equals(canOptin))) {
                sortablesToAdd.add(sortable);
            }
        }
        this.storedData.setListOfSortable(sortablesToAdd);
    }

    /**
     * Subscribe or Unsubscribe to a group. Reads the group id and the
     * OPTIN/OPTOUT type from the request, then adds or removes the current
     * person as a member of that group.
     *
     * @return the producted xml
     * @throws ESCOSubjectNotUniqueException
     *             if the user loose the connection.
     * @throws ESCOSubjectNotFoundException
     *             if the user loose the connection.
     * @throws ESCOGroupNotUniqueException
     *             if the group is not unique
     * @throws ESCOGroupNotFoundException
     *             if the group is not found
     */
    public String subscribeOrUnsubscribeToGroup()
            throws ESCOSubjectNotFoundException, ESCOSubjectNotUniqueException,
            ESCOGroupNotFoundException, ESCOGroupNotUniqueException {
        String theGroupId = this
                .getParam(PersonSubscriptionsController.GROUP_ID_PARAMS);
        String theType = this
                .getParam(PersonSubscriptionsController.TYPE_OF_SUBSCRIPTION_PARAM);
        Validate.notNull(theGroupId);
        Validate.notNull(theType);
        Sortable theSortable = null;
        IGrouperService grouperService = this.getPersonController()
                .getGrouperService();
        Person userConnected = PersonController.getConnectedPerson();
        // First look for the group among the rows already loaded.
        for (Sortable sortable : this.data) {
            if (sortable.getValueFormCol(ESCOConstantes.ID_PROPERTY)
                    .equals(theGroupId)) {
                theSortable = sortable;
                break;
            }
        }
        if (theSortable == null) {
            // We try to found the group in grouper
            theSortable = grouperService.findGroupByUid(userConnected,
                    theGroupId);
            if (theSortable == null) {
                this.errorClassesNames.add(ESCOGroupNotFoundException.class
                        .getSimpleName());
            }
        }
        if (theSortable != null) {
            List < String > members = new ArrayList < String >();
            members.add(this.getPersonController().getPerson().getId());
            try {
                // Call the service that will add the memberships
                if (theType.equals(GroupPrivilegeEnum.OPTIN.name())) {
                    grouperService.addMembers(userConnected,
                            theSortable.getValueFormCol("name"), members);
                    theSortable.addMappingFieldCol(ESCOConstantes.IS_MEMBER,
                            ESCOConstantes.TRUE);
                } else {
                    grouperService.removeMembers(userConnected,
                            theSortable.getValueFormCol("name"), members, false);
                    theSortable.addMappingFieldCol(ESCOConstantes.IS_MEMBER,
                            ESCOConstantes.FALSE);
                }
            } catch (ESCOBusinessException ebe) {
                this.handleException(ebe, theGroupId);
            }
        }
        if (this.errorClassesNames.isEmpty()) {
            XmlProducer producer = new XmlProducer();
            producer.setTarget(new Status(true));
            producer.setTypesOfTarget(Status.class);
            return this.xmlProducerWrapper.wrap(producer);
        } else {
            // Get the internationalized message and if not exist the default
            // value.
            String theError = I18nExceptionAdapter.getExceptionString(
                    this.getI18nService(), this.errorClassesNames.get(0),
                    PersonSubscriptionsController.DEFAULT_MESSAGE_EXCEPTION);
            XmlProducer producer = new XmlProducer();
            producer.setTarget(new XMLResultString(theError));
            producer.setTypesOfTarget(XMLResultString.class);
            return this.xmlProducerWrapper.wrap(producer);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean addAdded() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return false;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeDeleted() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return false;
    }

    /**
     * {@inheritDoc}
     */
    public Boolean getIsModified() {
        // There is not a summary for the person 's subscriptions.
        return Boolean.FALSE;
    }

    /**
     * {@inheritDoc}
     */
    public void applyModification(final String theIndex, final String theNewValue) {
        // Not use because there is not a summary for the person 's
        // subscriptions.
    }

    /**
     * {@inheritDoc}
     */
    public void discardModification(final String theIndex) {
        // Not use because there is not a summary for the person 's
        // subscriptions.
    }

    /**
     * {@inheritDoc}
     */
    public String getAttributeKey(final String theIndex) {
        return null;
    }

    /**
     * {@inheritDoc}
     *
     * NOTE(review): returns null even though the inherited
     * {@code errorClassesNames} field is populated above — presumably the
     * summary mechanism is simply unused for this tab; confirm.
     */
    public List < String > getErrorClassesNames() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return null;
    }

    /**
     * {@inheritDoc}
     */
    public List < Resume > getListResume() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isModified() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return false;
    }

    /**
     * {@inheritDoc}
     */
    public Status save() {
        // Not use because there is not a summary for the person 's
        // subscriptions.
        return new Status(Boolean.TRUE);
    }
}
apache-2.0
xasx/camunda-bpm-platform
engine-rest/engine-rest/src/main/java/org/camunda/bpm/engine/rest/util/PathUtil.java
899
/* * Copyright © 2013-2018 camunda services GmbH and various authors (info@camunda.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.rest.util; /** * @author Daniel Meyer * */ public class PathUtil { public static String decodePathParam(String param) { return param .replaceAll("%2F", "/") .replaceAll("%5C", "\\\\"); } }
apache-2.0
blackberry/BB-BigData-Log-Tools
src/com/blackberry/logdriver/locks/LockUtil.java
16849
/** Copyright (c) 2014 BlackBerry Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This class handles locking and unlocking of services within zookeeper. * <p> * It consists of a few utility classes that handle the locking. * <p> * Important! <code>zk.connect.string</code> must be set, or this will fail. */ package com.blackberry.logdriver.locks; import java.io.IOException; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Queue; import java.util.concurrent.SynchronousQueue; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.blackberry.logdriver.fs.PathInfo; public class LockUtil { private static final Logger LOG = LoggerFactory.getLogger(LockUtil.class); private static final String ZK_CONNECT_STRING_PROPERTY = "zk.connect.string"; public static final String ROOT = "/logdriver/locks"; private static final Pattern readPattern = Pattern.compile("read-\\d{10}"); private static final Pattern writePattern = Pattern.compile("write-\\d{10}"); private static final int SESSION_TIMEOUT = 15000; private ZooKeeper zk; public 
LockUtil(ZooKeeper zk) { this.zk = zk; } public LockUtil(String zkConnectString) throws IOException { this.zk = getClient(zkConnectString); } public LockUtil(Configuration conf) throws Exception { String zkConnectString = conf.get(ZK_CONNECT_STRING_PROPERTY); if (zkConnectString == null) { throw new Exception("Configuration item missing: " + ZK_CONNECT_STRING_PROPERTY); } this.zk = getClient(zkConnectString); } private static ZooKeeper getClient(String zkConnectString) throws IOException { ZooKeeper zk = new ZooKeeper(zkConnectString, SESSION_TIMEOUT, new Watcher() { @Override public void process(WatchedEvent event) { } }); return zk; } public String getLockPath(PathInfo pathInfo) throws Exception { return ROOT + pathInfo.getFullPath(); } protected void ensureNodeExists(String path) throws KeeperException, InterruptedException { if (zk.exists(path, false) == null) { String[] parts = path.split("/"); String p = ""; for (int i = 0; i < parts.length; i++) { if ("".equals(parts[i])) { continue; } p += "/" + parts[i]; try { zk.create(p, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } catch (KeeperException.NodeExistsException e) { // That's fine. 
} } } } public ZooKeeper getZkClient() { return zk; } public List<LockInfo> scan(String scanPath) throws Exception { String basePath = ROOT + scanPath; List<LockInfo> lockInfo = new ArrayList<LockInfo>(); Stat stat = new Stat(); // Go down the tree, looking for nodes called 'read-' or 'write-' Queue<String> pathQueue = new ArrayDeque<String>(); pathQueue.add(basePath); while (pathQueue.size() > 0) { String path = pathQueue.remove(); List<String> children; try { children = zk.getChildren(path, false); } catch (KeeperException.NoNodeException e) { continue; } if (children.size() == 0) { zk.getData(path, false, stat); LockInfo li = new LockInfo(); li.setPath(path); li.setReadLockCount(0); li.setWriteLockCount(0); li.setLastModified(stat.getMtime()); lockInfo.add(li); continue; } int read = 0; int write = 0; for (String child : children) { if (readPattern.matcher(child).matches()) { read++; } else if (writePattern.matcher(child).matches()) { write++; } else { pathQueue.add(path + "/" + child); } } if (read > 0 || write > 0) { zk.getData(path, false, stat); LockInfo li = new LockInfo(); li.setPath(path); li.setReadLockCount(read); li.setWriteLockCount(write); li.setLastModified(stat.getMtime()); lockInfo.add(li); continue; } } Collections.sort(lockInfo); return lockInfo; } public void acquireReadLock(String lockPath) throws Exception { final SynchronousQueue<Byte> gate = new SynchronousQueue<Byte>(); LOG.info("Getting read lock on {}", lockPath); // There is a possibility here that the node we're working on will be // deleting while we try all this. So catch any NoNode exceptions and retry // if that happens. while (true) { try { // Ensure the parent exists ensureNodeExists(lockPath); String node = null; // Do we already have a node? 
// --- continuation of acquireReadLock(lockPath): we are inside the retry
// loop's try block; `node` was initialized to null above this point. ---
// First, scan existing children for a "read-" node owned by this session so
// that a retry reuses the node created on a previous attempt instead of
// creating a duplicate.
LOG.debug("Checking for an existing lock");
Stat stat = new Stat();
for (String child : zk.getChildren(lockPath, false)) {
    if (!child.startsWith("read-")) {
        LOG.debug(" {} does not start with read-", child);
        continue;
    }
    // Sometimes someone else will delete their node while I'm searching
    // through them for mine. That's okay!
    try {
        zk.getData(lockPath + "/" + child, false, stat);
    } catch (KeeperException.NoNodeException e) {
        LOG.debug("Node was deleted before I could check if I own it: {}", child, e);
        continue;
    }
    // Ephemeral nodes record the creating session id; a match means this
    // child was created by us.
    if (zk.getSessionId() == stat.getEphemeralOwner()) {
        LOG.debug(" {} is owned by me!", child);
        node = lockPath + "/" + child;
        break;
    }
}
// Create a sequential node under the parent
if (node == null) {
    LOG.debug("Creating a new node");
    node = zk.create(lockPath + "/read-", new byte[0], Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL_SEQUENTIAL);
}
List<String> children;
// The node number is 10 digits at the end of the node name
String nodeNumber = node.substring(node.length() - 10);
String previousNode = null;
String previousNodeNumber = null;
String childNodeNumber = null;
// Wait until no write lock with a smaller sequence number exists. Readers
// only block on writers, so multiple read locks can be held concurrently.
while (true) {
    previousNode = null;
    children = zk.getChildren(lockPath, false);
    LOG.debug("Children = {}", children);
    for (String child : children) {
        // Skip anything that is not a write lock.
        if (!child.startsWith("write-")) {
            continue;
        }
        // So get the number, and if it's less, then wait on it. Otherwise,
        // we have the lock.
        childNodeNumber = child.substring(child.length() - 10);
        if (nodeNumber.compareTo(childNodeNumber) > 0) {
            // This child comes before me. Track the highest-numbered such
            // child: that is the one to watch.
            if (previousNode == null) {
                previousNode = child;
                previousNodeNumber = childNodeNumber;
            } else if (previousNodeNumber.compareTo(childNodeNumber) < 0) {
                previousNode = child;
                previousNodeNumber = childNodeNumber;
            }
        }
        LOG.debug("Previous node={}", previousNode);
    }
    // Watcher hands a token to the gate so the blocked take() below wakes up
    // when the watched node changes.
    Watcher watcher = new Watcher() {

        @Override
        public void process(WatchedEvent event) {
            try {
                gate.put((byte) 0);
            } catch (InterruptedException e) {
                // If this happens, this method will never return.
            }
        }
    };
    if (previousNode == null) {
        // No previous node? We have the lock!
        LOG.debug("No previous node - got lock");
        break;
    }
    // Register the watcher atomically with the existence check.
    stat = zk.exists(lockPath + "/" + previousNode, watcher);
    if (stat == null) {
        // The node we were about to wait on vanished already; re-scan.
        continue;
    }
    // wait for the watcher to get news
    gate.take();
}
} catch (KeeperException.NoNodeException e) {
    // The lock parent itself was deleted mid-operation; start over.
    LOG.warn("Node was deleted while trying to acquire lock. Retrying.");
    continue;
}
break;
}
LOG.info("Got read lock on {}", lockPath);
}

/**
 * Counts children of {@code lockPath} whose names start with "read-",
 * i.e. the number of currently registered read locks.
 * A missing parent node is treated as zero.
 */
public long getReadLockCount(String lockPath) throws Exception {
    int count = 0;
    try {
        for (String child : zk.getChildren(lockPath, false)) {
            if (child.startsWith("read-")) {
                count++;
            }
        }
    } catch (KeeperException.NoNodeException e) {
        // Nothing there? That's a zero.
        return 0;
    }
    return count;
}

/**
 * Releases the read lock held by this session on {@code lockPath}, if any.
 *
 * @return true if a node owned by this session was found and deleted,
 *         false if no read lock belonging to this session exists.
 */
public boolean releaseReadLock(String lockPath) throws Exception {
    LOG.info("Releasing read lock on {}", lockPath);
    // Ensure the parent exists
    ensureNodeExists(lockPath);
    // Do we have a node?
    LOG.debug("Checking for an existing lock");
    Stat stat = new Stat();
    for (String child : zk.getChildren(lockPath, false)) {
        if (!child.startsWith("read-")) {
            LOG.debug(" {} does not start with read-", child);
            continue;
        }
        // Sometimes someone else will delete their node while I'm searching
        // through them for mine. That's okay!
        try {
            zk.getData(lockPath + "/" + child, false, stat);
        } catch (KeeperException.NoNodeException e) {
            LOG.debug("Node was deleted before I could check if I own it: {}", child, e);
            continue;
        }
        // Only delete the node created by this session (ephemeral owner match).
        if (zk.getSessionId() == stat.getEphemeralOwner()) {
            LOG.debug(" {} is owned by me!", child);
            zk.delete(lockPath + "/" + child, -1);
            LOG.info("Released read lock on {}", lockPath);
            return true;
        }
    }
    LOG.info("No read lock found to release on {}", lockPath);
    return false;
}

/**
 * Force-deletes ALL read-lock nodes under {@code lockPath}, regardless of
 * which session owns them. Administrative reset; always returns true.
 */
public boolean resetReadLock(String lockPath) throws Exception {
    LOG.info("Resetting read lock on {}", lockPath);
    // Ensure the parent exists
    ensureNodeExists(lockPath);
    for (String child : zk.getChildren(lockPath, false)) {
        if (child.startsWith("read-")) {
            zk.delete(lockPath + "/" + child, -1);
        }
    }
    return true;
}

/**
 * Blocks until this session holds the exclusive write lock on
 * {@code lockPath}. Unlike the read lock, a writer waits on ANY child
 * (read or write) with a smaller sequence number.
 */
public void acquireWriteLock(String lockPath) throws Exception {
    // Rendezvous between the ZooKeeper watcher thread and this thread.
    final SynchronousQueue<Byte> gate = new SynchronousQueue<Byte>();
    LOG.info("Getting write lock on {}", lockPath);
    // There is a possibility here that the node we're working on will be
    // deleting while we try all this. So catch any NoNode exceptions and retry
    // if that happens.
    while (true) {
        try {
            // Ensure the parent exists
            ensureNodeExists(lockPath);
            String node = null;
            // Do we already have a node?
            LOG.debug("Checking for an existing lock");
            Stat stat = new Stat();
            for (String child : zk.getChildren(lockPath, false)) {
                if (!child.startsWith("write-")) {
                    LOG.debug(" {} does not start with write-", child);
                    continue;
                }
                // Sometimes someone else will delete their node while I'm searching
                // through them for mine. That's okay!
                try {
                    zk.getData(lockPath + "/" + child, false, stat);
                } catch (KeeperException.NoNodeException e) {
                    LOG.debug("Node was deleted before I could check if I own it: {}", child, e);
                    continue;
                }
                if (zk.getSessionId() == stat.getEphemeralOwner()) {
                    LOG.debug(" {} is owned by me!", child);
                    node = lockPath + "/" + child;
                    break;
                }
            }
            // Create a sequential node under the parent
            if (node == null) {
                LOG.debug("Creating a new node");
                node = zk.create(lockPath + "/write-", new byte[0], Ids.OPEN_ACL_UNSAFE,
                    CreateMode.EPHEMERAL_SEQUENTIAL);
            }
            List<String> children;
            // The node number is 10 digits at the end of the node name
            String nodeNumber = node.substring(node.length() - 10);
            String previousNode = null;
            String previousNodeNumber = null;
            String childNodeNumber = null;
            while (true) {
                previousNode = null;
                children = zk.getChildren(lockPath, false);
                LOG.debug("Children = {}", children);
                // NOTE: no startsWith filter here — a writer must wait behind
                // every earlier node, readers included.
                for (String child : children) {
                    // So get the number, and if it's less, then wait on it. Otherwise,
                    // we have the lock.
                    childNodeNumber = child.substring(child.length() - 10);
                    if (nodeNumber.compareTo(childNodeNumber) > 0) {
                        // This child comes before me.
                        if (previousNode == null) {
                            previousNode = child;
                            previousNodeNumber = childNodeNumber;
                        } else if (previousNodeNumber.compareTo(childNodeNumber) < 0) {
                            previousNode = child;
                            previousNodeNumber = childNodeNumber;
                        }
                    }
                    LOG.debug("Previous node={}", previousNode);
                }
                Watcher watcher = new Watcher() {

                    @Override
                    public void process(WatchedEvent event) {
                        try {
                            gate.put((byte) 0);
                        } catch (InterruptedException e) {
                            // If this happens, this method will never return.
                        }
                    }
                };
                if (previousNode == null) {
                    // No previous node? We have the lock!
                    LOG.debug("No previous node - got lock");
                    break;
                }
                stat = zk.exists(lockPath + "/" + previousNode, watcher);
                if (stat == null) {
                    // Predecessor vanished before we could watch it; re-scan.
                    continue;
                }
                // wait for the watcher to get news
                gate.take();
            }
        } catch (KeeperException.NoNodeException e) {
            LOG.warn("Node was deleted while trying to acquire lock. Retrying.");
            continue;
        }
        break;
    }
    LOG.info("Got write lock on {}", lockPath);
}

/**
 * Counts children of {@code lockPath} whose names start with "write-".
 * A missing parent node is treated as zero.
 */
public long getWriteLockCount(String lockPath) throws Exception {
    int count = 0;
    try {
        for (String child : zk.getChildren(lockPath, false)) {
            if (child.startsWith("write-")) {
                count++;
            }
        }
    } catch (KeeperException.NoNodeException e) {
        // Nothing there? That's a zero.
        return 0;
    }
    return count;
}

/**
 * Releases the write lock held by this session on {@code lockPath}, if any.
 *
 * @return true if a node owned by this session was found and deleted,
 *         false otherwise.
 */
public boolean releaseWriteLock(String lockPath) throws Exception {
    LOG.info("Releasing write lock on {}", lockPath);
    // Ensure the parent exists
    ensureNodeExists(lockPath);
    // Do we have a node?
    LOG.debug("Checking for an existing lock");
    Stat stat = new Stat();
    for (String child : zk.getChildren(lockPath, false)) {
        if (!child.startsWith("write-")) {
            LOG.debug(" {} does not start with write-", child);
            continue;
        }
        // Sometimes someone else will delete their node while I'm searching
        // through them for mine. That's okay!
        try {
            zk.getData(lockPath + "/" + child, false, stat);
        } catch (KeeperException.NoNodeException e) {
            LOG.debug("Node was deleted before I could check if I own it: {}", child, e);
            continue;
        }
        if (zk.getSessionId() == stat.getEphemeralOwner()) {
            LOG.debug(" {} is owned by me!", child);
            zk.delete(lockPath + "/" + child, -1);
            LOG.info("Released write lock on {}", lockPath);
            return true;
        }
    }
    LOG.info("No write lock found to release on {}", lockPath);
    return false;
}

/**
 * Force-deletes ALL write-lock nodes under {@code lockPath}, regardless of
 * owning session. Administrative reset; always returns true.
 */
public boolean resetWriteLock(String lockPath) throws Exception {
    LOG.debug("Resetting write lock on {}", lockPath);
    // Ensure the parent exists
    ensureNodeExists(lockPath);
    for (String child : zk.getChildren(lockPath, false)) {
        if (child.startsWith("write-")) {
            zk.delete(lockPath + "/" + child, -1);
        }
    }
    return true;
}

/**
 * Closes the underlying ZooKeeper session; the session's ephemeral lock
 * nodes are released as a consequence.
 */
public void close() throws InterruptedException {
    zk.close();
}
}
apache-2.0
fuyuanwu/springside4
modules/core/src/test/java/org/springside/modules/test/selenium/SeleniumSnapshotRule.java
1012
/*******************************************************************************
 * Copyright (c) 2005, 2014 springside.github.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 *******************************************************************************/
package org.springside.modules.test.selenium;

import org.junit.rules.TestWatcher;
import org.junit.runner.Description;

/**
 * JUnit rule that captures a screen snapshot when a test fails.
 *
 * On failure the screen is saved as a PNG file under
 * target/screenshot/&lt;TestClassName&gt;_&lt;testMethodName&gt;.png.
 *
 * @author fuyuanwu
 */
public class SeleniumSnapshotRule extends TestWatcher {

	// Selenium wrapper that performs the actual screen capture.
	private final Selenium2 s;

	public SeleniumSnapshotRule(Selenium2 s) {
		this.s = s;
	}

	@Override
	protected void failed(Throwable e, Description description) {
		// Name the file <ClassName>_<methodName>.png and delegate the capture
		// to the Selenium2 wrapper.
		String basePath = "target/screenshot/";
		String outputFileName = description.getClassName() + "_" + description.getMethodName() + ".png";
		s.snapshot(basePath, outputFileName);
	}
}
apache-2.0
kay/neverfear-util
src/test/java/org/neverfear/util/TimeDeviceTest.java
2583
package org.neverfear.util;

import static java.lang.Thread.sleep;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.concurrent.TimeUnit;

import org.junit.Before;
import org.junit.Test;

/**
 * Unit tests for {@link TimeDevice}.
 *
 * Covers the ticker (nanosecond) and wall clock (millisecond) factory devices,
 * plus a stubbed fixed-time device used to verify unit reporting and unit
 * conversion on read.
 */
public class TimeDeviceTest {

	/** Fixed reading, in seconds, returned by the stubbed device under test. */
	private static final int TIME_IN_SECONDS = 123;

	private TimeDevice subject;

	@Before
	public void before() {
		// Stub device whose reading never changes, so conversions are exact.
		this.subject = new TimeDevice(TimeUnit.SECONDS) {

			@Override
			public long read() {
				return TIME_IN_SECONDS;
			}
		};
	}

	@Test
	public void givenTicker_whenTimeUnit_expectNanoseconds() {
		/*
		 * Given
		 */
		final TimeDevice ticker = TimeDevice.ticker();

		/*
		 * When
		 */
		final TimeUnit actual = ticker.timeUnit();

		/*
		 * Then
		 */
		assertEquals(TimeUnit.NANOSECONDS, actual);
	}

	@Test
	public void givenTicker_whenReadTwiceSeparatedByOneMillisecond_expectAtLeastOneThousand()
			throws Exception {
		/*
		 * Given
		 */
		final TimeDevice ticker = TimeDevice.ticker();

		/*
		 * When
		 */
		final long first = ticker.read();
		sleep(1);
		final long second = ticker.read();

		/*
		 * Then: 1ms of wall time is at least 1000ns on the ticker. (Lower
		 * bound only; sleep may legitimately overshoot.)
		 */
		final long elapsed = second - first;
		assertTrue(elapsed >= 1000);
	}

	/**
	 * The wall clock reports milliseconds. (Fixed: the method name previously
	 * claimed nanoseconds while the assertion checked MILLISECONDS.)
	 */
	@Test
	public void givenWallClock_whenTimeUnit_expectMilliseconds() {
		/*
		 * Given
		 */
		final TimeDevice wallClock = TimeDevice.wallClock();

		/*
		 * When
		 */
		final TimeUnit actual = wallClock.timeUnit();

		/*
		 * Then
		 */
		assertEquals(TimeUnit.MILLISECONDS, actual);
	}

	@Test
	public void givenWallClock_whenReadTwiceSeparatedByOneMillisecond_expectAtLeastFive()
			throws Exception {
		/*
		 * Given
		 */
		final TimeDevice wallClock = TimeDevice.wallClock();

		/*
		 * When
		 */
		final long first = wallClock.read();
		sleep(5);
		final long second = wallClock.read();

		/*
		 * Then: at least 5ms must have elapsed on the wall clock.
		 */
		final long elapsed = second - first;
		assertTrue(elapsed >= 5);
	}

	@Test
	public void givenTimeDeviceMeasuredInSecond_whenTimeUnit_expectSeconds() {
		assertEquals(TimeUnit.SECONDS, this.subject.timeUnit());
	}

	@Test
	public void givenTimeDevice_whenRead_expectFixedTime() {
		final long actual = this.subject.read();
		assertEquals(TIME_IN_SECONDS, actual);
	}

	@Test
	public void givenTimeDevice_whenReadAsMilliseconds_expectTimeRepresentedAsMilliseconds() {
		final long actual = this.subject.read(TimeUnit.MILLISECONDS);
		// 1000L forces the long/long assertEquals overload.
		assertEquals(TIME_IN_SECONDS * 1000L, actual);
	}

	@Test
	public void givenTimeDevice_whenReadAsSameUnits_expectTimeRepresentedAsSeconds() {
		final long actual = this.subject.read(TimeUnit.SECONDS);
		assertEquals(TIME_IN_SECONDS, actual);
	}
}
apache-2.0
davidzchen/bazel
src/main/java/com/google/devtools/build/lib/rules/android/AndroidPreDexJarProvider.java
2074
// Copyright 2015 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.rules.android; import com.google.devtools.build.lib.actions.Artifact; import com.google.devtools.build.lib.concurrent.ThreadSafety.Immutable; import com.google.devtools.build.lib.packages.BuiltinProvider; import com.google.devtools.build.lib.packages.NativeInfo; import com.google.devtools.build.lib.starlarkbuildapi.android.AndroidPreDexJarProviderApi; import net.starlark.java.eval.EvalException; /** A provider of the final Jar to be dexed for targets that build APKs. */ @Immutable public final class AndroidPreDexJarProvider extends NativeInfo implements AndroidPreDexJarProviderApi<Artifact> { public static final Provider PROVIDER = new Provider(); private final Artifact preDexJar; /** Returns the jar to be dexed. */ @Override public Artifact getPreDexJar() { return preDexJar; } public AndroidPreDexJarProvider(Artifact preDexJar) { super(PROVIDER); this.preDexJar = preDexJar; } /** Provider class for {@link AndroidPreDexJarProvider} objects. */ public static class Provider extends BuiltinProvider<AndroidPreDexJarProvider> implements AndroidPreDexJarProviderApi.Provider<Artifact> { private Provider() { super(NAME, AndroidPreDexJarProvider.class); } @Override public AndroidPreDexJarProviderApi<Artifact> createInfo(Artifact preDexJar) throws EvalException { return new AndroidPreDexJarProvider(preDexJar); } } }
apache-2.0
gamerson/liferay-blade-samples
maven/apps/service-builder/adq/adq-service/src/main/java/com/liferay/blade/samples/servicebuilder/adq/model/impl/BarModelImpl.java
22836
/** * Copyright 2000-present Liferay, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.liferay.blade.samples.servicebuilder.adq.model.impl; import com.liferay.blade.samples.servicebuilder.adq.model.Bar; import com.liferay.blade.samples.servicebuilder.adq.model.BarModel; import com.liferay.blade.samples.servicebuilder.adq.model.BarSoap; import com.liferay.expando.kernel.model.ExpandoBridge; import com.liferay.expando.kernel.util.ExpandoBridgeFactoryUtil; import com.liferay.exportimport.kernel.lar.StagedModelType; import com.liferay.petra.string.StringBundler; import com.liferay.portal.kernel.bean.AutoEscapeBeanHandler; import com.liferay.portal.kernel.exception.PortalException; import com.liferay.portal.kernel.json.JSON; import com.liferay.portal.kernel.model.CacheModel; import com.liferay.portal.kernel.model.ModelWrapper; import com.liferay.portal.kernel.model.User; import com.liferay.portal.kernel.model.impl.BaseModelImpl; import com.liferay.portal.kernel.service.ServiceContext; import com.liferay.portal.kernel.service.UserLocalServiceUtil; import com.liferay.portal.kernel.util.GetterUtil; import com.liferay.portal.kernel.util.PortalUtil; import com.liferay.portal.kernel.util.ProxyUtil; import java.io.Serializable; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationHandler; import java.sql.Types; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.LinkedHashMap; import 
java.util.List; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; /** * The base model implementation for the Bar service. Represents a row in the &quot;ADQ_Bar&quot; database table, with each column mapped to a property of this class. * * <p> * This implementation and its corresponding interface </code>BarModel</code> exist only as a container for the default property accessors generated by ServiceBuilder. Helper methods and all application logic should be put in {@link BarImpl}. * </p> * * @author Brian Wing Shun Chan * @see BarImpl * @generated */ @JSON(strict = true) public class BarModelImpl extends BaseModelImpl<Bar> implements BarModel { /** * NOTE FOR DEVELOPERS: * * Never modify or reference this class directly. All methods that expect a bar model instance should use the <code>Bar</code> interface instead. */ public static final String TABLE_NAME = "ADQ_Bar"; public static final Object[][] TABLE_COLUMNS = { {"uuid_", Types.VARCHAR}, {"barId", Types.BIGINT}, {"groupId", Types.BIGINT}, {"companyId", Types.BIGINT}, {"userId", Types.BIGINT}, {"userName", Types.VARCHAR}, {"createDate", Types.TIMESTAMP}, {"modifiedDate", Types.TIMESTAMP}, {"field1", Types.VARCHAR}, {"field2", Types.BOOLEAN}, {"field3", Types.INTEGER}, {"field4", Types.TIMESTAMP}, {"field5", Types.VARCHAR} }; public static final Map<String, Integer> TABLE_COLUMNS_MAP = new HashMap<String, Integer>(); static { TABLE_COLUMNS_MAP.put("uuid_", Types.VARCHAR); TABLE_COLUMNS_MAP.put("barId", Types.BIGINT); TABLE_COLUMNS_MAP.put("groupId", Types.BIGINT); TABLE_COLUMNS_MAP.put("companyId", Types.BIGINT); TABLE_COLUMNS_MAP.put("userId", Types.BIGINT); TABLE_COLUMNS_MAP.put("userName", Types.VARCHAR); TABLE_COLUMNS_MAP.put("createDate", Types.TIMESTAMP); TABLE_COLUMNS_MAP.put("modifiedDate", Types.TIMESTAMP); TABLE_COLUMNS_MAP.put("field1", Types.VARCHAR); TABLE_COLUMNS_MAP.put("field2", Types.BOOLEAN); TABLE_COLUMNS_MAP.put("field3", Types.INTEGER); 
TABLE_COLUMNS_MAP.put("field4", Types.TIMESTAMP); TABLE_COLUMNS_MAP.put("field5", Types.VARCHAR); } public static final String TABLE_SQL_CREATE = "create table ADQ_Bar (uuid_ VARCHAR(75) null,barId LONG not null primary key,groupId LONG,companyId LONG,userId LONG,userName VARCHAR(75) null,createDate DATE null,modifiedDate DATE null,field1 VARCHAR(75) null,field2 BOOLEAN,field3 INTEGER,field4 DATE null,field5 VARCHAR(75) null)"; public static final String TABLE_SQL_DROP = "drop table ADQ_Bar"; public static final String ORDER_BY_JPQL = " ORDER BY bar.field1 ASC"; public static final String ORDER_BY_SQL = " ORDER BY ADQ_Bar.field1 ASC"; public static final String DATA_SOURCE = "liferayDataSource"; public static final String SESSION_FACTORY = "liferaySessionFactory"; public static final String TX_MANAGER = "liferayTransactionManager"; public static final boolean ENTITY_CACHE_ENABLED = GetterUtil.getBoolean( com.liferay.blade.samples.servicebuilder.service.util.PropsUtil.get( "value.object.entity.cache.enabled.com.liferay.blade.samples.servicebuilder.adq.model.Bar"), true); public static final boolean FINDER_CACHE_ENABLED = GetterUtil.getBoolean( com.liferay.blade.samples.servicebuilder.service.util.PropsUtil.get( "value.object.finder.cache.enabled.com.liferay.blade.samples.servicebuilder.adq.model.Bar"), true); public static final boolean COLUMN_BITMASK_ENABLED = GetterUtil.getBoolean( com.liferay.blade.samples.servicebuilder.service.util.PropsUtil.get( "value.object.column.bitmask.enabled.com.liferay.blade.samples.servicebuilder.adq.model.Bar"), true); public static final long COMPANYID_COLUMN_BITMASK = 1L; public static final long FIELD2_COLUMN_BITMASK = 2L; public static final long GROUPID_COLUMN_BITMASK = 4L; public static final long UUID_COLUMN_BITMASK = 8L; public static final long FIELD1_COLUMN_BITMASK = 16L; /** * Converts the soap model instance into a normal model instance. 
* * @param soapModel the soap model instance to convert * @return the normal model instance */ public static Bar toModel(BarSoap soapModel) { if (soapModel == null) { return null; } Bar model = new BarImpl(); model.setUuid(soapModel.getUuid()); model.setBarId(soapModel.getBarId()); model.setGroupId(soapModel.getGroupId()); model.setCompanyId(soapModel.getCompanyId()); model.setUserId(soapModel.getUserId()); model.setUserName(soapModel.getUserName()); model.setCreateDate(soapModel.getCreateDate()); model.setModifiedDate(soapModel.getModifiedDate()); model.setField1(soapModel.getField1()); model.setField2(soapModel.isField2()); model.setField3(soapModel.getField3()); model.setField4(soapModel.getField4()); model.setField5(soapModel.getField5()); return model; } /** * Converts the soap model instances into normal model instances. * * @param soapModels the soap model instances to convert * @return the normal model instances */ public static List<Bar> toModels(BarSoap[] soapModels) { if (soapModels == null) { return null; } List<Bar> models = new ArrayList<Bar>(soapModels.length); for (BarSoap soapModel : soapModels) { models.add(toModel(soapModel)); } return models; } public static final long LOCK_EXPIRATION_TIME = GetterUtil.getLong( com.liferay.blade.samples.servicebuilder.service.util.PropsUtil.get( "lock.expiration.time.com.liferay.blade.samples.servicebuilder.adq.model.Bar")); public BarModelImpl() { } @Override public long getPrimaryKey() { return _barId; } @Override public void setPrimaryKey(long primaryKey) { setBarId(primaryKey); } @Override public Serializable getPrimaryKeyObj() { return _barId; } @Override public void setPrimaryKeyObj(Serializable primaryKeyObj) { setPrimaryKey(((Long)primaryKeyObj).longValue()); } @Override public Class<?> getModelClass() { return Bar.class; } @Override public String getModelClassName() { return Bar.class.getName(); } @Override public Map<String, Object> getModelAttributes() { Map<String, Object> attributes = new 
HashMap<String, Object>(); Map<String, Function<Bar, Object>> attributeGetterFunctions = getAttributeGetterFunctions(); for (Map.Entry<String, Function<Bar, Object>> entry : attributeGetterFunctions.entrySet()) { String attributeName = entry.getKey(); Function<Bar, Object> attributeGetterFunction = entry.getValue(); attributes.put( attributeName, attributeGetterFunction.apply((Bar)this)); } attributes.put("entityCacheEnabled", isEntityCacheEnabled()); attributes.put("finderCacheEnabled", isFinderCacheEnabled()); return attributes; } @Override public void setModelAttributes(Map<String, Object> attributes) { Map<String, BiConsumer<Bar, Object>> attributeSetterBiConsumers = getAttributeSetterBiConsumers(); for (Map.Entry<String, Object> entry : attributes.entrySet()) { String attributeName = entry.getKey(); BiConsumer<Bar, Object> attributeSetterBiConsumer = attributeSetterBiConsumers.get(attributeName); if (attributeSetterBiConsumer != null) { attributeSetterBiConsumer.accept((Bar)this, entry.getValue()); } } } public Map<String, Function<Bar, Object>> getAttributeGetterFunctions() { return _attributeGetterFunctions; } public Map<String, BiConsumer<Bar, Object>> getAttributeSetterBiConsumers() { return _attributeSetterBiConsumers; } private static Function<InvocationHandler, Bar> _getProxyProviderFunction() { Class<?> proxyClass = ProxyUtil.getProxyClass( Bar.class.getClassLoader(), Bar.class, ModelWrapper.class); try { Constructor<Bar> constructor = (Constructor<Bar>)proxyClass.getConstructor( InvocationHandler.class); return invocationHandler -> { try { return constructor.newInstance(invocationHandler); } catch (ReflectiveOperationException roe) { throw new InternalError(roe); } }; } catch (NoSuchMethodException nsme) { throw new InternalError(nsme); } } private static final Map<String, Function<Bar, Object>> _attributeGetterFunctions; private static final Map<String, BiConsumer<Bar, Object>> _attributeSetterBiConsumers; static { Map<String, Function<Bar, Object>> 
attributeGetterFunctions = new LinkedHashMap<String, Function<Bar, Object>>(); Map<String, BiConsumer<Bar, ?>> attributeSetterBiConsumers = new LinkedHashMap<String, BiConsumer<Bar, ?>>(); attributeGetterFunctions.put("uuid", Bar::getUuid); attributeSetterBiConsumers.put( "uuid", (BiConsumer<Bar, String>)Bar::setUuid); attributeGetterFunctions.put("barId", Bar::getBarId); attributeSetterBiConsumers.put( "barId", (BiConsumer<Bar, Long>)Bar::setBarId); attributeGetterFunctions.put("groupId", Bar::getGroupId); attributeSetterBiConsumers.put( "groupId", (BiConsumer<Bar, Long>)Bar::setGroupId); attributeGetterFunctions.put("companyId", Bar::getCompanyId); attributeSetterBiConsumers.put( "companyId", (BiConsumer<Bar, Long>)Bar::setCompanyId); attributeGetterFunctions.put("userId", Bar::getUserId); attributeSetterBiConsumers.put( "userId", (BiConsumer<Bar, Long>)Bar::setUserId); attributeGetterFunctions.put("userName", Bar::getUserName); attributeSetterBiConsumers.put( "userName", (BiConsumer<Bar, String>)Bar::setUserName); attributeGetterFunctions.put("createDate", Bar::getCreateDate); attributeSetterBiConsumers.put( "createDate", (BiConsumer<Bar, Date>)Bar::setCreateDate); attributeGetterFunctions.put("modifiedDate", Bar::getModifiedDate); attributeSetterBiConsumers.put( "modifiedDate", (BiConsumer<Bar, Date>)Bar::setModifiedDate); attributeGetterFunctions.put("field1", Bar::getField1); attributeSetterBiConsumers.put( "field1", (BiConsumer<Bar, String>)Bar::setField1); attributeGetterFunctions.put("field2", Bar::getField2); attributeSetterBiConsumers.put( "field2", (BiConsumer<Bar, Boolean>)Bar::setField2); attributeGetterFunctions.put("field3", Bar::getField3); attributeSetterBiConsumers.put( "field3", (BiConsumer<Bar, Integer>)Bar::setField3); attributeGetterFunctions.put("field4", Bar::getField4); attributeSetterBiConsumers.put( "field4", (BiConsumer<Bar, Date>)Bar::setField4); attributeGetterFunctions.put("field5", Bar::getField5); attributeSetterBiConsumers.put( 
"field5", (BiConsumer<Bar, String>)Bar::setField5); _attributeGetterFunctions = Collections.unmodifiableMap( attributeGetterFunctions); _attributeSetterBiConsumers = Collections.unmodifiableMap( (Map)attributeSetterBiConsumers); } @JSON @Override public String getUuid() { if (_uuid == null) { return ""; } else { return _uuid; } } @Override public void setUuid(String uuid) { _columnBitmask |= UUID_COLUMN_BITMASK; if (_originalUuid == null) { _originalUuid = _uuid; } _uuid = uuid; } public String getOriginalUuid() { return GetterUtil.getString(_originalUuid); } @JSON @Override public long getBarId() { return _barId; } @Override public void setBarId(long barId) { _barId = barId; } @JSON @Override public long getGroupId() { return _groupId; } @Override public void setGroupId(long groupId) { _columnBitmask |= GROUPID_COLUMN_BITMASK; if (!_setOriginalGroupId) { _setOriginalGroupId = true; _originalGroupId = _groupId; } _groupId = groupId; } public long getOriginalGroupId() { return _originalGroupId; } @JSON @Override public long getCompanyId() { return _companyId; } @Override public void setCompanyId(long companyId) { _columnBitmask |= COMPANYID_COLUMN_BITMASK; if (!_setOriginalCompanyId) { _setOriginalCompanyId = true; _originalCompanyId = _companyId; } _companyId = companyId; } public long getOriginalCompanyId() { return _originalCompanyId; } @JSON @Override public long getUserId() { return _userId; } @Override public void setUserId(long userId) { _userId = userId; } @Override public String getUserUuid() { try { User user = UserLocalServiceUtil.getUserById(getUserId()); return user.getUuid(); } catch (PortalException pe) { return ""; } } @Override public void setUserUuid(String userUuid) { } @JSON @Override public String getUserName() { if (_userName == null) { return ""; } else { return _userName; } } @Override public void setUserName(String userName) { _userName = userName; } @JSON @Override public Date getCreateDate() { return _createDate; } @Override public void 
setCreateDate(Date createDate) { _createDate = createDate; } @JSON @Override public Date getModifiedDate() { return _modifiedDate; } public boolean hasSetModifiedDate() { return _setModifiedDate; } @Override public void setModifiedDate(Date modifiedDate) { _setModifiedDate = true; _modifiedDate = modifiedDate; } @JSON @Override public String getField1() { if (_field1 == null) { return ""; } else { return _field1; } } @Override public void setField1(String field1) { _columnBitmask = -1L; _field1 = field1; } @JSON @Override public boolean getField2() { return _field2; } @JSON @Override public boolean isField2() { return _field2; } @Override public void setField2(boolean field2) { _columnBitmask |= FIELD2_COLUMN_BITMASK; if (!_setOriginalField2) { _setOriginalField2 = true; _originalField2 = _field2; } _field2 = field2; } public boolean getOriginalField2() { return _originalField2; } @JSON @Override public int getField3() { return _field3; } @Override public void setField3(int field3) { _field3 = field3; } @JSON @Override public Date getField4() { return _field4; } @Override public void setField4(Date field4) { _field4 = field4; } @JSON @Override public String getField5() { if (_field5 == null) { return ""; } else { return _field5; } } @Override public void setField5(String field5) { _field5 = field5; } @Override public StagedModelType getStagedModelType() { return new StagedModelType( PortalUtil.getClassNameId(Bar.class.getName())); } public long getColumnBitmask() { return _columnBitmask; } @Override public ExpandoBridge getExpandoBridge() { return ExpandoBridgeFactoryUtil.getExpandoBridge( getCompanyId(), Bar.class.getName(), getPrimaryKey()); } @Override public void setExpandoBridgeAttributes(ServiceContext serviceContext) { ExpandoBridge expandoBridge = getExpandoBridge(); expandoBridge.setAttributes(serviceContext); } @Override public Bar toEscapedModel() { if (_escapedModel == null) { Function<InvocationHandler, Bar> escapedModelProxyProviderFunction = 
EscapedModelProxyProviderFunctionHolder. _escapedModelProxyProviderFunction; _escapedModel = escapedModelProxyProviderFunction.apply( new AutoEscapeBeanHandler(this)); } return _escapedModel; } @Override public Object clone() { BarImpl barImpl = new BarImpl(); barImpl.setUuid(getUuid()); barImpl.setBarId(getBarId()); barImpl.setGroupId(getGroupId()); barImpl.setCompanyId(getCompanyId()); barImpl.setUserId(getUserId()); barImpl.setUserName(getUserName()); barImpl.setCreateDate(getCreateDate()); barImpl.setModifiedDate(getModifiedDate()); barImpl.setField1(getField1()); barImpl.setField2(isField2()); barImpl.setField3(getField3()); barImpl.setField4(getField4()); barImpl.setField5(getField5()); barImpl.resetOriginalValues(); return barImpl; } @Override public int compareTo(Bar bar) { int value = 0; value = getField1().compareTo(bar.getField1()); if (value != 0) { return value; } return 0; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (!(obj instanceof Bar)) { return false; } Bar bar = (Bar)obj; long primaryKey = bar.getPrimaryKey(); if (getPrimaryKey() == primaryKey) { return true; } else { return false; } } @Override public int hashCode() { return (int)getPrimaryKey(); } @Override public boolean isEntityCacheEnabled() { return ENTITY_CACHE_ENABLED; } @Override public boolean isFinderCacheEnabled() { return FINDER_CACHE_ENABLED; } @Override public void resetOriginalValues() { BarModelImpl barModelImpl = this; barModelImpl._originalUuid = barModelImpl._uuid; barModelImpl._originalGroupId = barModelImpl._groupId; barModelImpl._setOriginalGroupId = false; barModelImpl._originalCompanyId = barModelImpl._companyId; barModelImpl._setOriginalCompanyId = false; barModelImpl._setModifiedDate = false; barModelImpl._originalField2 = barModelImpl._field2; barModelImpl._setOriginalField2 = false; barModelImpl._columnBitmask = 0; } @Override public CacheModel<Bar> toCacheModel() { BarCacheModel barCacheModel = new BarCacheModel(); 
barCacheModel.uuid = getUuid(); String uuid = barCacheModel.uuid; if ((uuid != null) && (uuid.length() == 0)) { barCacheModel.uuid = null; } barCacheModel.barId = getBarId(); barCacheModel.groupId = getGroupId(); barCacheModel.companyId = getCompanyId(); barCacheModel.userId = getUserId(); barCacheModel.userName = getUserName(); String userName = barCacheModel.userName; if ((userName != null) && (userName.length() == 0)) { barCacheModel.userName = null; } Date createDate = getCreateDate(); if (createDate != null) { barCacheModel.createDate = createDate.getTime(); } else { barCacheModel.createDate = Long.MIN_VALUE; } Date modifiedDate = getModifiedDate(); if (modifiedDate != null) { barCacheModel.modifiedDate = modifiedDate.getTime(); } else { barCacheModel.modifiedDate = Long.MIN_VALUE; } barCacheModel.field1 = getField1(); String field1 = barCacheModel.field1; if ((field1 != null) && (field1.length() == 0)) { barCacheModel.field1 = null; } barCacheModel.field2 = isField2(); barCacheModel.field3 = getField3(); Date field4 = getField4(); if (field4 != null) { barCacheModel.field4 = field4.getTime(); } else { barCacheModel.field4 = Long.MIN_VALUE; } barCacheModel.field5 = getField5(); String field5 = barCacheModel.field5; if ((field5 != null) && (field5.length() == 0)) { barCacheModel.field5 = null; } return barCacheModel; } @Override public String toString() { Map<String, Function<Bar, Object>> attributeGetterFunctions = getAttributeGetterFunctions(); StringBundler sb = new StringBundler( 4 * attributeGetterFunctions.size() + 2); sb.append("{"); for (Map.Entry<String, Function<Bar, Object>> entry : attributeGetterFunctions.entrySet()) { String attributeName = entry.getKey(); Function<Bar, Object> attributeGetterFunction = entry.getValue(); sb.append(attributeName); sb.append("="); sb.append(attributeGetterFunction.apply((Bar)this)); sb.append(", "); } if (sb.index() > 1) { sb.setIndex(sb.index() - 1); } sb.append("}"); return sb.toString(); } @Override public String 
toXmlString() { Map<String, Function<Bar, Object>> attributeGetterFunctions = getAttributeGetterFunctions(); StringBundler sb = new StringBundler( 5 * attributeGetterFunctions.size() + 4); sb.append("<model><model-name>"); sb.append(getModelClassName()); sb.append("</model-name>"); for (Map.Entry<String, Function<Bar, Object>> entry : attributeGetterFunctions.entrySet()) { String attributeName = entry.getKey(); Function<Bar, Object> attributeGetterFunction = entry.getValue(); sb.append("<column><column-name>"); sb.append(attributeName); sb.append("</column-name><column-value><![CDATA["); sb.append(attributeGetterFunction.apply((Bar)this)); sb.append("]]></column-value></column>"); } sb.append("</model>"); return sb.toString(); } private static class EscapedModelProxyProviderFunctionHolder { private static final Function<InvocationHandler, Bar> _escapedModelProxyProviderFunction = _getProxyProviderFunction(); } private String _uuid; private String _originalUuid; private long _barId; private long _groupId; private long _originalGroupId; private boolean _setOriginalGroupId; private long _companyId; private long _originalCompanyId; private boolean _setOriginalCompanyId; private long _userId; private String _userName; private Date _createDate; private Date _modifiedDate; private boolean _setModifiedDate; private String _field1; private boolean _field2; private boolean _originalField2; private boolean _setOriginalField2; private int _field3; private Date _field4; private String _field5; private long _columnBitmask; private Bar _escapedModel; }
apache-2.0
unidal/cat
report-event/src/test/java/com/dianping/cat/event/EventDailyGraphMergerTest.java
1493
package com.dianping.cat.event; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import junit.framework.Assert; import org.junit.Before; import org.junit.Test; import org.unidal.helper.Files; import com.dianping.cat.event.model.entity.EventReport; import com.dianping.cat.event.model.transform.DefaultSaxParser; import com.dianping.cat.event.task.EventMerger; public class EventDailyGraphMergerTest { private EventMerger m_meger = new EventMerger(); private Set<String> m_domains = new HashSet<String>(); private String m_reportDomain = "MobileApi"; List<EventReport> reports = new ArrayList<EventReport>(); @Before public void setUp() { m_domains.add("MobileApi"); m_domains.add("MobileApi1"); for (int i = 0; i < 5; i++) { reports.add(creatReport()); } } @Test public void testForMergerDaily() throws Exception { EventReport report = m_meger.mergeForDaily(m_reportDomain, reports, m_domains, 1); String expeted = Files.forIO().readFrom(getClass().getResourceAsStream("EventMergerDaily.xml"), "utf-8"); Assert.assertEquals(expeted.replaceAll("\r", ""), report.toString().replaceAll("\r", "")); } private EventReport creatReport() { EventReport result = new EventReport(); try { String xml = Files.forIO().readFrom(getClass().getResourceAsStream("BaseEventReport.xml"), "utf-8"); return DefaultSaxParser.parse(xml); } catch (Exception e) { e.printStackTrace(); } return result; } }
apache-2.0
daradurvs/ignite
modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java
122472
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite; import java.io.Serializable; import java.lang.management.RuntimeMXBean; import java.util.Arrays; import java.util.Iterator; import java.util.Map; import java.util.Properties; import javax.net.ssl.HostnameVerifier; import org.apache.ignite.cache.CacheEntryProcessor; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.DiskPageCompression; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.metric.GridMetricManager; import org.apache.ignite.internal.processors.rest.GridRestCommand; import org.apache.ignite.internal.util.GridLogThrottle; import org.apache.ignite.lang.IgniteExperimental; import org.apache.ignite.mxbean.MetricsMxBean; import 
org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; import org.apache.ignite.stream.StreamTransformer; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.cache.CacheManager.DFLT_JCACHE_DEFAULT_ISOLATED; import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_USE_ASYNC_FILE_IO_FACTORY; import static org.apache.ignite.internal.IgniteKernal.DFLT_EVENT_DRIVEN_SERVICE_PROCESSOR_ENABLED; import static org.apache.ignite.internal.IgniteKernal.DFLT_LOG_CLASSPATH_CONTENT_ON_STARTUP; import static org.apache.ignite.internal.IgniteKernal.DFLT_LONG_OPERATIONS_DUMP_TIMEOUT; import static org.apache.ignite.internal.IgniteKernal.DFLT_PERIODIC_STARVATION_CHECK_FREQ; import static org.apache.ignite.internal.LongJVMPauseDetector.DEFAULT_JVM_PAUSE_DETECTOR_THRESHOLD; import static org.apache.ignite.internal.LongJVMPauseDetector.DFLT_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT; import static org.apache.ignite.internal.LongJVMPauseDetector.DFLT_JVM_PAUSE_DETECTOR_PRECISION; import static org.apache.ignite.internal.binary.streams.BinaryMemoryAllocator.DFLT_MARSHAL_BUFFERS_PER_THREAD_POOL_SIZE; import static org.apache.ignite.internal.binary.streams.BinaryMemoryAllocator.DFLT_MARSHAL_BUFFERS_RECHECK; import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DFLT_DISCOVERY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.affinity.AffinityAssignment.DFLT_AFFINITY_BACKUPS_THRESHOLD; import static org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache.DFLT_AFFINITY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache.DFLT_PART_DISTRIBUTION_WARN_THRESHOLD; import static org.apache.ignite.internal.processors.cache.CacheAffinitySharedManager.DFLT_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT; import static org.apache.ignite.internal.processors.cache.GridCacheAdapter.DFLT_CACHE_RETRIES_COUNT; import static 
org.apache.ignite.internal.processors.cache.GridCacheAdapter.DFLT_CACHE_START_SIZE; import static org.apache.ignite.internal.processors.cache.GridCacheContext.DFLT_READ_LOAD_BALANCING; import static org.apache.ignite.internal.processors.cache.GridCacheMvccManager.DFLT_MAX_NESTED_LISTENER_CALLS; import static org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager.DFLT_DIAGNOSTIC_WARN_LIMIT; import static org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager.DFLT_EXCHANGE_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager.DFLT_EXCHANGE_MERGE_DELAY; import static org.apache.ignite.internal.processors.cache.GridCacheProcessor.DFLT_ALLOW_START_CACHES_IN_PARALLEL; import static org.apache.ignite.internal.processors.cache.GridCacheTtlManager.DFLT_UNWIND_THROTTLING_TIMEOUT; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.DFLT_TTL_EXPIRE_BATCH_SIZE; import static org.apache.ignite.internal.processors.cache.WalStateManager.DFLT_DISABLE_WAL_DURING_REBALANCING; import static org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl.DFLT_WAIT_SCHEMA_UPDATE; import static org.apache.ignite.internal.processors.cache.distributed.dht.CacheDistributedGetFutureAdapter.DFLT_MAX_REMAP_CNT; import static org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicCache.DFLT_ATOMIC_DEFERRED_ACK_BUFFER_SIZE; import static org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicCache.DFLT_ATOMIC_DEFERRED_ACK_TIMEOUT; import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.DFLT_LONG_OPERATIONS_DUMP_TIMEOUT_LIMIT; import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.DFLT_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD; import static 
org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader.DFLT_PRELOAD_RESEND_TIMEOUT; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_ATOMIC_CACHE_DELETE_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_CACHE_REMOVE_ENTRIES_TTL; import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory.DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointWorkflow.DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD; import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.DFLT_PAGE_LOCK_TRACKER_CAPACITY; import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.HEAP_LOG; import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.SharedPageLockTracker.DFLT_PAGE_LOCK_TRACKER_CHECK_INTERVAL; import static org.apache.ignite.internal.processors.cache.persistence.pagemem.FullPageIdTable.DFLT_LONG_LONG_HASH_MAP_LOAD_FACTOR; import static org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl.DFLT_DELAYED_REPLACED_PAGE_WRITE; import static org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl.DFLT_LOADED_PAGES_BACKWARD_SHIFT_MAP; import static org.apache.ignite.internal.processors.cache.persistence.pagemem.PagesWriteThrottlePolicy.DFLT_THROTTLE_LOG_THRESHOLD; import static 
org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.IGNITE_BPLUS_TREE_LOCK_RETRIES_DEFAULT; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.DFLT_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.DFLT_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.DFLT_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.DFLT_WAL_COMPRESSOR_WORKER_THREAD_CNT; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.DFLT_WAL_MMAP; import static org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileHandleManagerImpl.DFLT_WAL_SEGMENT_SYNC_TIMEOUT; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory.LATEST_SERIALIZER_VERSION; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_DUMP_TX_COLLISIONS_INTERVAL; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_MAX_COMPLETED_TX_CNT; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_SLOW_TX_WARN_TIMEOUT; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT; import static org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_TX_DEADLOCK_DETECTION_MAX_ITERS; import static 
org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager.DFLT_TX_OWNER_DUMP_REQUESTS_ALLOWED; import static org.apache.ignite.internal.processors.cache.transactions.TxDeadlockDetection.DFLT_TX_DEADLOCK_DETECTION_TIMEOUT; import static org.apache.ignite.internal.processors.cluster.ClusterProcessor.DFLT_DIAGNOSTIC_ENABLED; import static org.apache.ignite.internal.processors.cluster.ClusterProcessor.DFLT_UPDATE_NOTIFIER; import static org.apache.ignite.internal.processors.cluster.baseline.autoadjust.ChangeTopologyWatcher.DFLT_BASELINE_AUTO_ADJUST_LOG_INTERVAL; import static org.apache.ignite.internal.processors.datastructures.GridAtomicCacheQueueImpl.DFLT_ATOMIC_CACHE_QUERY_RETRY_TIMEOUT; import static org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DFLT_DUMP_PAGE_LOCK_ON_FAILURE; import static org.apache.ignite.internal.processors.failure.FailureProcessor.DFLT_FAILURE_HANDLER_RESERVE_BUFFER_SIZE; import static org.apache.ignite.internal.processors.job.GridJobProcessor.DFLT_JOBS_HISTORY_SIZE; import static org.apache.ignite.internal.processors.jobmetrics.GridJobMetricsProcessor.DFLT_JOBS_METRICS_CONCURRENCY_LEVEL; import static org.apache.ignite.internal.processors.metastorage.persistence.DistributedMetaStorageImpl.DFLT_MAX_HISTORY_BYTES; import static org.apache.ignite.internal.processors.query.QueryUtils.DFLT_INDEXING_DISCOVERY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.rest.GridRestProcessor.DFLT_SES_TIMEOUT; import static org.apache.ignite.internal.processors.rest.GridRestProcessor.DFLT_SES_TOKEN_INVALIDATE_INTERVAL; import static org.apache.ignite.internal.processors.rest.handlers.task.GridTaskCommandHandler.DFLT_MAX_TASK_RESULTS; import static org.apache.ignite.internal.util.GridLogThrottle.DFLT_LOG_THROTTLE_CAPACITY; import static org.apache.ignite.internal.util.GridReflectionCache.DFLT_REFLECTION_CACHE_SIZE; import static org.apache.ignite.internal.util.GridUnsafe.DFLT_MEMORY_PER_BYTE_COPY_THRESHOLD; 
import static org.apache.ignite.internal.util.IgniteExceptionRegistry.DEFAULT_QUEUE_SIZE; import static org.apache.ignite.internal.util.IgniteUtils.DFLT_MBEAN_APPEND_CLASS_LOADER_ID; import static org.apache.ignite.internal.util.StripedExecutor.DFLT_DATA_STREAMING_EXECUTOR_SERVICE_TASKS_STEALING_THRESHOLD; import static org.apache.ignite.internal.util.nio.GridNioRecoveryDescriptor.DFLT_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT; import static org.apache.ignite.internal.util.nio.GridNioServer.DFLT_IO_BALANCE_PERIOD; import static org.apache.ignite.internal.util.tostring.GridToStringBuilder.DFLT_TO_STRING_COLLECTION_LIMIT; import static org.apache.ignite.internal.util.tostring.GridToStringBuilder.DFLT_TO_STRING_INCLUDE_SENSITIVE; import static org.apache.ignite.internal.util.tostring.GridToStringBuilder.DFLT_TO_STRING_MAX_LENGTH; import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DFLT_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE; import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DFLT_DISCOVERY_METRICS_QNT_WARN; import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DFLT_DISCO_FAILED_CLIENT_RECONNECT_DELAY; import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DFLT_NODE_IDS_HISTORY_SIZE; import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DFLT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL; import static org.apache.ignite.startup.cmdline.CommandLineStartup.DFLT_PROG_NAME; /** * Contains constants for all system properties and environmental variables in Ignite. * These properties and variables can be used to affect the behavior of Ignite. */ public final class IgniteSystemProperties { /** * If this system property is present the Ignite will include grid name into verbose log. * * @deprecated Use {@link #IGNITE_LOG_INSTANCE_NAME}. 
*/ @Deprecated @SystemProperty(value = "If this system property is present Ignite will include grid name into " + "verbose log", type = String.class) public static final String IGNITE_LOG_GRID_NAME = "IGNITE_LOG_GRID_NAME"; /** * If this system property is present the Ignite will include instance name into verbose log. */ @SystemProperty(value = "If this system property is present Ignite will include instance name " + "into verbose log", type = String.class) public static final String IGNITE_LOG_INSTANCE_NAME = "IGNITE_LOG_INSTANCE_NAME"; /** * This property is used internally to pass an exit code to loader when * Ignite instance is being restarted. */ @SystemProperty(value = "Exit code to pass to loader when Ignite instance is being restarted", type = Integer.class, defaults = "0") public static final String IGNITE_RESTART_CODE = "IGNITE_RESTART_CODE"; /** * Presence of this system property with value {@code true} will make the grid * node start as a daemon node. Node that this system property will override * {@link org.apache.ignite.configuration.IgniteConfiguration#isDaemon()} configuration. */ @SystemProperty("If true Ignite will start as a daemon node. Note that this system property " + "will override IgniteConfiguration.isDaemon() configuration") public static final String IGNITE_DAEMON = "IGNITE_DAEMON"; /** Defines Ignite installation folder. */ @SystemProperty(value = "Defines Ignite installation folder", type = String.class, defaults = "") public static final String IGNITE_HOME = "IGNITE_HOME"; /** If this system property is set to {@code true} - no shutdown hook will be set. */ @SystemProperty("If true then no shutdown hook will be set") public static final String IGNITE_NO_SHUTDOWN_HOOK = "IGNITE_NO_SHUTDOWN_HOOK"; /** * Name of the system property to disable requirement for proper node ordering * by discovery SPI. Use with care, as proper node ordering is required for * cache consistency. 
If set to {@code true}, then any discovery SPI can be used * with distributed cache, otherwise, only discovery SPIs that have annotation * {@link org.apache.ignite.spi.discovery.DiscoverySpiOrderSupport @GridDiscoverySpiOrderSupport(true)} will * be allowed. */ @SystemProperty("If true requirement for proper node ordering " + "by discovery SPI will be disabled. Use with care, as proper node ordering is required for cache consistency. " + "If set to true, then any discovery SPI can be used with distributed cache, otherwise, " + "only discovery SPIs that have annotation @GridDiscoverySpiOrderSupport(true) will be allowed") public static final String IGNITE_NO_DISCO_ORDER = "IGNITE_NO_DISCO_ORDER"; /** Defines reconnect delay in milliseconds for client node that was failed forcible. */ @SystemProperty(value = "Reconnect delay in milliseconds for client node that was failed forcible", type = Long.class, defaults = DFLT_DISCO_FAILED_CLIENT_RECONNECT_DELAY + " milliseconds") public static final String IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY = "IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY"; /** * If this system property is set to {@code false} - no checks for new versions will * be performed by Ignite. By default, Ignite periodically checks for the new * version and prints out the message into the log if a new version of Ignite is * available for download. * * Update notifier enabled flag is a cluster-wide value and determined according to the local setting * during the start of the first node in the cluster. The chosen value will survive the first node shutdown * and will override the property value on all newly joining nodes. */ @SystemProperty(value = "If this system property is set to false - no checks for new versions will " + "be performed by Ignite. By default, Ignite periodically checks for the new version and prints out the " + "message into the log if a new version of Ignite is available for download. 
Update notifier enabled flag is " + "a cluster-wide value and determined according to the local setting during the start of the first node in " + "the cluster. The chosen value will survive the first node shutdown and will override the property value " + "on all newly joining nodes", defaults = "" + DFLT_UPDATE_NOTIFIER) public static final String IGNITE_UPDATE_NOTIFIER = "IGNITE_UPDATE_NOTIFIER"; /** * This system property defines interval in milliseconds in which Ignite will check * thread pool state for starvation. Zero value will disable this checker. */ @SystemProperty(value = "Interval in milliseconds in which Ignite will check thread pool state for starvation. " + "Zero value will disable checker", type = Long.class, defaults = DFLT_PERIODIC_STARVATION_CHECK_FREQ + " milliseconds") public static final String IGNITE_STARVATION_CHECK_INTERVAL = "IGNITE_STARVATION_CHECK_INTERVAL"; /** * If this system property is present (any value) - no ASCII logo will * be printed. */ @SystemProperty(value = "If this system property is present (any value) - no ASCII logo will be printed") public static final String IGNITE_NO_ASCII = "IGNITE_NO_ASCII"; /** * This property allows to override Jetty host for REST processor. */ @SystemProperty(value = "Jetty host for REST processor", type = String.class) public static final String IGNITE_JETTY_HOST = "IGNITE_JETTY_HOST"; /** * This property allows to override Jetty local port for REST processor. */ @SystemProperty(value = "Jetty local port for REST processor", type = Integer.class) public static final String IGNITE_JETTY_PORT = "IGNITE_JETTY_PORT"; /** * This property does not allow Ignite to override Jetty log configuration for REST processor. */ @SystemProperty("If true then disallow Ignite to override Jetty log configuration for REST processor") public static final String IGNITE_JETTY_LOG_NO_OVERRIDE = "IGNITE_JETTY_LOG_NO_OVERRIDE"; /** This property allow rewriting default ({@code 30}) REST session expire time (in seconds). 
*/ @SystemProperty(value = "REST session expire time in seconds", type = Long.class, defaults = DFLT_SES_TIMEOUT + " seconds") public static final String IGNITE_REST_SESSION_TIMEOUT = "IGNITE_REST_SESSION_TIMEOUT"; /** This property allow rewriting default ({@code 300}) REST session security token expire time (in seconds). */ @SystemProperty(value = "REST session security token expire time in seconds", type = Long.class, defaults = DFLT_SES_TOKEN_INVALIDATE_INTERVAL + " seconds") public static final String IGNITE_REST_SECURITY_TOKEN_TIMEOUT = "IGNITE_REST_SECURITY_TOKEN_TIMEOUT"; /** * This property allows to override maximum count of task results stored on one node * in REST processor. */ @SystemProperty(value = "Maximum count of task results stored on one node in REST processor", type = Integer.class, defaults = "" + DFLT_MAX_TASK_RESULTS) public static final String IGNITE_REST_MAX_TASK_RESULTS = "IGNITE_REST_MAX_TASK_RESULTS"; /** * This property allows to override default behavior that rest processor * doesn't start on client node. If set {@code true} than rest processor will be started on client node. */ @SystemProperty("Enables start of the rest processor on client node") public static final String IGNITE_REST_START_ON_CLIENT = "IGNITE_REST_START_ON_CLIENT"; /** * This property changes output format of {@link GridRestCommand#CACHE_GET_ALL} from {k: v, ...} * to [{"key": k, "value": v}, ...] to allow non-string keys output. * * @deprecated Should be made default in Apache Ignite 3.0. */ @Deprecated @SystemProperty("If true output format of GridRestCommand.CACHE_GET_ALL will change from " + "{k: v, ...} to [{\"key\": k, \"value\": v}, ...] to allow non-string keys output") public static final String IGNITE_REST_GETALL_AS_ARRAY = "IGNITE_REST_GETALL_AS_ARRAY"; /** * This property defines the maximum number of attempts to remap near get to the same * primary node. Remapping may be needed when topology is changed concurrently with * get operation. 
*/ @SystemProperty(value = "Maximum number of attempts to remap near get to the same primary node. " + "Remapping may be needed when topology is changed concurrently with get operation", type = Integer.class, defaults = "" + DFLT_MAX_REMAP_CNT) public static final String IGNITE_NEAR_GET_MAX_REMAPS = "IGNITE_NEAR_GET_MAX_REMAPS"; /** * Set to either {@code true} or {@code false} to enable or disable quiet mode * of Ignite. In quiet mode, only warning and errors are printed into the log * additionally to a shortened version of standard output on the start. * <p> * Note that if you use <tt>ignite.{sh|bat}</tt> scripts to start Ignite they * start by default in quiet mode. You can supply <tt>-v</tt> flag to override it. */ @SystemProperty(value = "In quiet mode, only warning and errors are printed into the log additionally to a " + "shortened version of standard output on the start. Note that if you use ignite.{sh|bat} scripts to start " + "Ignite they start by default in quiet mode. You can supply -v flag to override it", defaults = "true") public static final String IGNITE_QUIET = "IGNITE_QUIET"; /** * Setting this option to {@code true} will enable troubleshooting logger. * Troubleshooting logger makes logging more verbose without enabling debug mode * to provide more detailed logs without performance penalty. */ @SystemProperty("Enables troubleshooting logger. " + "Troubleshooting logger makes logging more verbose without enabling debug mode to provide more detailed " + "logs without performance penalty") public static final String IGNITE_TROUBLESHOOTING_LOGGER = "IGNITE_TROUBLESHOOTING_LOGGER"; /** * Setting to {@code true} enables writing sensitive information in {@code toString()} output. 
*/ @SystemProperty(value = "Enables writing sensitive information in toString() output", defaults = "" + DFLT_TO_STRING_INCLUDE_SENSITIVE) public static final String IGNITE_TO_STRING_INCLUDE_SENSITIVE = "IGNITE_TO_STRING_INCLUDE_SENSITIVE"; /** Maximum length for {@code toString()} result. */ @SystemProperty(value = "Maximum length for toString() result", type = Integer.class, defaults = "" + DFLT_TO_STRING_MAX_LENGTH) public static final String IGNITE_TO_STRING_MAX_LENGTH = "IGNITE_TO_STRING_MAX_LENGTH"; /** * Limit collection (map, array) elements number to output. */ @SystemProperty(value = "Number of collection (map, array) elements to output", type = Integer.class, defaults = "" + DFLT_TO_STRING_COLLECTION_LIMIT) public static final String IGNITE_TO_STRING_COLLECTION_LIMIT = "IGNITE_TO_STRING_COLLECTION_LIMIT"; /** * If this property is set to {@code true} (default) and Ignite is launched * in verbose mode (see {@link #IGNITE_QUIET}) and no console appenders can be found * in configuration, then default console appender will be added. * Set this property to {@code false} if no appenders should be added. */ @SystemProperty(value = "If true (default) and Ignite is launched in verbose mode (see IGNITE_QUIET) " + "and no console appenders can be found in configuration, then default console appender will be added. " + "Set this property to false if no appenders should be added", defaults = "true") public static final String IGNITE_CONSOLE_APPENDER = "IGNITE_CONSOLE_APPENDER"; /** Maximum size for exchange history. 
Default value is {@code 1000}.*/ @SystemProperty(value = "Maximum size for exchange history", type = Integer.class, defaults = "" + DFLT_EXCHANGE_HISTORY_SIZE) public static final String IGNITE_EXCHANGE_HISTORY_SIZE = "IGNITE_EXCHANGE_HISTORY_SIZE"; /** */ @SystemProperty(value = "Partition map exchange merge delay in milliseconds", type = Long.class, defaults = "" + DFLT_EXCHANGE_MERGE_DELAY) public static final String IGNITE_EXCHANGE_MERGE_DELAY = "IGNITE_EXCHANGE_MERGE_DELAY"; /** PME-free switch explicitly disabled. */ @SystemProperty("Disables PME-free switch") public static final String IGNITE_PME_FREE_SWITCH_DISABLED = "IGNITE_PME_FREE_SWITCH_DISABLED"; /** * Name of the system property defining name of command line program. */ @SystemProperty(value = "Name of command line program", type = String.class, defaults = DFLT_PROG_NAME) public static final String IGNITE_PROG_NAME = "IGNITE_PROG_NAME"; /** * Name of the system property defining success file name. This file * is used with auto-restarting functionality when Ignite is started * by supplied <tt>ignite.{bat|sh}</tt> scripts. */ @SystemProperty(value = "Success file name. This file is used with auto-restarting functionality " + "when Ignite is started by supplied ignite.{bat|sh} scripts", type = String.class) public static final String IGNITE_SUCCESS_FILE = "IGNITE_SUCCESS_FILE"; /** * The system property sets a system-wide local IP address or hostname to be used by Ignite networking components. * Once provided, the property overrides all the default local binding settings for Ignite nodes. * <p> * Note, that the address can also be changed via * {@link org.apache.ignite.configuration.IgniteConfiguration#setLocalHost(String)} method. * However, this system property has bigger priority and overrides the settings set via * {@link org.apache.ignite.configuration.IgniteConfiguration}. */ @SystemProperty(value = "Sets a system-wide local IP address or hostname to be " + "used by Ignite networking components. 
Once provided, the property overrides all the default local binding " + "settings for Ignite nodes. Note, that the address can also be changed via " + "IgniteConfiguration.setLocalHost(String) method. However, this system property has bigger priority and " + "overrides the settings set via IgniteConfiguration", type = String.class) public static final String IGNITE_LOCAL_HOST = "IGNITE_LOCAL_HOST"; /** * System property to override deployment mode configuration parameter. * Valid values for property are: PRIVATE, ISOLATED, SHARED or CONTINUOUS. * * @see org.apache.ignite.configuration.DeploymentMode * @see org.apache.ignite.configuration.IgniteConfiguration#getDeploymentMode() */ @SystemProperty(value = "Sets deployment mode configuration parameter. Valid " + "values for property are: PRIVATE, ISOLATED, SHARED or CONTINUOUS", type = DeploymentMode.class) public static final String IGNITE_DEP_MODE_OVERRIDE = "IGNITE_DEPLOYMENT_MODE_OVERRIDE"; /** * Property controlling size of buffer holding completed transaction versions. Such buffer * is used to detect duplicate transaction and has a default value of {@code 102400}. In * most cases this value is large enough and does not need to be changed. */ @SystemProperty(value = "Size of buffer holding completed transaction versions. " + "Buffer is used to detect duplicate transaction. " + "In most cases this value is large enough and does not need to be changed", type = Integer.class, defaults = "" + DFLT_MAX_COMPLETED_TX_CNT) public static final String IGNITE_MAX_COMPLETED_TX_COUNT = "IGNITE_MAX_COMPLETED_TX_COUNT"; /** * Transactions that take more time, than value of this property, will be output to log * with warning level. {@code 0} (default value) disables warning on slow transactions. */ @SystemProperty(value = "Transactions that take more time, than value of this property (in milliseconds), " + "will be output to warnings. 
0 disables warnings on slow transactions", type = Integer.class, defaults = "" + DFLT_SLOW_TX_WARN_TIMEOUT) public static final String IGNITE_SLOW_TX_WARN_TIMEOUT = "IGNITE_SLOW_TX_WARN_TIMEOUT"; /** * Specifies maximum number of iterations for deadlock detection procedure. * If value of this property is less then or equal to zero then deadlock detection will be disabled. */ @SystemProperty(value = "Maximum number of iterations for deadlock detection procedure. " + "If value of this property is less then or equal to zero then deadlock detection will be disabled", type = Integer.class, defaults = "" + DFLT_TX_DEADLOCK_DETECTION_MAX_ITERS) public static final String IGNITE_TX_DEADLOCK_DETECTION_MAX_ITERS = "IGNITE_TX_DEADLOCK_DETECTION_MAX_ITERS"; /** * Specifies timeout for deadlock detection procedure. */ @SystemProperty(value = "Timeout for deadlock detection procedure", type = Integer.class, defaults = "" + DFLT_TX_DEADLOCK_DETECTION_TIMEOUT) public static final String IGNITE_TX_DEADLOCK_DETECTION_TIMEOUT = "IGNITE_TX_DEADLOCK_DETECTION_TIMEOUT"; /** * System property to enable pending transaction tracker. * Affects impact of {@link IgniteSystemProperties#IGNITE_DISABLE_WAL_DURING_REBALANCING} property: * if this property is set, WAL anyway won't be disabled during rebalancing triggered by baseline topology change. */ @SystemProperty(value = "Enables pending transaction tracker. " + "Affects impact of IGNITE_DISABLE_WAL_DURING_REBALANCING property: if this property is set, " + "WAL anyway won't be disabled during rebalancing triggered by baseline topology change") public static final String IGNITE_PENDING_TX_TRACKER_ENABLED = "IGNITE_PENDING_TX_TRACKER_ENABLED"; /** * System property to override multicast group taken from configuration. * Used for testing purposes. */ @SystemProperty(value = "Overrides multicast group taken from configuration. 
Used for testing purposes", type = String.class) public static final String IGNITE_OVERRIDE_MCAST_GRP = "IGNITE_OVERRIDE_MCAST_GRP"; /** * System property to override default reflection cache size. Default value is {@code 128}. */ @SystemProperty(value = "Overrides default reflection cache size", type = Integer.class, defaults = "" + DFLT_REFLECTION_CACHE_SIZE) public static final String IGNITE_REFLECTION_CACHE_SIZE = "IGNITE_REFLECTION_CACHE_SIZE"; /** * System property to override default job processor maps sizes for finished jobs and * cancellation requests. Default value is {@code 10240}. */ @SystemProperty(value = "Job processor maps sizes for finished jobs and cancellation requests. ", type = Integer.class, defaults = "" + DFLT_JOBS_HISTORY_SIZE) public static final String IGNITE_JOBS_HISTORY_SIZE = "IGNITE_JOBS_HISTORY_SIZE"; /** * System property to override default job metrics processor property defining * concurrency level for structure holding job metrics snapshots. * Default value is {@code 64}. * * @deprecated Use {@link GridMetricManager} instead. */ @Deprecated @SystemProperty(value = "Job metrics processor property defining concurrency level " + "for structure holding job metrics snapshots", type = Integer.class, defaults = "" + DFLT_JOBS_METRICS_CONCURRENCY_LEVEL) public static final String IGNITE_JOBS_METRICS_CONCURRENCY_LEVEL = "IGNITE_JOBS_METRICS_CONCURRENCY_LEVEL"; /** * System property to hold optional configuration URL. */ @SystemProperty(value = "Configuration URL", type = String.class) public static final String IGNITE_CONFIG_URL = "IGNITE_CONFIG_URL"; /** System property to hold SSH host for visor-started nodes. */ @SystemProperty(value = "SSH host name for visor-started nodes", type = String.class) public static final String IGNITE_SSH_HOST = "IGNITE_SSH_HOST"; /** * System property to enable experimental commands in control.sh script. * @deprecated Use "--enable-experimental" parameter instead. 
*/ @Deprecated @SystemProperty("Enables experimental commands in control.sh script") public static final String IGNITE_ENABLE_EXPERIMENTAL_COMMAND = "IGNITE_ENABLE_EXPERIMENTAL_COMMAND"; /** System property to hold SSH user name for visor-started nodes. */ @SystemProperty(value = "SSH user name for visor-started nodes", type = String.class) public static final String IGNITE_SSH_USER_NAME = "IGNITE_SSH_USER_NAME"; /** System property to hold preload resend timeout for evicted partitions. */ @SystemProperty(value = "Preload resend timeout for evicted partitions in milliseconds", type = Long.class, defaults = "" + DFLT_PRELOAD_RESEND_TIMEOUT) public static final String IGNITE_PRELOAD_RESEND_TIMEOUT = "IGNITE_PRELOAD_RESEND_TIMEOUT"; /** * System property to specify how often in milliseconds marshal buffers * should be rechecked and potentially trimmed. Default value is {@code 10,000ms}. */ @SystemProperty(value = "How often in milliseconds marshal buffers should be rechecked and potentially trimmed", type = Long.class, defaults = "" + DFLT_MARSHAL_BUFFERS_RECHECK) public static final String IGNITE_MARSHAL_BUFFERS_RECHECK = "IGNITE_MARSHAL_BUFFERS_RECHECK"; /** * System property to specify per thread binary allocator chunk pool size. Default value is {@code 32}. */ @SystemProperty(value = "Per thread binary allocator chunk pool size", type = Integer.class, defaults = "" + DFLT_MARSHAL_BUFFERS_PER_THREAD_POOL_SIZE) public static final String IGNITE_MARSHAL_BUFFERS_PER_THREAD_POOL_SIZE = "IGNITE_MARSHAL_BUFFERS_PER_THREAD_POOL_SIZE"; /** * System property to disable {@link HostnameVerifier} for SSL connections. * Can be used for development with self-signed certificates. Default value is {@code false}. */ @SystemProperty("Disables HostnameVerifier for SSL connections. 
" + "Can be used for development with self-signed certificates") public static final String IGNITE_DISABLE_HOSTNAME_VERIFIER = "IGNITE_DISABLE_HOSTNAME_VERIFIER"; /** * System property to disable buffered communication if node sends less messages count than * specified by this property. Default value is {@code 512}. * * @deprecated Not used anymore. */ @Deprecated @SystemProperty(value = "Disables buffered communication if node sends less messages count " + "than specified by this property", type = Integer.class, defaults = "512") public static final String IGNITE_MIN_BUFFERED_COMMUNICATION_MSG_CNT = "IGNITE_MIN_BUFFERED_COMMUNICATION_MSG_CNT"; /** * Flag that will force Ignite to fill memory block with some recognisable pattern right before * this memory block is released. This will help to recognize cases when already released memory is accessed. */ @SystemProperty("Force Ignite to fill memory block with some recognisable pattern right before this " + "memory block is released. This will help to recognize cases when already released memory is accessed") public static final String IGNITE_OFFHEAP_SAFE_RELEASE = "IGNITE_OFFHEAP_SAFE_RELEASE"; /** Maximum size for atomic cache queue delete history (default is 200 000 entries per partition). */ @SystemProperty(value = "Maximum size for atomic cache queue delete history", type = Integer.class, defaults = "" + DFLT_ATOMIC_CACHE_DELETE_HISTORY_SIZE + " per partition") public static final String IGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE = "IGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE"; /** Ttl of removed cache entries (ms). */ @SystemProperty(value = "Ttl of removed cache entries in milliseconds", type = Long.class, defaults = "" + DFLT_CACHE_REMOVE_ENTRIES_TTL) public static final String IGNITE_CACHE_REMOVED_ENTRIES_TTL = "IGNITE_CACHE_REMOVED_ENTRIES_TTL"; /** * Comma separated list of addresses in format "10.100.22.100:45000,10.100.22.101:45000". 
* Makes sense only for {@link org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder}. */ @SystemProperty(value = "Comma separated list of addresses in format " + "\"10.100.22.100:45000,10.100.22.101:45000\". Used only for " + "org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder", type = String.class) public static final String IGNITE_TCP_DISCOVERY_ADDRESSES = "IGNITE_TCP_DISCOVERY_ADDRESSES"; /** * Flag indicating whether performance suggestions output on start should be disabled. */ @SystemProperty("Disables performance suggestions output on start") public static final String IGNITE_PERFORMANCE_SUGGESTIONS_DISABLED = "IGNITE_PERFORMANCE_SUGGESTIONS_DISABLED"; /** * Flag indicating whether atomic operations are allowed for use inside transactions. */ @SystemProperty(value = "Allows atomic operations inside transactions", defaults = "true") public static final String IGNITE_ALLOW_ATOMIC_OPS_IN_TX = "IGNITE_ALLOW_ATOMIC_OPS_IN_TX"; /** * Atomic cache deferred update response buffer size. */ @SystemProperty(value = "Atomic cache deferred update response buffer size", type = Integer.class, defaults = "" + DFLT_ATOMIC_DEFERRED_ACK_BUFFER_SIZE) public static final String IGNITE_ATOMIC_DEFERRED_ACK_BUFFER_SIZE = "IGNITE_ATOMIC_DEFERRED_ACK_BUFFER_SIZE"; /** * Atomic cache deferred update timeout. */ @SystemProperty(value = "Atomic cache deferred update timeout", type = Integer.class, defaults = "" + DFLT_ATOMIC_DEFERRED_ACK_TIMEOUT) public static final String IGNITE_ATOMIC_DEFERRED_ACK_TIMEOUT = "IGNITE_ATOMIC_DEFERRED_ACK_TIMEOUT"; /** * Atomic cache queue retry timeout (default taken from {@code DFLT_ATOMIC_CACHE_QUERY_RETRY_TIMEOUT}). */ /* NOTE(review): the @SystemProperty description below ("Atomic cache deferred update timeout") appears copy-pasted from IGNITE_ATOMIC_DEFERRED_ACK_TIMEOUT above — confirm intended wording. */ @SystemProperty(value = "Atomic cache deferred update timeout", type = Integer.class, defaults = "" + DFLT_ATOMIC_CACHE_QUERY_RETRY_TIMEOUT) public static final String IGNITE_ATOMIC_CACHE_QUEUE_RETRY_TIMEOUT = "IGNITE_ATOMIC_CACHE_QUEUE_RETRY_TIMEOUT"; /** * One phase commit deferred ack request timeout. 
*/ @SystemProperty(value = "One phase commit deferred ack request timeout", type = Integer.class, defaults = "" + DFLT_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT) public static final String IGNITE_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT = "IGNITE_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT"; /** * One phase commit deferred ack request buffer size. */ @SystemProperty(value = "One phase commit deferred ack request buffer size", type = Integer.class, defaults = "" + DFLT_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE) public static final String IGNITE_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE = "IGNITE_DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE"; /** * If this property set then debug console will be opened for H2 indexing SPI. * * @deprecated Since 2.8. H2 console is no longer supported. */ @Deprecated @SystemProperty(value = "Enables debug console for H2 indexing SPI", type = String.class) public static final String IGNITE_H2_DEBUG_CONSOLE = "IGNITE_H2_DEBUG_CONSOLE"; /** * This property allows to specify user defined port which H2 indexing SPI will use * to start H2 debug console on. If this property is not set or set to 0, H2 debug * console will use system-provided dynamic port. * This property is only relevant when {@link #IGNITE_H2_DEBUG_CONSOLE} property is set. * * @deprecated Since 2.8. H2 console is no longer supported. */ @Deprecated @SystemProperty(value = "User defined port which H2 indexing SPI will use to start H2 debug console on. " + "If this property is not set or set to 0, H2 debug console will use system-provided dynamic port. " + "This property is only relevant when " + IGNITE_H2_DEBUG_CONSOLE + " property is set", type = Integer.class) public static final String IGNITE_H2_DEBUG_CONSOLE_PORT = "IGNITE_H2_DEBUG_CONSOLE_PORT"; /** * If this property is set to {@code true} then shared memory space native debug will be enabled. 
*/ @SystemProperty("Enables native debug of the shared memory space") public static final String IGNITE_IPC_SHMEM_SPACE_DEBUG = "IGNITE_IPC_SHMEM_SPACE_DEBUG"; /** * Property allowing to skip configuration consistency checks. */ @SystemProperty("Skip configuration consistency checks") public static final String IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK = "IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK"; /** * Flag indicating whether validation of keys put to cache should be disabled. */ @SystemProperty("Disables validation of keys put to cache") public static final String IGNITE_CACHE_KEY_VALIDATION_DISABLED = "IGNITE_CACHE_KEY_VALIDATION_DISABLED"; /** * Environment variable to override logging directory that has been set in logger configuration. */ @SystemProperty(value = "Logging directory. Overrides configuration value", type = String.class) public static final String IGNITE_LOG_DIR = "IGNITE_LOG_DIR"; /** * Environment variable to set work directory. The property {@link org.apache.ignite.configuration.IgniteConfiguration#setWorkDirectory} has higher * priority. */ @SystemProperty(value = "Work directory. The property IgniteConfiguration.setWorkDirectory has higher priority", type = String.class) public static final String IGNITE_WORK_DIR = "IGNITE_WORK_DIR"; /** * If this property is set to {@code true} then Ignite will append * hash code of {@link Ignite} class as hex string and append * JVM name returned by {@link RuntimeMXBean#getName()}. * <p> * This may be helpful when running Ignite in some application server * clusters or similar environments to avoid MBean name collisions. * <p> * Default is {@code false}. */ @SystemProperty("Enables Ignite to append hash code of Ignite class as hex string and append JVM name " + "returned by RuntimeMXBean.getName(). 
This may be helpful when running Ignite in some application server " + "clusters or similar environments to avoid MBean name collisions") public static final String IGNITE_MBEAN_APPEND_JVM_ID = "IGNITE_MBEAN_APPEND_JVM_ID"; /** * If this property is set to {@code true} then Ignite will append * hash code of class loader to MXBean name. * <p> * Default is {@code true}. */ @SystemProperty(value = "Enables Ignite to append hash code of class loader to MXBean name", defaults = "" + DFLT_MBEAN_APPEND_CLASS_LOADER_ID) public static final String IGNITE_MBEAN_APPEND_CLASS_LOADER_ID = "IGNITE_MBEAN_APPEND_CLASS_LOADER_ID"; /** * If property is set to {@code true}, then Ignite will disable MBeans registration. * This may be helpful if MBeans are not allowed e.g. for security reasons. * * Default is {@code false} */ @SystemProperty("Disable MBeans registration. This may be helpful if MBeans are not allowed " + "e.g. for security reasons") public static final String IGNITE_MBEANS_DISABLED = "IGNITE_MBEANS_DISABLED"; /** * If property is set to {@code true}, then test features will be enabled. * * Default is {@code false}. */ @SystemProperty("Enables test features") public static final String IGNITE_TEST_FEATURES_ENABLED = "IGNITE_TEST_FEATURES_ENABLED"; /** * Property controlling size of buffer holding last exception. Default value of {@code 1000}. */ @SystemProperty(value = "Size of buffer holding last exception", type = Integer.class, defaults = "" + DEFAULT_QUEUE_SIZE) public static final String IGNITE_EXCEPTION_REGISTRY_MAX_SIZE = "IGNITE_EXCEPTION_REGISTRY_MAX_SIZE"; /** * Property controlling default behavior of cache client flag. */ @SystemProperty("Starts node in client mode. Have lower priority than configuration value") public static final String IGNITE_CACHE_CLIENT = "IGNITE_CACHE_CLIENT"; /** * Property controlling whether CacheManager will start grid with isolated IP finder when default URL * is passed in. 
This is needed to pass TCK tests which use default URL and assume isolated cache managers * for different class loaders. */ @SystemProperty(value = "Enables CacheManager to start grid with isolated " + "IP finder when default URL is passed in. This is needed to pass TCK tests which use default URL and " + "assume isolated cache managers for different class loaders", defaults = "" + DFLT_JCACHE_DEFAULT_ISOLATED) public static final String IGNITE_JCACHE_DEFAULT_ISOLATED = "IGNITE_CACHE_CLIENT"; /** * Property controlling maximum number of SQL result rows which can be fetched into a merge table. * If there are less rows than this threshold then multiple passes throw a table will be possible, * otherwise only one pass (e.g. only result streaming is possible). */ @SystemProperty(value = "Maximum number of SQL result rows which can be fetched into a merge table. " + "If there are less rows than this threshold then multiple passes throw a table will be possible, " + "otherwise only one pass (e.g. only result streaming is possible)", type = Integer.class, defaults = "10000") public static final String IGNITE_SQL_MERGE_TABLE_MAX_SIZE = "IGNITE_SQL_MERGE_TABLE_MAX_SIZE"; /** * Property controlling number of SQL result rows that will be fetched into a merge table at once before * applying binary search for the bounds. */ @SystemProperty(value = "Number of SQL result rows that will be fetched into a merge table at once " + "before applying binary search for the bounds", type = Integer.class, defaults = "1024") public static final String IGNITE_SQL_MERGE_TABLE_PREFETCH_SIZE = "IGNITE_SQL_MERGE_TABLE_PREFETCH_SIZE"; /** Disable fallback to H2 SQL parser if the internal SQL parser fails to parse the statement. 
*/ @SystemProperty("Disables fallback to H2 SQL parser if the internal SQL parser fails to " + "parse the statement") public static final String IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK = "IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK"; /** * Force all SQL queries to be processed lazily regardless of what clients request. * * @deprecated Since version 2.8. */ @Deprecated @SystemProperty("Force all SQL queries to be processed lazily regardless of what clients request") public static final String IGNITE_SQL_FORCE_LAZY_RESULT_SET = "IGNITE_SQL_FORCE_LAZY_RESULT_SET"; /** Disable SQL system views. */ @SystemProperty("Disables SQL system views") public static final String IGNITE_SQL_DISABLE_SYSTEM_VIEWS = "IGNITE_SQL_DISABLE_SYSTEM_VIEWS"; /** SQL retry timeout. */ @SystemProperty(value = "SQL retry timeout in milliseconds", type = Long.class, defaults = "30 seconds") public static final String IGNITE_SQL_RETRY_TIMEOUT = "IGNITE_SQL_RETRY_TIMEOUT"; /** Enable backward compatible handling of UUID through DDL. */ @SystemProperty("Enables backward compatible handling of UUID through DDL") public static final String IGNITE_SQL_UUID_DDL_BYTE_FORMAT = "IGNITE_SQL_UUID_DDL_BYTE_FORMAT"; /** Maximum size for affinity assignment history. */ @SystemProperty(value = "Maximum size for affinity assignment history", type = Integer.class, defaults = "" + DFLT_AFFINITY_HISTORY_SIZE) public static final String IGNITE_AFFINITY_HISTORY_SIZE = "IGNITE_AFFINITY_HISTORY_SIZE"; /** Maximum size for discovery messages history. */ @SystemProperty(value = "Maximum size for discovery messages history", type = Integer.class, defaults = "" + DFLT_DISCOVERY_HISTORY_SIZE) public static final String IGNITE_DISCOVERY_HISTORY_SIZE = "IGNITE_DISCOVERY_HISTORY_SIZE"; /** Maximum number of discovery message history used to support client reconnect. 
*/ @SystemProperty(value = "Maximum number of discovery message history used to support client reconnect", type = Integer.class, defaults = "" + DFLT_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE) public static final String IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE = "IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE"; /** Logging a warning message when metrics quantity exceeded a specified number. */ @SystemProperty(value = "Enables logging a warning message when metrics quantity exceeded a specified number", type = Integer.class, defaults = "" + DFLT_DISCOVERY_METRICS_QNT_WARN) public static final String IGNITE_DISCOVERY_METRICS_QNT_WARN = "IGNITE_DISCOVERY_METRICS_QNT_WARN"; /** Time interval that indicates that client reconnect throttle must be reset to zero. 2 minutes by default. */ @SystemProperty(value = "Time interval in milliseconds that indicates client reconnect throttle " + "must be reset to zero", type = Long.class, defaults = "" + DFLT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL) public static final String CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL = "CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL"; /** Number of cache operation retries in case of topology exceptions. */ @SystemProperty(value = "Number of cache operation retries in case of topology exceptions", type = Integer.class, defaults = "" + DFLT_CACHE_RETRIES_COUNT) public static final String IGNITE_CACHE_RETRIES_COUNT = "IGNITE_CACHE_RETRIES_COUNT"; /** If this property is set to {@code true} then Ignite will log thread dump in case of partition exchange timeout. 
*/ @SystemProperty("Enables logging thread dump in case of partition exchange timeout") public static final String IGNITE_THREAD_DUMP_ON_EXCHANGE_TIMEOUT = "IGNITE_THREAD_DUMP_ON_EXCHANGE_TIMEOUT"; /** */ @SystemProperty("Enables dump SPI stats to diagnostic log") public static final String IGNITE_IO_DUMP_ON_TIMEOUT = "IGNITE_IO_DUMP_ON_TIMEOUT"; /** */ @SystemProperty(value = "Enables diagnostic flag", defaults = "" + DFLT_DIAGNOSTIC_ENABLED) public static final String IGNITE_DIAGNOSTIC_ENABLED = "IGNITE_DIAGNOSTIC_ENABLED"; /** Cache operations that take more time than value of this property will be output to log. Set to {@code 0} to disable. */ @SystemProperty(value = "Cache operations that take more time than value of this property will be " + "output to log. Set to 0 to disable", type = Long.class, defaults = "" + DFLT_LONG_OPERATIONS_DUMP_TIMEOUT) public static final String IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT = "IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT"; /** Upper time limit between long running/hanging operations debug dumps. */ @SystemProperty(value = "Upper time limit between long running/hanging operations debug dumps " + "in milliseconds", type = Long.class, defaults = "" + DFLT_LONG_OPERATIONS_DUMP_TIMEOUT_LIMIT) public static final String IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT_LIMIT = "IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT_LIMIT"; /** JDBC driver cursor remove delay. */ @SystemProperty(value = "JDBC driver cursor remove delay in milliseconds", type = Long.class, defaults = "10 minutes") public static final String IGNITE_JDBC_DRIVER_CURSOR_REMOVE_DELAY = "IGNITE_JDBC_DRIVER_CURSOR_RMV_DELAY"; /** Long-long offheap map load factor. 
*/ @SystemProperty(value = "Long-long offheap map load factor", type = Float.class, defaults = "" + DFLT_LONG_LONG_HASH_MAP_LOAD_FACTOR) public static final String IGNITE_LONG_LONG_HASH_MAP_LOAD_FACTOR = "IGNITE_LONG_LONG_HASH_MAP_LOAD_FACTOR"; /** Maximum number of nested listener calls before listener notification becomes asynchronous. */ @SystemProperty(value = "Maximum number of nested listener calls before listener notification " + "becomes asynchronous", type = Integer.class, defaults = "" + DFLT_MAX_NESTED_LISTENER_CALLS) public static final String IGNITE_MAX_NESTED_LISTENER_CALLS = "IGNITE_MAX_NESTED_LISTENER_CALLS"; /** Indicating whether local store keeps primary only. Backward compatibility flag. */ @SystemProperty("Enables local store keeps primary only. Backward compatibility flag") public static final String IGNITE_LOCAL_STORE_KEEPS_PRIMARY_ONLY = "IGNITE_LOCAL_STORE_KEEPS_PRIMARY_ONLY"; /** * Manages {@link OptimizedMarshaller} behavior of {@code serialVersionUID} computation for * {@link Serializable} classes. */ @SystemProperty("Manages OptimizedMarshaller behavior of serialVersionUID computation " + "for Serializable classes") public static final String IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID = "IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID"; /** * Manages type of serialization mechanism for {@link String} that is marshalled/unmarshalled by BinaryMarshaller. * Should be used for cases when a String contains a surrogate symbol without its pair one. This is frequently used * in algorithms that encrypts data in String format. */ @SystemProperty("Manages type of serialization mechanism for String that is " + "marshalled/unmarshalled by BinaryMarshaller. Should be used for cases when a String contains a surrogate " + "symbol without its pair one. 
This is frequently used in algorithms that encrypts data in String format") public static final String IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2 = "IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2"; /** Defines path to the file that contains list of classes allowed to safe deserialization.*/ @SystemProperty(value = "Path to the file that contains list of classes allowed to safe deserialization", type = String.class) public static final String IGNITE_MARSHALLER_WHITELIST = "IGNITE_MARSHALLER_WHITELIST"; /** Defines path to the file that contains list of classes disallowed to safe deserialization.*/ @SystemProperty(value = "Path to the file that contains list of classes disallowed to safe deserialization", type = String.class) public static final String IGNITE_MARSHALLER_BLACKLIST = "IGNITE_MARSHALLER_BLACKLIST"; /** * If set to {@code true}, then default selected keys set is used inside * {@code GridNioServer} which lead to some extra garbage generation when * processing selected keys. * <p> * Default value is {@code false}. Should be switched to {@code true} if there are * any problems in communication layer. */ @SystemProperty("Enables default selected keys set to be used inside GridNioServer " + "which lead to some extra garbage generation when processing selected keys. " + "Should be switched to true if there are any problems in communication layer") public static final String IGNITE_NO_SELECTOR_OPTS = "IGNITE_NO_SELECTOR_OPTS"; /** * System property to specify period in milliseconds between calls of the SQL statements cache cleanup task. * <p> * Cleanup tasks clears cache for terminated threads and for threads which did not perform SQL queries within * timeout configured via {@link #IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT} property. * <p> * Default value is {@code 10,000ms}. */ @SystemProperty(value = "Period in milliseconds between calls of the SQL " + "statements cache cleanup task. 
Cleanup tasks clears cache for terminated threads and for threads which did " + "not perform SQL queries within timeout configured via IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT" + " property", type = Long.class, defaults = "10 seconds") public static final String IGNITE_H2_INDEXING_CACHE_CLEANUP_PERIOD = "IGNITE_H2_INDEXING_CACHE_CLEANUP_PERIOD"; /** * System property to specify timeout in milliseconds after which thread's SQL statements cache is cleared by * cleanup task if the thread does not perform any query. * <p> * Default value is {@code 600,000ms}. */ @SystemProperty(value = "Timeout in milliseconds after which thread's SQL statements cache is cleared " + "by cleanup task if the thread does not perform any query", type = Long.class, defaults = "10 minutes") public static final String IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT = "IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT"; /** * Manages backward compatibility of {@link StreamTransformer#from(CacheEntryProcessor)} method. * <p> * If the property is {@code true}, then the wrapped {@link CacheEntryProcessor} won't be able to be loaded over * P2P class loading. * <p> * If the property is {@code false}, then another implementation of {@link StreamTransformer} will be returned, * that fixes P2P class loading for {@link CacheEntryProcessor}, but it will be incompatible with old versions * of Ignite. */ @SystemProperty("Enables backward compatibility of StreamTransformer.from(CacheEntryProcessor) method. " + "If the property is true, then the wrapped CacheEntryProcessor won't be able to be loaded " + "over P2P class loading. 
If the property is false, then another implementation of StreamTransformer " + "will be returned, that fixes P2P class loading for CacheEntryProcessor, " + "but it will be incompatible with old versions of Ignite") public static final String IGNITE_STREAM_TRANSFORMER_COMPATIBILITY_MODE = "IGNITE_STREAM_TRANSFORMER_COMPATIBILITY_MODE"; /** * When set to {@code true} tree-based data structures - {@code TreeMap} and {@code TreeSet} - will not be * wrapped into special holders introduced to overcome serialization issue caused by missing {@code Comparable} * interface on {@code BinaryObject}. * <p> * @deprecated Should be removed in Apache Ignite 2.0. */ @Deprecated @SystemProperty("If enabled then tree-based data structures - TreeMap and TreeSet - will " + "not be wrapped into special holders introduced to overcome serialization issue caused by missing " + "Comparable interface on BinaryObject") public static final String IGNITE_BINARY_DONT_WRAP_TREE_STRUCTURES = "IGNITE_BINARY_DONT_WRAP_TREE_STRUCTURES"; /** * When set to {@code true}, for consistent id will calculate by host name, without port, and you can use * only one node for host in cluster. */ @SystemProperty("Enables consistent ID to calculate by host name, without port, " + "and you can use only one node for host in cluster") public static final String IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT = "IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT"; /** * System property to specify consistent id of Ignite node. * <p> * Value of the system property will overwrite matched property * {@link org.apache.ignite.configuration.IgniteConfiguration#setConsistentId(Serializable)} in configuration. */ @SystemProperty(value = "Consistent ID of Ignite node. 
Value of the " + "system property will overwrite matched property IgniteConfiguration.setConsistentId(Serializable) " + "in configuration", type = String.class) public static final String IGNITE_OVERRIDE_CONSISTENT_ID = "IGNITE_OVERRIDE_CONSISTENT_ID"; /** */ @SystemProperty(value = "IO balance period in milliseconds", type = Long.class, defaults = "" + DFLT_IO_BALANCE_PERIOD) public static final String IGNITE_IO_BALANCE_PERIOD = "IGNITE_IO_BALANCE_PERIOD"; /** * When set to {@code true} fields are written by BinaryMarshaller in sorted order. Otherwise * the natural order is used. * <p> * @deprecated Should be removed in Apache Ignite 2.0. */ @Deprecated @SystemProperty("Enables fields to be written by BinaryMarshaller in sorted order. " + "By default, the natural order is used") public static final String IGNITE_BINARY_SORT_OBJECT_FIELDS = "IGNITE_BINARY_SORT_OBJECT_FIELDS"; /** * Whether Ignite can access unaligned memory addresses. * <p> * Defaults to {@code false}, meaning that unaligned access will be performed only on x86 architecture. */ @SystemProperty("Whether Ignite can access unaligned memory addresses. Defaults to false, " + "meaning that unaligned access will be performed only on x86 architecture") public static final String IGNITE_MEMORY_UNALIGNED_ACCESS = "IGNITE_MEMORY_UNALIGNED_ACCESS"; /** * When unsafe memory copy if performed below this threshold, Ignite will do it on per-byte basis instead of * calling to Unsafe.copyMemory(). * <p> * Defaults to 0, meaning that threshold is disabled. */ @SystemProperty(value = "When unsafe memory copy if performed below this threshold, Ignite will do it " + "on per-byte basis instead of calling to Unsafe.copyMemory(). 
0 disables threshold", type = Long.class, defaults = "" + DFLT_MEMORY_PER_BYTE_COPY_THRESHOLD) public static final String IGNITE_MEMORY_PER_BYTE_COPY_THRESHOLD = "IGNITE_MEMORY_PER_BYTE_COPY_THRESHOLD"; /** * When set to {@code true} BinaryObject will be unwrapped before passing to IndexingSpi to preserve * old behavior query processor with IndexingSpi. * <p> * @deprecated Should be removed in Apache Ignite 2.0. */ @Deprecated @SystemProperty("If enabled BinaryObject will be unwrapped before passing to " + "IndexingSpi to preserve old behavior query processor with IndexingSpi") public static final String IGNITE_UNWRAP_BINARY_FOR_INDEXING_SPI = "IGNITE_UNWRAP_BINARY_FOR_INDEXING_SPI"; /** * System property to specify maximum payload size in bytes for {@code H2TreeIndex}. * <p> * Defaults to {@code 10} (per the {@code defaults} attribute below). Setting it to {@code 0} disables the inline index store. */ @SystemProperty(value = "Maximum payload size in bytes for H2TreeIndex. " + "0 means that inline index store is disabled", type = Integer.class, defaults = "10") public static final String IGNITE_MAX_INDEX_PAYLOAD_SIZE = "IGNITE_MAX_INDEX_PAYLOAD_SIZE"; /** * Time interval for calculating rebalance rate statistics, in milliseconds. Defaults to 60000. * @deprecated Use {@link MetricsMxBean#configureHitRateMetric(String, long)} instead. */ @Deprecated @SystemProperty(value = "Time interval for calculating rebalance rate statistics, in milliseconds", type = Integer.class, defaults = "60000") public static final String IGNITE_REBALANCE_STATISTICS_TIME_INTERVAL = "IGNITE_REBALANCE_STATISTICS_TIME_INTERVAL"; /** * When cache has entries with expired TTL, each user operation will also remove this amount of expired entries. * Defaults to {@code 5}. 
*/ @SystemProperty(value = "When cache has entries with expired TTL, each user operation will also " + "remove this amount of expired entries", type = Integer.class, defaults = "" + DFLT_TTL_EXPIRE_BATCH_SIZE) public static final String IGNITE_TTL_EXPIRE_BATCH_SIZE = "IGNITE_TTL_EXPIRE_BATCH_SIZE"; /** * Indexing discovery history size. Protects from duplicate messages maintaining the list of IDs of recently * arrived discovery messages. * <p> * Defaults to {@code 1000}. */ @SystemProperty(value = "Indexing discovery history size. Protects from duplicate messages " + "maintaining the list of IDs of recently arrived discovery messages", type = Integer.class, defaults = "" + DFLT_INDEXING_DISCOVERY_HISTORY_SIZE) public static final String IGNITE_INDEXING_DISCOVERY_HISTORY_SIZE = "IGNITE_INDEXING_DISCOVERY_HISTORY_SIZE"; /** Cache start size for on-heap maps. Defaults to 4096. */ @SystemProperty(value = "Cache start size for on-heap maps", type = Integer.class, defaults = "" + DFLT_CACHE_START_SIZE) public static final String IGNITE_CACHE_START_SIZE = "IGNITE_CACHE_START_SIZE"; /** */ @SystemProperty("Enables local start all existing caches on client node start") public static final String IGNITE_START_CACHES_ON_JOIN = "IGNITE_START_CACHES_ON_JOIN"; /** * Skip CRC calculation flag. */ @SystemProperty("Skip CRC calculation flag") public static final String IGNITE_PDS_SKIP_CRC = "IGNITE_PDS_SKIP_CRC"; /** * WAL rebalance threshold. */ @Deprecated @SystemProperty(value = "PDS partition destroy checkpoint delay", type = Integer.class) public static final String IGNITE_PDS_PARTITION_DESTROY_CHECKPOINT_DELAY = "IGNITE_PDS_PARTITION_DESTROY_CHECKPOINT_DELAY"; /** * WAL rebalance threshold. 
*/ @SystemProperty(value = "WAL rebalance threshold", type = Integer.class, defaults = "" + DFLT_PDS_WAL_REBALANCE_THRESHOLD) public static final String IGNITE_PDS_WAL_REBALANCE_THRESHOLD = "IGNITE_PDS_WAL_REBALANCE_THRESHOLD"; /** * Prefer historical rebalance if there's enough history regardless of all heuristics. * This property is intended for integration or performance tests. * Default is {@code false}. */ @SystemProperty("Prefer historical rebalance if there's enough history regardless of all heuristics. " + "This property is intended for integration or performance tests") public static final String IGNITE_PREFER_WAL_REBALANCE = "IGNITE_PREFER_WAL_REBALANCE"; /** Ignite page memory concurrency level. */ @SystemProperty(value = "Ignite page memory concurrency level", type = Integer.class) public static final String IGNITE_OFFHEAP_LOCK_CONCURRENCY_LEVEL = "IGNITE_OFFHEAP_LOCK_CONCURRENCY_LEVEL"; /** * When set to {@code true}, Ignite switches to compatibility mode with versions that don't * support service security permissions. In this case security permissions will be ignored * (if they set). * <p> * Default is {@code false}, which means that service security permissions will be respected. * </p> */ @SystemProperty("Enables Ignite to switch to compatibility mode with versions that " + "don't support service security permissions. In this case security permissions will be ignored (if they set)." + " Default is false, which means that service security permissions will be respected") public static final String IGNITE_SECURITY_COMPATIBILITY_MODE = "IGNITE_SECURITY_COMPATIBILITY_MODE"; /** * Ignite cluster name. * <p> * Defaults to utility cache deployment ID. */ @SystemProperty(value = "Ignite cluster name. 
Defaults to utility cache deployment ID", type = String.class) public static final String IGNITE_CLUSTER_NAME = "IGNITE_CLUSTER_NAME"; /** * When client cache is started or closed special discovery message is sent to notify cluster (for example this is * needed for {@link ClusterGroup#forCacheNodes(String)} API. This timeout specifies how long to wait * after client cache start/close before sending this message. If during this timeout another client * cache changed, these events are combined into single message. * <p> * Default is 10 seconds. */ @SystemProperty(value = "When client cache is started or closed special discovery message is sent " + "to notify cluster (for example this is needed for ClusterGroup.forCacheNodes(String) API. This timeout " + "in milliseconds specifies how long to wait after client cache start/close before sending this message. If " + "during this timeout another client cache changed, these events are combined into single message", type = Long.class, defaults = "" + DFLT_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT) public static final String IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT = "IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT"; /** * If a partition release future completion time during an exchange exceeds this threshold, the contents of * the future will be dumped to the log on exchange. Default is {@code 0} (disabled). */ @SystemProperty(value = "If a partition release future completion time during an exchange exceeds " + "this threshold (in milliseconds), the contents of the future will be dumped to the log on exchange. " + "0 means disabled", type = Integer.class, defaults = "" + DFLT_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD) public static final String IGNITE_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD = "IGNITE_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD"; /** * If this property is set, a node will forcible fail a remote node when it fails to establish a communication * connection. 
*/ @SystemProperty("Enables node to forcible fail a remote node when it " + "fails to establish a communication connection") public static final String IGNITE_ENABLE_FORCIBLE_NODE_KILL = "IGNITE_ENABLE_FORCIBLE_NODE_KILL"; /** * Tasks stealing will be started if tasks queue size per data-streamer thread exceeds this threshold. * <p> * Default value is {@code 4}. */ @SystemProperty(value = "Tasks stealing will be started if tasks queue size per data-streamer thread " + "exceeds this threshold", type = Integer.class, defaults = "" + DFLT_DATA_STREAMING_EXECUTOR_SERVICE_TASKS_STEALING_THRESHOLD) public static final String IGNITE_DATA_STREAMING_EXECUTOR_SERVICE_TASKS_STEALING_THRESHOLD = "IGNITE_DATA_STREAMING_EXECUTOR_SERVICE_TASKS_STEALING_THRESHOLD"; /** * If this property is set, then Ignite will use Async File IO factory by default. */ @SystemProperty(value = "If this property is set, then Ignite will use Async File IO factory by default", defaults = "" + DFLT_USE_ASYNC_FILE_IO_FACTORY) public static final String IGNITE_USE_ASYNC_FILE_IO_FACTORY = "IGNITE_USE_ASYNC_FILE_IO_FACTORY"; /** * If the property is set {@link org.apache.ignite.internal.pagemem.wal.record.TxRecord} records * will be logged to WAL. * * Default value is {@code false}. */ @SystemProperty("If the property is set org.apache.ignite.internal.pagemem.wal.record.TxRecord records " + "will be logged to WAL") public static final String IGNITE_WAL_LOG_TX_RECORDS = "IGNITE_WAL_LOG_TX_RECORDS"; /** Max amount of remembered errors for {@link GridLogThrottle}. */ @SystemProperty(value = "Max amount of remembered errors for GridLogThrottle", type = Integer.class, defaults = "" + DFLT_LOG_THROTTLE_CAPACITY) public static final String IGNITE_LOG_THROTTLE_CAPACITY = "IGNITE_LOG_THROTTLE_CAPACITY"; /** * If this property is set, {@link DataStorageConfiguration#setWriteThrottlingEnabled(boolean)} * will be overridden to {@code true} regardless the initial value in the configuration. 
*/ @SystemProperty(value = "Checkpoint throttling policy", type = String.class) public static final String IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED = "IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED"; /** * Property for setup WAL serializer version. */ @SystemProperty(value = "WAL serializer version", type = Integer.class, defaults = "" + LATEST_SERIALIZER_VERSION) public static final String IGNITE_WAL_SERIALIZER_VERSION = "IGNITE_WAL_SERIALIZER_VERSION"; /** Property for setup Ignite WAL segment sync timeout. */ @SystemProperty(value = "WAL segment sync timeout in milliseconds", type = Long.class, defaults = "" + DFLT_WAL_SEGMENT_SYNC_TIMEOUT) public static final String IGNITE_WAL_SEGMENT_SYNC_TIMEOUT = "IGNITE_WAL_SEGMENT_SYNC_TIMEOUT"; /** * If the property is set Ignite will use legacy node comparator (based on node order) instead. * * Default value is {@code false}. */ @SystemProperty("Enables usage of legacy node comparator (based on node order)") public static final String IGNITE_USE_LEGACY_NODE_COMPARATOR = "IGNITE_USE_LEGACY_NODE_COMPARATOR"; /** * Property that indicates should be mapped byte buffer used or not. * Possible values: {@code true} and {@code false}. */ @SystemProperty(value = "Enables usage of the mapped byte buffer", defaults = "" + DFLT_WAL_MMAP) public static final String IGNITE_WAL_MMAP = "IGNITE_WAL_MMAP"; /** * When set to {@code true}, Data store folders are generated only by consistent id, and no consistent ID will be * set based on existing data store folders. This option also enables compatible folder generation mode as it was * before 2.3. */ @SystemProperty("When set to true, Data store folders are generated only by consistent id, " + "and no consistent ID will be set based on existing data store folders. 
This option also enables compatible " + "folder generation mode as it was before 2.3") public static final String IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID = "IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID"; /** Ignite JVM pause detector disabled. */ @SystemProperty("Disables JVM pause detector") public static final String IGNITE_JVM_PAUSE_DETECTOR_DISABLED = "IGNITE_JVM_PAUSE_DETECTOR_DISABLED"; /** Ignite JVM pause detector precision. */ @SystemProperty(value = "JVM pause detector precision", type = Integer.class, defaults = "" + DFLT_JVM_PAUSE_DETECTOR_PRECISION) public static final String IGNITE_JVM_PAUSE_DETECTOR_PRECISION = "IGNITE_JVM_PAUSE_DETECTOR_PRECISION"; /** Ignite JVM pause detector threshold. */ @SystemProperty(value = "JVM pause detector threshold", type = Integer.class, defaults = "" + DEFAULT_JVM_PAUSE_DETECTOR_THRESHOLD) public static final String IGNITE_JVM_PAUSE_DETECTOR_THRESHOLD = "IGNITE_JVM_PAUSE_DETECTOR_THRESHOLD"; /** Ignite JVM pause detector last events count. */ @SystemProperty(value = "JVM pause detector last events count", type = Integer.class, defaults = "" + DFLT_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT) public static final String IGNITE_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT = "IGNITE_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT"; /** * Default value is {@code false}. * * @deprecated Not used. */ @Deprecated @SystemProperty("Enables WAL debug log on recovery") public static final String IGNITE_WAL_DEBUG_LOG_ON_RECOVERY = "IGNITE_WAL_DEBUG_LOG_ON_RECOVERY"; /** * Number of checkpoint history entries held in memory. */ @SystemProperty(value = "Number of checkpoint history entries held in memory", type = Integer.class, defaults = "" + DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE) public static final String IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE = "IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE"; /** * If this property is set to {@code true} enable logging in {@link GridClient}. 
*/ @SystemProperty("Enables logging in GridClient") public static final String IGNITE_GRID_CLIENT_LOG_ENABLED = "IGNITE_GRID_CLIENT_LOG_ENABLED"; /** * When set to {@code true}, direct IO may be enabled. Direct IO enabled only if JAR file with corresponding * feature is available in classpath and OS and filesystem settings allows to enable this mode. * Default is {@code true}. */ @SystemProperty(value = "Enables direct IO. Direct IO enabled only if JAR " + "file with corresponding feature is available in classpath and OS and filesystem settings allows to enable " + "this mode", defaults = "true") public static final String IGNITE_DIRECT_IO_ENABLED = "IGNITE_DIRECT_IO_ENABLED"; /** * When set to {@code true}, warnings that are intended for development environments and not for production * (such as coding mistakes in code using Ignite) will not be logged. */ @SystemProperty("Enables development environments warnings") public static final String IGNITE_DEV_ONLY_LOGGING_DISABLED = "IGNITE_DEV_ONLY_LOGGING_DISABLED"; /** * When set to {@code true} (default), pages are written to page store without holding segment lock (with delay). * Because other thread may require exactly the same page to be loaded from store, reads are protected by locking. */ @SystemProperty(value = "When set to true (default), pages are written to page store without " + "holding segment lock (with delay). Because other thread may require exactly the same page to be loaded " + "from store, reads are protected by locking", defaults = "" + DFLT_DELAYED_REPLACED_PAGE_WRITE) public static final String IGNITE_DELAYED_REPLACED_PAGE_WRITE = "IGNITE_DELAYED_REPLACED_PAGE_WRITE"; /** * When set to {@code true}, WAL implementation with dedicated worker will be used even in FSYNC mode. * Default is {@code false}. 
*/ @SystemProperty("When set to true, WAL implementation with dedicated worker will be used " + "even in FSYNC mode") public static final String IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER = "IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER"; /** * When set to {@code true}, on-heap cache cannot be enabled - see * {@link CacheConfiguration#setOnheapCacheEnabled(boolean)}. * Default is {@code false}. */ @SystemProperty("When set to true, on-heap cache cannot be enabled - see " + "CacheConfiguration.setOnheapCacheEnabled(boolean)") public static final String IGNITE_DISABLE_ONHEAP_CACHE = "IGNITE_DISABLE_ONHEAP_CACHE"; /** * When set to {@code false}, loaded pages implementation is switched to previous version of implementation, * FullPageIdTable. {@code True} value enables 'Robin Hood hashing: backward shift deletion'. * Default is {@code true}. */ @SystemProperty(value = "When set to false, loaded pages implementation is switched to previous " + "version of implementation, FullPageIdTable. True value enables 'Robin Hood hashing: backward shift " + "deletion'", defaults = "" + DFLT_LOADED_PAGES_BACKWARD_SHIFT_MAP) public static final String IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP = "IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP"; /** * Property for setup percentage of archive size for checkpoint trigger. Default value is 0.25 */ @SystemProperty(value = "Percentage of archive size for checkpoint trigger", type = Double.class, defaults = "" + DFLT_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) public static final String IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE"; /** * Property for setup percentage of WAL archive size to calculate threshold since which removing of old archive should be started. 
* Default value is 0.5 */ @SystemProperty(value = "Percentage of WAL archive size to calculate threshold " + "since which removing of old archive should be started", type = Double.class, defaults = "" + DFLT_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE) public static final String IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE"; /** * Threshold time (in millis) to print warning to log if waiting for next wal segment took longer than the threshold. * * Default value is 1000 ms. */ @SystemProperty(value = "Threshold time (in millis) to print warning to log if waiting for next wal " + "segment took longer than the threshold", type = Long.class, defaults = DFLT_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT + " milliseconds") public static final String IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = "IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT"; /** * Count of WAL compressor worker threads. Default value is 4. */ @SystemProperty(value = "Count of WAL compressor worker threads", type = Integer.class, defaults = "" + DFLT_WAL_COMPRESSOR_WORKER_THREAD_CNT) public static final String IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT = "IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT"; /** * Whenever read load balancing is enabled, that means 'get' requests will be distributed between primary and backup * nodes if it is possible and {@link CacheConfiguration#isReadFromBackup()} is {@code true}. * * Default is {@code true}. * * @see CacheConfiguration#isReadFromBackup() */ @SystemProperty(value = "Enables read load balancing, that means 'get' requests will be distributed " + "between primary and backup nodes if it is possible and CacheConfiguration.isReadFromBackup() is true. ", defaults = "" + DFLT_READ_LOAD_BALANCING) public static final String IGNITE_READ_LOAD_BALANCING = "IGNITE_READ_LOAD_BALANCING"; /** * Number of repetitions to capture a lock in the B+Tree. 
*/ @SystemProperty(value = "Number of repetitions to capture a lock in the B+Tree", type = Integer.class, defaults = "" + IGNITE_BPLUS_TREE_LOCK_RETRIES_DEFAULT) public static final String IGNITE_BPLUS_TREE_LOCK_RETRIES = "IGNITE_BPLUS_TREE_LOCK_RETRIES"; /** * Amount of memory reserved in the heap at node start, which can be dropped to increase the chances of success when * handling OutOfMemoryError. * * Default is {@code 64kb}. */ @SystemProperty(value = "Amount of memory reserved in the heap at node start, which can be dropped " + "to increase the chances of success when handling OutOfMemoryError", type = Integer.class, defaults = "" + DFLT_FAILURE_HANDLER_RESERVE_BUFFER_SIZE) public static final String IGNITE_FAILURE_HANDLER_RESERVE_BUFFER_SIZE = "IGNITE_FAILURE_HANDLER_RESERVE_BUFFER_SIZE"; /** * The threshold of uneven distribution above which partition distribution will be logged. * * The default is '50', that means: warn about nodes with 50+% difference. */ @SystemProperty(value = "The threshold of uneven distribution above which partition distribution " + "will be logged", type = Float.class, defaults = "" + DFLT_PART_DISTRIBUTION_WARN_THRESHOLD) public static final String IGNITE_PART_DISTRIBUTION_WARN_THRESHOLD = "IGNITE_PART_DISTRIBUTION_WARN_THRESHOLD"; /** * When set to {@code false}, WAL will not be automatically disabled during rebalancing if there is no partition in * OWNING state. * Default is {@code true}. */ @SystemProperty(value = "When set to false, WAL will not be automatically disabled during " + "rebalancing if there is no partition in OWNING state", defaults = "" + DFLT_DISABLE_WAL_DURING_REBALANCING) public static final String IGNITE_DISABLE_WAL_DURING_REBALANCING = "IGNITE_DISABLE_WAL_DURING_REBALANCING"; /** * When property is set {@code false} each next exchange will try to compare with previous. * If last rebalance is equivalent with new possible one, new rebalance does not trigger. 
* Set the property {@code true} and each exchange will try to trigger new rebalance. * * Default is {@code false}. */ @SystemProperty("When property is set false each next exchange will try to compare with previous. " + "If last rebalance is equivalent with new possible one, new rebalance does not trigger. " + "Set the property true and each exchange will try to trigger new rebalance") public static final String IGNITE_DISABLE_REBALANCING_CANCELLATION_OPTIMIZATION = "IGNITE_DISABLE_REBALANCING_CANCELLATION_OPTIMIZATION"; /** * Sets timeout for TCP client recovery descriptor reservation. */ @SystemProperty(value = "Timeout for TCP client recovery descriptor reservation in milliseconds", type = Long.class, defaults = "" + DFLT_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT) public static final String IGNITE_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT = "IGNITE_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT"; /** * When set to {@code true}, Ignite will skip partitions sizes check on partition validation after rebalance has finished. * Partitions sizes may differs on nodes when Expiry Policy is in use and it is ok due to lazy entry eviction mechanics. * * There is no need to disable partition size validation either in normal case or when expiry policy is configured for cache. * But it should be disabled manually when policy is used on per entry basis to hint Ignite to skip this check. * * Default is {@code false}. */ @SystemProperty("Enables Ignite to skip partitions sizes check on partition " + "validation after rebalance has finished. Partitions sizes may differs on nodes when Expiry Policy is in " + "use and it is ok due to lazy entry eviction mechanics. There is no need to disable partition size " + "validation either in normal case or when expiry policy is configured for cache. 
But it should be disabled " + "manually when policy is used on per entry basis to hint Ignite to skip this check") public static final String IGNITE_SKIP_PARTITION_SIZE_VALIDATION = "IGNITE_SKIP_PARTITION_SIZE_VALIDATION"; /** * Enables threads dumping on critical node failure. * * Default is {@code true}. */ @SystemProperty("Enables threads dumping on critical node failure") public static final String IGNITE_DUMP_THREADS_ON_FAILURE = "IGNITE_DUMP_THREADS_ON_FAILURE"; /** * Throttling time out for thread dump generation during failure handling. * * Default is failure detection timeout. {@code 0} or negative value - throttling is disabled. */ @SystemProperty(value = "Throttling time out for thread dump generation during failure handling " + "in milliseconds. Default is failure detection timeout. 0 or negative value - throttling is disabled", type = Long.class) public static final String IGNITE_DUMP_THREADS_ON_FAILURE_THROTTLING_TIMEOUT = "IGNITE_DUMP_THREADS_ON_FAILURE_THROTTLING_TIMEOUT"; /** * Throttling timeout in millis which avoid excessive PendingTree access on unwind if there is nothing to clean yet. * * Default is 500 ms. */ @SystemProperty(value = "Throttling timeout in milliseconds which avoid excessive PendingTree access on " + "unwind if there is nothing to clean yet", type = Long.class, defaults = "" + DFLT_UNWIND_THROTTLING_TIMEOUT) public static final String IGNITE_UNWIND_THROTTLING_TIMEOUT = "IGNITE_UNWIND_THROTTLING_TIMEOUT"; /** * Threshold for throttling operations logging. */ @SystemProperty(value = "Threshold in seconds for throttling operations logging", type = Integer.class, defaults = "" + DFLT_THROTTLE_LOG_THRESHOLD) public static final String IGNITE_THROTTLE_LOG_THRESHOLD = "IGNITE_THROTTLE_LOG_THRESHOLD"; /** * Number of concurrent operation for evict partitions. * * @deprecated Since version 2.10. Use {@link IgniteConfiguration#setRebalanceThreadPoolSize(int)} to manage * eviction parallelism. 
*/ @Deprecated @SystemProperty(value = "Number of concurrent operation for evict partitions", type = Integer.class) public static final String IGNITE_EVICTION_PERMITS = "IGNITE_EVICTION_PERMITS"; /** * When set to {@code true}, Ignite will allow execute DML operation (MERGE|INSERT|UPDATE|DELETE) * within transaction for non MVCC mode. * * Default is {@code false}. */ @SystemProperty("When set to true, Ignite will allow execute DML operation " + "(MERGE|INSERT|UPDATE|DELETE) within transaction for non MVCC mode") public static final String IGNITE_ALLOW_DML_INSIDE_TRANSACTION = "IGNITE_ALLOW_DML_INSIDE_TRANSACTION"; /** * Timeout between ZooKeeper client retries, default 2s. */ @SystemProperty(value = "Timeout between ZooKeeper client retries in milliseconds", type = Long.class, defaults = "2 seconds") public static final String IGNITE_ZOOKEEPER_DISCOVERY_RETRY_TIMEOUT = "IGNITE_ZOOKEEPER_DISCOVERY_RETRY_TIMEOUT"; /** * Number of attempts to reconnect to ZooKeeper. */ @SystemProperty(value = "Number of attempts to reconnect to ZooKeeper", type = Integer.class, defaults = "10") public static final String IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT = "IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT"; /** * Maximum number for cached MVCC transaction updates. This caching is used for continuous query with MVCC caches. */ @SystemProperty(value = "Maximum number for cached MVCC transaction updates. This caching is used " + "for continuous query with MVCC caches", type = Integer.class, defaults = "" + DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD) public static final String IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD = "IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD"; /** * Try reuse memory on deactivation. Useful in case of huge page memory region size. */ @SystemProperty("Try reuse memory on deactivation") public static final String IGNITE_REUSE_MEMORY_ON_DEACTIVATE = "IGNITE_REUSE_MEMORY_ON_DEACTIVATE"; /** * Maximum inactivity period for system worker in milliseconds. 
When this value is exceeded, worker is considered * blocked with consequent critical failure handler invocation. */ @SystemProperty(value = "Maximum inactivity period for system worker in milliseconds. When this " + "value is exceeded, worker is considered blocked with consequent critical failure handler invocation", type = Long.class) public static final String IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT = "IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT"; /** * Timeout for checkpoint read lock acquisition in milliseconds. */ @SystemProperty(value = "Timeout for checkpoint read lock acquisition in milliseconds", type = Long.class) public static final String IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT = "IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT"; /** * Timeout for waiting schema update if schema was not found for last accepted version. */ @SystemProperty(value = "Timeout for waiting schema update if schema was not found for last accepted " + "version in milliseconds", type = Long.class, defaults = "" + DFLT_WAIT_SCHEMA_UPDATE) public static final String IGNITE_WAIT_SCHEMA_UPDATE = "IGNITE_WAIT_SCHEMA_UPDATE"; /** * System property to override {@link CacheConfiguration#getRebalanceThrottle} configuration property for all caches. * {@code 0} by default, which means that override is disabled. * @deprecated Use {@link IgniteConfiguration#getRebalanceThrottle()} instead. */ @Deprecated @SystemProperty(value = "Overrides CacheConfiguration.getRebalanceThrottle " + "configuration property for all caches in milliseconds. 0 by default, which means that override is disabled", type = Long.class, defaults = "0") public static final String IGNITE_REBALANCE_THROTTLE_OVERRIDE = "IGNITE_REBALANCE_THROTTLE_OVERRIDE"; /** * Enables start caches in parallel. * * Default is {@code true}. 
*/ @SystemProperty(value = "Enables start caches in parallel", defaults = "" + DFLT_ALLOW_START_CACHES_IN_PARALLEL) public static final String IGNITE_ALLOW_START_CACHES_IN_PARALLEL = "IGNITE_ALLOW_START_CACHES_IN_PARALLEL"; /** For test purposes only. Force Mvcc mode. */ @SystemProperty("For test purposes only. Force Mvcc mode") public static final String IGNITE_FORCE_MVCC_MODE_IN_TESTS = "IGNITE_FORCE_MVCC_MODE_IN_TESTS"; /** * Allows to log additional information about all restored partitions after binary and logical recovery phases. * * Default is {@code true}. */ @SystemProperty("Allows to log additional information about all restored partitions after " + "binary and logical recovery phases") public static final String IGNITE_RECOVERY_VERBOSE_LOGGING = "IGNITE_RECOVERY_VERBOSE_LOGGING"; /** * Disables cache interceptor triggering in case of conflicts. * * Default is {@code false}. */ @SystemProperty("Disables cache interceptor triggering in case of conflicts") public static final String IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT = "IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT"; /** * Sets default {@link CacheConfiguration#setDiskPageCompression disk page compression}. */ @SystemProperty(value = "Disk page compression - CacheConfiguration#setDiskPageCompression", type = DiskPageCompression.class) public static final String IGNITE_DEFAULT_DISK_PAGE_COMPRESSION = "IGNITE_DEFAULT_DISK_PAGE_COMPRESSION"; /** * Sets default {@link DataStorageConfiguration#setPageSize storage page size}. */ @SystemProperty(value = "Storage page size - DataStorageConfiguration#setPageSize", type = Integer.class) public static final String IGNITE_DEFAULT_DATA_STORAGE_PAGE_SIZE = "IGNITE_DEFAULT_DATA_STORAGE_PAGE_SIZE"; /** * Manages the type of the implementation of the service processor (implementation of the {@link IgniteServices}). * All nodes in the cluster must have the same value of this property. 
* <p/> * If the property is {@code true} then event-driven implementation of the service processor will be used. * <p/> * If the property is {@code false} then internal cache based implementation of service processor will be used. * <p/> * Default is {@code true}. */ @SystemProperty(value = "Manages the type of the implementation of the service processor " + "(implementation of the IgniteServices). All nodes in the cluster must have the same value of this property. " + "If the property is true then event-driven implementation of the service processor will be used. If the " + "property is false then internal cache based implementation of service processor will be used", defaults = "" + DFLT_EVENT_DRIVEN_SERVICE_PROCESSOR_ENABLED) public static final String IGNITE_EVENT_DRIVEN_SERVICE_PROCESSOR_ENABLED = "IGNITE_EVENT_DRIVEN_SERVICE_PROCESSOR_ENABLED"; /** * When set to {@code true}, cache metrics are not included into the discovery metrics update message (in this * case message contains only cluster metrics). By default cache metrics are included into the message and * calculated each time the message is sent. * <p> * Cache metrics sending can also be turned off by disabling statistics per each cache, but in this case some cache * metrics will be unavailable via JMX too. */ @SystemProperty("When set to true, cache metrics are not included into the discovery metrics " + "update message (in this case message contains only cluster metrics). By default cache metrics are included " + "into the message and calculated each time the message is sent. Cache metrics sending can also be turned off " + "by disabling statistics per each cache, but in this case some cache metrics will be unavailable via JMX too") public static final String IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE = "IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE"; /** * Maximum number of different partitions to be extracted from between expression within sql query. 
* In case of limit exceeding all partitions will be used. */ @SystemProperty(value = "Maximum number of different partitions to be extracted from between " + "expression within sql query. In case of limit exceeding all partitions will be used", type = Integer.class, defaults = "16") public static final String IGNITE_SQL_MAX_EXTRACTED_PARTS_FROM_BETWEEN = "IGNITE_SQL_MAX_EXTRACTED_PARTS_FROM_BETWEEN"; /** * Maximum amount of bytes that can be stored in history of {@link DistributedMetaStorage} updates. */ @SystemProperty(value = "Maximum amount of bytes that can be stored in history of DistributedMetaStorage updates", type = Long.class, defaults = "" + DFLT_MAX_HISTORY_BYTES) public static final String IGNITE_GLOBAL_METASTORAGE_HISTORY_MAX_BYTES = "IGNITE_GLOBAL_METASTORAGE_HISTORY_MAX_BYTES"; /** * Size threshold to allocate and retain additional HashMap to improve contains() * which leads to extra memory consumption. */ @SystemProperty(value = "Size threshold to allocate and retain additional HashMap to improve " + "contains() which leads to extra memory consumption", type = Integer.class, defaults = "" + DFLT_AFFINITY_BACKUPS_THRESHOLD) public static final String IGNITE_AFFINITY_BACKUPS_THRESHOLD = "IGNITE_AFFINITY_BACKUPS_THRESHOLD"; /** * Flag to disable memory optimization: * BitSets instead of HashSets to store partitions. * When number of backups per partition is > IGNITE_AFFINITY_BACKUPS_THRESHOLD we use HashMap to improve contains() * which leads to extra memory consumption, otherwise we use view on the * list of cluster nodes to reduce memory consumption on redundant data structures. */ @SystemProperty("Disables memory optimization: BitSets instead of HashSets to store " + "partitions. 
When number of backups per partitions is > IGNITE_AFFINITY_BACKUPS_THRESHOLD we use HashMap to " + "improve contains() which leads to extra memory consumption, otherwise we use view on the list of cluster " + "nodes to reduce memory consumption on redundant data structures") public static final String IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION = "IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION"; /** * Limit the maximum number of objects in memory during the recovery procedure. */ @SystemProperty(value = "Limit the maximum number of objects in memory during the recovery procedure", type = Integer.class) public static final String IGNITE_RECOVERY_SEMAPHORE_PERMITS = "IGNITE_RECOVERY_SEMAPHORE_PERMITS"; /** * Maximum size of history of server nodes (server node IDs) that ever joined to current topology. */ @SystemProperty(value = "Maximum size of history of server nodes (server node IDs) that ever joined " + "to current topology", type = Integer.class, defaults = "" + DFLT_NODE_IDS_HISTORY_SIZE) public static final String IGNITE_NODE_IDS_HISTORY_SIZE = "IGNITE_NODE_IDS_HISTORY_SIZE"; /** * Maximum number of diagnostic warning messages per category, when waiting for PME. */ @SystemProperty(value = "Maximum number of diagnostic warning messages per category, when waiting for PME", type = Integer.class, defaults = "" + DFLT_DIAGNOSTIC_WARN_LIMIT) public static final String IGNITE_DIAGNOSTIC_WARN_LIMIT = "IGNITE_DIAGNOSTIC_WARN_LIMIT"; /** * Flag to enable triggering failure handler for node if unrecoverable partition inconsistency is * discovered during partition update counters exchange. 
*/ @SystemProperty("Enables triggering failure handler for node if unrecoverable " + "partition inconsistency is discovered during partition update counters exchange") public static final String IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY = "IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY"; /** * Allow use composite _key, _val columns at the INSERT/UPDATE/MERGE statements. */ @SystemProperty("Allow use composite _key, _val columns at the INSERT/UPDATE/MERGE statements") public static final String IGNITE_SQL_ALLOW_KEY_VAL_UPDATES = "IGNITE_SQL_ALLOW_KEY_VAL_UPDATES"; /** * Interval between logging of time of next auto-adjust. */ @SystemProperty(value = "Interval between logging of time of next auto-adjust in milliseconds", type = Long.class, defaults = "" + DFLT_BASELINE_AUTO_ADJUST_LOG_INTERVAL) public static final String IGNITE_BASELINE_AUTO_ADJUST_LOG_INTERVAL = "IGNITE_BASELINE_AUTO_ADJUST_LOG_INTERVAL"; /** * Starting from this number of dirty pages in checkpoint, array will be sorted with * {@link Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. */ @SystemProperty(value = "Starting from this number of dirty pages in checkpoint, array will be " + "sorted with Arrays.parallelSort(Comparable[]) in case of CheckpointWriteOrder.SEQUENTIAL", type = Integer.class, defaults = "" + DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD) public static final String CHECKPOINT_PARALLEL_SORT_THRESHOLD = "CHECKPOINT_PARALLEL_SORT_THRESHOLD"; /** * Keep static cache configuration even if stored cache data differs from the static config. When this property * is set, static cache configuration will override persisted configuration. DDL operations are not allowed * when this system property is set. */ @SystemProperty("Keep static cache configuration even if stored cache data differs from " + "the static config. When this property is set, static cache configuration will override persisted " + "configuration. 
DDL operations are not allowed when this system property is set") public static final String IGNITE_KEEP_STATIC_CACHE_CONFIGURATION = "IGNITE_KEEP_STATIC_CACHE_CONFIGURATION"; /** Enable backward compatible to use 'IGNITE' as SQL system schema. */ @SystemProperty("Enable backward compatible to use 'IGNITE' as SQL system schema") public static final String IGNITE_SQL_SYSTEM_SCHEMA_NAME_IGNITE = "IGNITE_SQL_SYSTEM_SCHEMA_NAME_IGNITE"; /** * Shows if dump requests from local node to near node are allowed, when long running transaction * is found. If allowed, the compute request to near node will be made to get thread dump of transaction * owner thread. */ @SystemProperty(value = "Shows if dump requests from local node to near node are allowed, when " + "long running transaction is found. If allowed, the compute request to near node will be made to get " + "thread dump of transaction owner thread", defaults = "" + DFLT_TX_OWNER_DUMP_REQUESTS_ALLOWED) public static final String IGNITE_TX_OWNER_DUMP_REQUESTS_ALLOWED = "IGNITE_TX_OWNER_DUMP_REQUESTS_ALLOWED"; /** * Page lock tracker type. * -1 - Disable lock tracking. * 1 - HEAP_STACK * 2 - HEAP_LOG * 3 - OFF_HEAP_STACK * 4 - OFF_HEAP_LOG * * Default is 2 - HEAP_LOG. */ @SystemProperty(value = "Page lock tracker type. -1 - Disable lock tracking. 1 - HEAP_STACK. " + "2 - HEAP_LOG. 3 - OFF_HEAP_STACK. 4 - OFF_HEAP_LOG", type = Integer.class, defaults = "" + HEAP_LOG) public static final String IGNITE_PAGE_LOCK_TRACKER_TYPE = "IGNITE_PAGE_LOCK_TRACKER_TYPE"; /** * Capacity in pages for storing in page lock tracker strucuture. * * Default is 512 pages. */ @SystemProperty(value = "Capacity in pages for storing in page lock tracker strucuture", type = Integer.class, defaults = "" + DFLT_PAGE_LOCK_TRACKER_CAPACITY) public static final String IGNITE_PAGE_LOCK_TRACKER_CAPACITY = "IGNITE_PAGE_LOCK_TRACKER_CAPACITY"; /** * Page lock tracker thread for checking hangs threads interval. * * Default is 60_000 ms. 
*/ @SystemProperty(value = "Page lock tracker thread for checking hangs threads interval", type = Integer.class, defaults = "" + DFLT_PAGE_LOCK_TRACKER_CHECK_INTERVAL) public static final String IGNITE_PAGE_LOCK_TRACKER_CHECK_INTERVAL = "IGNITE_PAGE_LOCK_TRACKER_CHECK_INTERVAL"; /** * Enables threads locks dumping on critical node failure. * * Default is {@code true}. */ @SystemProperty(value = "Enables threads locks dumping on critical node failure", defaults = "" + DFLT_DUMP_PAGE_LOCK_ON_FAILURE) public static final String IGNITE_DUMP_PAGE_LOCK_ON_FAILURE = "IGNITE_DUMP_PAGE_LOCK_ON_FAILURE"; /** * Scan the classpath on startup and log all the files containing in it. */ @SystemProperty(value = "Scan the classpath on startup and log all the files containing in it", defaults = "" + DFLT_LOG_CLASSPATH_CONTENT_ON_STARTUP) public static final String IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP = "IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP"; /** * Threshold timeout for long transactions, if transaction exceeds it, it will be dumped in log with * information about how much time did it spent in system time (time while aquiring locks, preparing, * commiting, etc) and user time (time when client node runs some code while holding transaction and not * waiting it). Equals 0 if not set. No long transactions are dumped in log if nor this parameter * neither {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is set. */ @SystemProperty(value = "Threshold timeout for long transactions, if transaction exceeds it, it " + "will be dumped in log with information about how much time did it spent in system time (time while acquiring " + "locks, preparing, committing, etc) and user time (time when client node runs some code while holding " + "transaction and not waiting it). Equals 0 if not set. 
No long transactions are dumped in log if nor " + "this parameter neither IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT is set", type = Long.class) public static final String IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD = "IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD"; /** * The coefficient for samples of completed transactions that will be dumped in log. Must be float value * between 0.0 and 1.0 inclusive. Default value is <code>0.0</code>. */ @SystemProperty(value = "The coefficient for samples of completed transactions that will be dumped " + "in log. Must be float value between 0.0 and 1.0 inclusive", type = Float.class) public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT = "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT"; /** * The limit of samples of completed transactions that will be dumped in log per second, if * {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is above <code>0.0</code>. Must be integer value * greater than <code>0</code>. Default value is <code>5</code>. */ @SystemProperty(value = "The limit of samples of completed transactions that will be dumped in log " + "per second, if " + IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT + " is above 0.0. Must be integer value " + "greater than 0", type = Integer.class, defaults = "" + DFLT_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT) public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT = "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT"; /** * Disable onheap caching of pages lists (free lists and reuse lists). * If persistence is enabled changes to page lists are not stored to page memory immediately, they are cached in * onheap buffer and flushes to page memory on a checkpoint. This property allows to disable such onheap caching. * Default value is <code>false</code>. */ @SystemProperty("Disables onheap caching of pages lists (free lists and reuse lists). 
" + "If persistence is enabled changes to page lists are not stored to page memory immediately, they are " + "cached in onheap buffer and flushes to page memory on a checkpoint. This property allows to disable such " + "onheap caching") public static final String IGNITE_PAGES_LIST_DISABLE_ONHEAP_CACHING = "IGNITE_PAGES_LIST_DISABLE_ONHEAP_CACHING"; /** * The master key name that the node will use during the recovery. * <p> * If a node was unavailable during a master key change process it won't be able to join to cluster with old the * master key. Set up this property to re-encrypt cache keys on startup and join to cluster with the valid * master key name. */ @SystemProperty(value = "The master key name that the node will use during the recovery. If a node " + "was unavailable during a master key change process it won't be able to join to cluster with old the " + "master key. Set up this property to re-encrypt cache keys on startup and join to cluster with the valid " + "master key name", type = String.class) public static final String IGNITE_MASTER_KEY_NAME_TO_CHANGE_BEFORE_STARTUP = "IGNITE_MASTER_KEY_NAME_TO_CHANGE_BEFORE_STARTUP"; /** * Enables extended logging of indexes create/rebuild process. Default {@code false}. * <p/> * <b>Warning</b>: enabling that option can lead to performance degradation of index creation, rebuilding and node * restart. */ @SystemProperty("Enables extended logging of indexes create/rebuild process. " + "Warning: enabling that option can lead to performance degradation of index creation, rebuilding and " + "node restart") public static final String IGNITE_ENABLE_EXTRA_INDEX_REBUILD_LOGGING = "IGNITE_ENABLE_EXTRA_INDEX_REBUILD_LOGGING"; /** * When enabled, node will wait until all of its data is backed up before shutting down. * Please note that it will completely prevent last node in cluster from shutting down if any caches exist * that have backups configured. 
*/ @IgniteExperimental @SystemProperty("Enables node to wait until all of its data is backed up before " + "shutting down. Please note that it will completely prevent last node in cluster from shutting down if any " + "caches exist that have backups configured") public static final String IGNITE_WAIT_FOR_BACKUPS_ON_SHUTDOWN = "IGNITE_WAIT_FOR_BACKUPS_ON_SHUTDOWN"; /** * Enables subquery rewriting optimization. * If enabled, subquery will be rewritten to JOIN where possible. * Default is {@code true}. */ @IgniteExperimental @SystemProperty("Enables subquery rewriting optimization. " + "If enabled, subquery will be rewritten to JOIN where possible") public static final String IGNITE_ENABLE_SUBQUERY_REWRITE_OPTIMIZATION = "IGNITE_ENABLE_SUBQUERY_REWRITE_OPTIMIZATION"; /** * Enables setting attribute value of {@link * TcpCommunicationSpi#ATTR_HOST_NAMES ATTR_HOST_NAMES} when value {@link * IgniteConfiguration#getLocalHost getLocalHost} is ip, for backward * compatibility. By default, {@code false}. */ @SystemProperty("Enables setting attribute value of TcpCommunicationSpi#ATTR_HOST_NAMES " + "when value IgniteConfiguration#getLocalHost is ip, for backward compatibility") public static final String IGNITE_TCP_COMM_SET_ATTR_HOST_NAMES = "IGNITE_TCP_COMM_SET_ATTR_HOST_NAMES"; /** * When above zero, prints tx key collisions once per interval. * Each transaction besides OPTIMISTIC SERIALIZABLE capture locks on all enlisted keys, for some reasons * per key lock queue may rise. This property sets the interval during which statistics are collected. * Default is 1000 ms. */ @SystemProperty(value = "When above zero, prints tx key collisions once per interval. Each " + "transaction besides OPTIMISTIC SERIALIZABLE capture locks on all enlisted keys, for some reasons per key " + "lock queue may rise. 
This property sets the interval during which statistics are collected", type = Integer.class, defaults = "" + DFLT_DUMP_TX_COLLISIONS_INTERVAL) public static final String IGNITE_DUMP_TX_COLLISIONS_INTERVAL = "IGNITE_DUMP_TX_COLLISIONS_INTERVAL"; /** * Set to true only during the junit tests. * Signals that the cluster is running in a test environment. * * Can be used for changing behaviour of tightly coupled code pieces during the tests. * Use it as a last resort only, prefer another toolchain like DI, mocks and etc. if possible */ @SystemProperty(value = "Set to true only during the junit tests. " + "Can be used for changing behaviour of tightly coupled code pieces during the tests. " + "Use it as a last resort only, prefer another toolchain like DI, mocks and etc. if possible", type = Boolean.class) public static final String IGNITE_TEST_ENV = "IGNITE_TEST_ENV"; /** * Enforces singleton. */ private IgniteSystemProperties() { // No-op. } /** * @param enumCls Enum type. * @param name Name of the system property or environment variable. * @return Enum value or {@code null} if the property is not set. */ public static <E extends Enum<E>> E getEnum(Class<E> enumCls, String name) { return getEnum(enumCls, name, null); } /** * @param name Name of the system property or environment variable. * @return Enum value or the given default. */ public static <E extends Enum<E>> E getEnum(String name, E dflt) { return getEnum(dflt.getDeclaringClass(), name, dflt); } /** * @param enumCls Enum type. * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Enum value or the given default. */ private static <E extends Enum<E>> E getEnum(Class<E> enumCls, String name, E dflt) { assert enumCls != null; String val = getString(name); if (val == null) return dflt; try { return Enum.valueOf(enumCls, val); } catch (IllegalArgumentException ignore) { return dflt; } } /** * Gets either system property or environment variable with given name. 
* * @param name Name of the system property or environment variable. * @return Value of the system property or environment variable. * Returns {@code null} if neither can be found for given name. */ @Nullable public static String getString(String name) { assert name != null; String v = System.getProperty(name); if (v == null) v = System.getenv(name); return v; } /** * Gets either system property or environment variable with given name. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Value of the system property or environment variable. * Returns {@code null} if neither can be found for given name. */ @Nullable public static String getString(String name, String dflt) { String val = getString(name); return val == null ? dflt : val; } /** * Gets either system property or environment variable with given name. * The result is transformed to {@code boolean} using {@code Boolean.valueOf()} method. * * @param name Name of the system property or environment variable. * @return Boolean value of the system property or environment variable. * Returns {@code False} in case neither system property * nor environment variable with given name is found. */ public static boolean getBoolean(String name) { return getBoolean(name, false); } /** * Gets either system property or environment variable with given name. * The result is transformed to {@code boolean} using {@code Boolean.valueOf()} method. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Boolean value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. */ public static boolean getBoolean(String name, boolean dflt) { String val = getString(name); return val == null ? dflt : Boolean.parseBoolean(val); } /** * Gets either system property or environment variable with given name. 
* The result is transformed to {@code int} using {@code Integer.parseInt()} method. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. */ public static int getInteger(String name, int dflt) { String s = getString(name); if (s == null) return dflt; int res; try { res = Integer.parseInt(s); } catch (NumberFormatException ignore) { res = dflt; } return res; } /** * Gets either system property or environment variable with given name. * The result is transformed to {@code float} using {@code Float.parseFloat()} method. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Float value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. */ public static float getFloat(String name, float dflt) { String s = getString(name); if (s == null) return dflt; float res; try { res = Float.parseFloat(s); } catch (NumberFormatException ignore) { res = dflt; } return res; } /** * Gets either system property or environment variable with given name. * The result is transformed to {@code long} using {@code Long.parseLong()} method. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. */ public static long getLong(String name, long dflt) { String s = getString(name); if (s == null) return dflt; long res; try { res = Long.parseLong(s); } catch (NumberFormatException ignore) { res = dflt; } return res; } /** * Gets either system property or environment variable with given name. 
* The result is transformed to {@code double} using {@code Double.parseDouble()} method. * * @param name Name of the system property or environment variable. * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. */ public static double getDouble(String name, double dflt) { String s = getString(name); if (s == null) return dflt; double res; try { res = Double.parseDouble(s); } catch (NumberFormatException ignore) { res = dflt; } return res; } /** * Gets snapshot of system properties. * Snapshot could be used for thread safe iteration over system properties. * Non-string properties are removed before return. * * @return Snapshot of system properties. */ public static Properties snapshot() { Properties sysProps = (Properties)System.getProperties().clone(); Iterator<Map.Entry<Object, Object>> iter = sysProps.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = iter.next(); if (!(entry.getValue() instanceof String) || !(entry.getKey() instanceof String)) iter.remove(); } return sysProps; } }
apache-2.0
PearsonEducation/StatsAgg
src/main/java/com/pearson/statsagg/web_ui/Alert_SuspensionAssociations.java
8557
package com.pearson.statsagg.web_ui; import com.pearson.statsagg.database_objects.suspensions.Suspension; import com.pearson.statsagg.database_objects.suspensions.SuspensionsDao; import com.pearson.statsagg.database_objects.alerts.Alert; import com.pearson.statsagg.database_objects.alerts.AlertsDao; import java.io.PrintWriter; import java.util.Set; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import com.pearson.statsagg.globals.GlobalVariables; import com.pearson.statsagg.utilities.core_utils.StackTrace; import com.pearson.statsagg.utilities.string_utils.StringUtilities; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * @author Jeffrey Schmidt */ @WebServlet(name = "Alert-SuspensionAssociations", urlPatterns = {"/Alert-SuspensionAssociations"}) public class Alert_SuspensionAssociations extends HttpServlet { private static final Logger logger = LoggerFactory.getLogger(Alert_SuspensionAssociations.class.getName()); public static final String PAGE_NAME = "Alert - Suspension Associations"; /** * Handles the HTTP <code>GET</code> method. * * @param request servlet request * @param response servlet response */ @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) { processGetRequest(request, response); } /** * Handles the HTTP <code>POST</code> method. * * @param request servlet request * @param response servlet response */ @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) { processGetRequest(request, response); } /** * Returns a short description of the servlet. 
* * @return a String containing servlet description */ @Override public String getServletInfo() { return PAGE_NAME; } protected void processGetRequest(HttpServletRequest request, HttpServletResponse response) { if ((request == null) || (response == null)) { return; } try { request.setCharacterEncoding("UTF-8"); response.setCharacterEncoding("UTF-8"); response.setContentType("text/html"); } catch (Exception e) { logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e)); } PrintWriter out = null; String name = request.getParameter("Name"); boolean excludeNavbar = StringUtilities.isStringValueBooleanTrue(request.getParameter("ExcludeNavbar")); String alert_SuspensionAssociations = getAlert_SuspensionAssociations(name, excludeNavbar); try { StringBuilder htmlBuilder = new StringBuilder(); StatsAggHtmlFramework statsAggHtmlFramework = new StatsAggHtmlFramework(); String htmlHeader = statsAggHtmlFramework.createHtmlHeader("StatsAgg - " + PAGE_NAME, ""); String htmlBody = statsAggHtmlFramework.createHtmlBody( "<div id=\"page-content-wrapper\">\n" + "<!-- Keep all page content within the page-content inset div! 
-->\n" + " <div class=\"page-content inset statsagg_page_content_font\">\n" + " <div class=\"content-header\"> \n" + " <div class=\"pull-left content-header-h2-min-width-statsagg\"> <h2> " + PAGE_NAME + " </h2> </div>\n" + " </div> " + " <div class=\"statsagg_force_word_wrap\">" + alert_SuspensionAssociations + " </div>\n" + " </div>\n" + "</div>\n", excludeNavbar); htmlBuilder.append("<!DOCTYPE html>\n<html>\n").append(htmlHeader).append(htmlBody).append("</html>"); Document htmlDocument = Jsoup.parse(htmlBuilder.toString()); String htmlFormatted = htmlDocument.toString(); out = response.getWriter(); out.println(htmlFormatted); } catch (Exception e) { logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e)); } finally { if (out != null) { out.close(); } } } private String getAlert_SuspensionAssociations(String alertName, boolean excludeNavbar) { if (alertName == null) { return "<b>No alert specified</b>"; } AlertsDao alertsDao = new AlertsDao(); Alert alert = alertsDao.getAlertByName(alertName); if (alert == null) return "<b>Alert not found</b>"; StringBuilder outputString = new StringBuilder(); outputString.append("<b>Alert Name</b> = ").append(StatsAggHtmlFramework.htmlEncode(alert.getName())).append("<br>"); Set<Integer> suspensionIds; synchronized(GlobalVariables.suspensionIdAssociationsByAlertId) { suspensionIds = GlobalVariables.suspensionIdAssociationsByAlertId.get(alert.getId()); } if (suspensionIds == null) { outputString.append("<b>Total Associations</b> = ").append("0"); return outputString.toString(); } int associationCount = suspensionIds.size(); outputString.append("<b>Total Associations</b> = ").append(associationCount).append("<br><br>"); if (associationCount <= 0) return outputString.toString(); outputString.append("<b>Associations...</b>").append("<br>"); outputString.append("<ul>"); Map<String,String> suspensionStrings = new HashMap<>(); SuspensionsDao suspensionsDao = null; try { suspensionsDao = new 
SuspensionsDao(false); for (Integer suspensionId : suspensionIds) { if (suspensionId == null) continue; Suspension suspension = suspensionsDao.getSuspension(suspensionId); if ((suspension == null) || (suspension.getName() == null)) continue; String suspensionDetailsUrl = "<a href=\"SuspensionDetails?ExcludeNavbar=true&amp;Name=" + StatsAggHtmlFramework.urlEncode(suspension.getName()) + "\">" + StatsAggHtmlFramework.htmlEncode(suspension.getName()) + "</a>"; boolean isSuspensionActive = Suspension.isSuspensionActive(suspension); StringBuilder status = new StringBuilder(); if (isSuspensionActive) status.append("(active"); else status.append("(inactive"); if ((suspension.isSuspendNotificationOnly() != null) && suspension.isSuspendNotificationOnly()) status.append(", suspend notification only"); else if ((suspension.isSuspendNotificationOnly() != null) && !suspension.isSuspendNotificationOnly()) status.append(", suspend entire alert"); status.append(")"); if (isSuspensionActive) suspensionStrings.put(suspension.getName(), "<li>" + "<b>" + suspensionDetailsUrl + "&nbsp" + status.toString() + "</b>" + "</li>"); else suspensionStrings.put(suspension.getName(), "<li>" + suspensionDetailsUrl + "&nbsp" + status.toString() + "</li>"); } } catch (Exception e) { logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e)); } finally { try { if (suspensionsDao != null) suspensionsDao.close(); } catch (Exception e) { logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e)); } } List<String> sortedSuspensionStrings = new ArrayList<>(suspensionStrings.keySet()); Collections.sort(sortedSuspensionStrings); for (String suspensionString : sortedSuspensionStrings) { String suspensionOutputString = suspensionStrings.get(suspensionString); outputString.append(suspensionOutputString); } outputString.append("</ul>"); return outputString.toString(); } }
apache-2.0
consulo/consulo-napile
src/org/napile/idea/plugin/editor/hierarchy/util/NapileSourceComparator.java
1954
package org.napile.idea.plugin.editor.hierarchy.util; import java.util.Comparator; import org.napile.compiler.lang.psi.NapileClass; import org.napile.compiler.lang.psi.NapileFile; import org.napile.idea.plugin.projectView.NapileClassTreeNode; import com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode; import com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode; import com.intellij.ide.projectView.impl.nodes.PsiFileNode; import com.intellij.ide.util.treeView.NodeDescriptor; /** * @see com.intellij.ide.util.treeView.SourceComparator */ public class NapileSourceComparator implements Comparator<NodeDescriptor> { public static final NapileSourceComparator INSTANCE = new NapileSourceComparator(); private NapileSourceComparator() { } public int compare(NodeDescriptor nodeDescriptor1, NodeDescriptor nodeDescriptor2) { int weight1 = getWeight(nodeDescriptor1); int weight2 = getWeight(nodeDescriptor2); if(weight1 != weight2) { return weight1 - weight2; } if(!(nodeDescriptor1.getParentDescriptor() instanceof ProjectViewProjectNode)) { if(nodeDescriptor1 instanceof PsiDirectoryNode || nodeDescriptor1 instanceof PsiFileNode) { return nodeDescriptor1.toString().compareToIgnoreCase(nodeDescriptor2.toString()); } if(nodeDescriptor1 instanceof NapileClassTreeNode && nodeDescriptor2 instanceof NapileClassTreeNode) { if(((NapileClass)nodeDescriptor1.getElement()).getParent() instanceof NapileFile) { return nodeDescriptor1.toString().compareToIgnoreCase(nodeDescriptor2.toString()); } } } int index1 = nodeDescriptor1.getIndex(); int index2 = nodeDescriptor2.getIndex(); if(index1 == index2) return 0; return index1 < index2 ? -1 : +1; } private static int getWeight(NodeDescriptor descriptor) { if(descriptor instanceof PsiDirectoryNode) { return ((PsiDirectoryNode) descriptor).isFQNameShown() ? 7 : 0; } else { return 2; } } }
apache-2.0
bq/corbel
restor/src/main/java/com/bq/corbel/resources/rem/model/RestorObject.java
1064
package com.bq.corbel.resources.rem.model; import java.io.InputStream; /** * @author Alberto J. Rubio */ public class RestorObject { private final String mediaType; private final InputStream inputStream; private final Long contentLength; private final String etag; public RestorObject(String mediaType, InputStream inputStream, Long contentLength, String etag) { this.mediaType = mediaType; this.inputStream = inputStream; this.contentLength = contentLength; this.etag = etag; } public RestorObject(String mediaType, InputStream inputStream, Long contentLength) { this.mediaType = mediaType; this.inputStream = inputStream; this.contentLength = contentLength; this.etag = null; } public String getMediaType() { return mediaType; } public InputStream getInputStream() { return inputStream; } public Long getContentLength() { return contentLength; } public String getEtag() { return etag; } }
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-elasticache/src/main/java/com/amazonaws/services/elasticache/model/CreateSnapshotRequest.java
7633
/* * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.elasticache.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.AmazonWebServiceRequest; /** * <p> * Represents the input of a <code>CreateSnapshot</code> operation. * </p> * * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateSnapshot" target="_top">AWS API * Documentation</a> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class CreateSnapshotRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable { /** * <p> * The identifier of an existing replication group. The snapshot is created from this replication group. * </p> */ private String replicationGroupId; /** * <p> * The identifier of an existing cluster. The snapshot is created from this cluster. * </p> */ private String cacheClusterId; /** * <p> * A name for the snapshot being created. * </p> */ private String snapshotName; /** * <p> * The identifier of an existing replication group. The snapshot is created from this replication group. * </p> * * @param replicationGroupId * The identifier of an existing replication group. The snapshot is created from this replication group. */ public void setReplicationGroupId(String replicationGroupId) { this.replicationGroupId = replicationGroupId; } /** * <p> * The identifier of an existing replication group. The snapshot is created from this replication group. 
* </p> * * @return The identifier of an existing replication group. The snapshot is created from this replication group. */ public String getReplicationGroupId() { return this.replicationGroupId; } /** * <p> * The identifier of an existing replication group. The snapshot is created from this replication group. * </p> * * @param replicationGroupId * The identifier of an existing replication group. The snapshot is created from this replication group. * @return Returns a reference to this object so that method calls can be chained together. */ public CreateSnapshotRequest withReplicationGroupId(String replicationGroupId) { setReplicationGroupId(replicationGroupId); return this; } /** * <p> * The identifier of an existing cluster. The snapshot is created from this cluster. * </p> * * @param cacheClusterId * The identifier of an existing cluster. The snapshot is created from this cluster. */ public void setCacheClusterId(String cacheClusterId) { this.cacheClusterId = cacheClusterId; } /** * <p> * The identifier of an existing cluster. The snapshot is created from this cluster. * </p> * * @return The identifier of an existing cluster. The snapshot is created from this cluster. */ public String getCacheClusterId() { return this.cacheClusterId; } /** * <p> * The identifier of an existing cluster. The snapshot is created from this cluster. * </p> * * @param cacheClusterId * The identifier of an existing cluster. The snapshot is created from this cluster. * @return Returns a reference to this object so that method calls can be chained together. */ public CreateSnapshotRequest withCacheClusterId(String cacheClusterId) { setCacheClusterId(cacheClusterId); return this; } /** * <p> * A name for the snapshot being created. * </p> * * @param snapshotName * A name for the snapshot being created. */ public void setSnapshotName(String snapshotName) { this.snapshotName = snapshotName; } /** * <p> * A name for the snapshot being created. 
* </p> * * @return A name for the snapshot being created. */ public String getSnapshotName() { return this.snapshotName; } /** * <p> * A name for the snapshot being created. * </p> * * @param snapshotName * A name for the snapshot being created. * @return Returns a reference to this object so that method calls can be chained together. */ public CreateSnapshotRequest withSnapshotName(String snapshotName) { setSnapshotName(snapshotName); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getReplicationGroupId() != null) sb.append("ReplicationGroupId: ").append(getReplicationGroupId()).append(","); if (getCacheClusterId() != null) sb.append("CacheClusterId: ").append(getCacheClusterId()).append(","); if (getSnapshotName() != null) sb.append("SnapshotName: ").append(getSnapshotName()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof CreateSnapshotRequest == false) return false; CreateSnapshotRequest other = (CreateSnapshotRequest) obj; if (other.getReplicationGroupId() == null ^ this.getReplicationGroupId() == null) return false; if (other.getReplicationGroupId() != null && other.getReplicationGroupId().equals(this.getReplicationGroupId()) == false) return false; if (other.getCacheClusterId() == null ^ this.getCacheClusterId() == null) return false; if (other.getCacheClusterId() != null && other.getCacheClusterId().equals(this.getCacheClusterId()) == false) return false; if (other.getSnapshotName() == null ^ this.getSnapshotName() == null) return false; if (other.getSnapshotName() != null && 
other.getSnapshotName().equals(this.getSnapshotName()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getReplicationGroupId() == null) ? 0 : getReplicationGroupId().hashCode()); hashCode = prime * hashCode + ((getCacheClusterId() == null) ? 0 : getCacheClusterId().hashCode()); hashCode = prime * hashCode + ((getSnapshotName() == null) ? 0 : getSnapshotName().hashCode()); return hashCode; } @Override public CreateSnapshotRequest clone() { return (CreateSnapshotRequest) super.clone(); } }
apache-2.0
ggeorg/WebXView
WebXView/src/plasma/fx/scene/webxview/WebXViewInputHandler.java
1198
package plasma.fx.scene.webxview;

import java.io.File;

import org.apache.batik.util.ParsedURL;

/**
 * This is the interface expected from classes which can handle specific types
 * of input for the WebXView. The simplest implementation will simply handle
 * WebX documents. Other, more sophisticated implementations will handle other
 * types of documents and convert them into WebX before displaying them in
 * WebXView.
 */
public interface WebXViewInputHandler {

    /**
     * Returns the list of mime types handled by this handler.
     *
     * @return the handled MIME type strings
     */
    String[] getHandledMimeTypes();

    /**
     * Returns the list of file extensions handled by this handler.
     *
     * @return the handled file extensions
     */
    String[] getHandledExtensions();

    /**
     * Returns a description for this handler.
     *
     * @return the handler description
     */
    String getDescription();

    /**
     * Returns true if the input file can be handled by the handler.
     *
     * @param f the candidate input file
     * @return whether this handler accepts the file
     */
    boolean accept(File f);

    /**
     * Returns true if the input URI can be handled by the handler.
     *
     * @param purl
     *            URL describing the candidate input
     * @return whether this handler accepts the URL
     */
    boolean accept(ParsedURL purl);

    /**
     * Handles the given input for the given WebXView.
     *
     * @param purl URL of the document to load
     * @param webxview the view the handled document is displayed in
     * @throws Exception if the input cannot be loaded or converted
     */
    void handle(ParsedURL purl, WebXView webxview) throws Exception;
}
apache-2.0
brainysmith/shibboleth-common
src/main/java/edu/internet2/middleware/shibboleth/common/attribute/encoding/AttributeEncoder.java
2255
/*
 * Licensed to the University Corporation for Advanced Internet Development,
 * Inc. (UCAID) under one or more contributor license agreements.  See the
 * NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The UCAID licenses this file to You under the Apache
 * License, Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.internet2.middleware.shibboleth.common.attribute.encoding;

import edu.internet2.middleware.shibboleth.common.attribute.BaseAttribute;

/**
 * Attribute encoders convert {@link BaseAttribute}s into protocol specific representations.
 *
 * Encoders may contain a category that can be used to distinguish encoder types from
 * each other. This is intended to be used to look up an encoder that can be used to encode
 * attributes in accordance with a defined specification or transmission protocol.
 *
 * Encoders MUST be thread-safe and stateless.
 *
 * @param <EncodedType> the type of object created by encoding the attribute
 */
public interface AttributeEncoder<EncodedType> {

    /**
     * Get the name of the attribute.
     *
     * @return name of the attribute
     */
    public String getAttributeName();

    /**
     * Sets the name of the attribute.
     *
     * @param attributeName name of the attribute
     */
    public void setAttributeName(String attributeName);

    /**
     * Encodes the attribute into a protocol specific representation.
     *
     * @param attribute the attribute to encode
     *
     * @return the Object the attribute was encoded into
     *
     * @throws AttributeEncodingException if unable to successfully encode attribute
     */
    public EncodedType encode(BaseAttribute attribute) throws AttributeEncodingException;
}
apache-2.0
Nepxion/Coroutine
coroutine-spring-boot-dubbo-server-example/src/main/java/com/nepxion/coroutine/DubboServerApplication.java
1199
package com.nepxion.coroutine; /** * <p>Title: Nepxion Coroutine</p> * <p>Description: Nepxion Coroutine For Distribution</p> * <p>Copyright: Copyright (c) 2017-2050</p> * <p>Company: Nepxion</p> * @author Haojun Ren * @version 1.0 */ import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.boot.context.embedded.EmbeddedServletContainerFactory; import org.springframework.boot.context.embedded.tomcat.TomcatEmbeddedServletContainerFactory; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.ImportResource; @SpringBootApplication @ImportResource({ "classpath*:dubbo-server-context-coroutine.xml" }) public class DubboServerApplication { public static void main(String[] args) throws Exception { SpringApplication.run(DubboServerApplication.class, args); } @Bean public EmbeddedServletContainerFactory createEmbeddedServletContainerFactory() { TomcatEmbeddedServletContainerFactory tomcatFactory = new TomcatEmbeddedServletContainerFactory(); tomcatFactory.setPort(9083); return tomcatFactory; } }
apache-2.0
LaurenceYang/EasyHttp
sample/src/main/java/com/yang/demo/activity/RxPostActivity.java
3246
package com.yang.demo.activity; import android.app.ProgressDialog; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import android.text.Editable; import android.text.TextUtils; import android.widget.Button; import android.widget.EditText; import android.widget.TextView; import android.widget.Toast; import com.yang.demo.R; import com.yang.demo.entity.PostEntity; import com.yang.easyhttp.request.EasyRequestParams; import com.yang.easyhttprx.RxEasyHttp; import com.yang.easyhttprx.converter.RxEasyCustomConverter; import org.reactivestreams.Subscription; import butterknife.BindView; import butterknife.ButterKnife; import butterknife.OnClick; import io.reactivex.FlowableSubscriber; import io.reactivex.android.schedulers.AndroidSchedulers; /** * Created by yangyang on 2017/2/17. */ public class RxPostActivity extends AppCompatActivity { @BindView(R.id.comment) EditText comment; @BindView(R.id.submit) Button submit; @BindView(R.id.result) TextView result; ProgressDialog dialog; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.post_main); ButterKnife.bind(this); dialog = new ProgressDialog(this); } @OnClick(R.id.submit) public void submit() { Editable content = comment.getText(); if (TextUtils.isEmpty(content)) { Toast.makeText(this, "comment is empty", Toast.LENGTH_SHORT); return; } EasyRequestParams params = new EasyRequestParams(); params.put("content", content.toString()); String url = "http://book.km.com/app/index.php?c=version&a=feedback"; RxEasyHttp.post(url, params, new RxEasyCustomConverter<PostEntity>() { @Override public void doNothing() { // 防止范型类型擦除引起范型类型不能正确获取问题. 
} }) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new FlowableSubscriber<PostEntity>() { @Override public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); dialog.show(); } @Override public void onNext(PostEntity entity) { Toast.makeText(RxPostActivity.this, "提交成功", Toast.LENGTH_LONG).show(); result.setText("status : " + entity.getStatus() + "\n" + "message : " + entity.getMessage()); } @Override public void onError(Throwable t) { Toast.makeText(RxPostActivity.this, "提交失败", Toast.LENGTH_LONG).show(); result.setText(t.getMessage()); dialog.cancel(); } @Override public void onComplete() { dialog.cancel(); } }); } }
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-monitoring/v1/1.31.0/com/google/api/services/monitoring/v1/model/SpanContext.java
3289
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */

package com.google.api.services.monitoring.v1.model;

/**
 * The context of a span. This is attached to an Exemplar in Distribution values during
 * aggregation.It contains the name of a span with format:
 * projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Cloud Monitoring API. For a detailed explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class SpanContext extends com.google.api.client.json.GenericJson {

  /**
   * The resource name of the span. The format is:
   * projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique
   * identifier for a trace within a project; it is a 32-character hexadecimal encoding of a 16-byte
   * array.[SPAN_ID] is a unique identifier for a span within a trace; it is a 16-character
   * hexadecimal encoding of an 8-byte array.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String spanName;

  /**
   * The resource name of the span. The format is:
   * projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique
   * identifier for a trace within a project; it is a 32-character hexadecimal encoding of a 16-byte
   * array.[SPAN_ID] is a unique identifier for a span within a trace; it is a 16-character
   * hexadecimal encoding of an 8-byte array.
   * @return value or {@code null} for none
   */
  public java.lang.String getSpanName() {
    return spanName;
  }

  /**
   * The resource name of the span. The format is:
   * projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique
   * identifier for a trace within a project; it is a 32-character hexadecimal encoding of a 16-byte
   * array.[SPAN_ID] is a unique identifier for a span within a trace; it is a 16-character
   * hexadecimal encoding of an 8-byte array.
   * @param spanName spanName or {@code null} for none
   */
  public SpanContext setSpanName(java.lang.String spanName) {
    this.spanName = spanName;
    return this;
  }

  @Override
  public SpanContext set(String fieldName, Object value) {
    return (SpanContext) super.set(fieldName, value);
  }

  @Override
  public SpanContext clone() {
    return (SpanContext) super.clone();
  }
}
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-codedeploy/src/main/java/com/amazonaws/services/codedeploy/model/RevisionLocationType.java
1864
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.codedeploy.model;

import javax.annotation.Generated;

/**
 * Revision location types recognized by the CodeDeploy service.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public enum RevisionLocationType {

    S3("S3"),
    GitHub("GitHub"),
    String("String"),
    AppSpecContent("AppSpecContent");

    /** Wire value exchanged with the service; also what {@link #toString()} reports. */
    private String value;

    private RevisionLocationType(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return this.value;
    }

    /**
     * Use this in place of valueOf.
     *
     * @param value
     *        real value
     * @return RevisionLocationType corresponding to the value
     *
     * @throws IllegalArgumentException
     *         If the specified value does not map to one of the known values in this enum.
     */
    public static RevisionLocationType fromValue(String value) {
        if (value == null || value.isEmpty()) {
            throw new IllegalArgumentException("Value cannot be null or empty!");
        }
        // Linear scan over the small, fixed set of constants.
        for (RevisionLocationType candidate : RevisionLocationType.values()) {
            if (candidate.value.equals(value)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
    }
}
apache-2.0
OpenClassKnowledgeBase/OCKB
app/models/Post.java
3837
package models; import java.util.*; import play.db.ebean.*; import play.data.format.*; import javax.persistence.*; /*For the database*/ import com.avaje.ebean.Page; import play.data.validation.Constraints.*; @SuppressWarnings("serial") @Entity /*THIS IS IMPORTANT*/ public class Post extends Model implements Comparable<Post>{ public String userName; public Boolean isSticky; @ManyToOne @JoinColumn(name="category_id") public Category category; @OneToMany(cascade=CascadeType.ALL, mappedBy="parent_post") public List<Comment> commentList; @Required public String title; @Required @Column(columnDefinition = "TEXT") public String content; @Formats.DateTime(pattern="yyyy-MM-dd hh:mm:ss") public Date datePosted = new Date(); @Formats.DateTime(pattern="yyyy-MM-dd hh:mm:ss") public Date latestActivity = new Date(); public Long comments; public Long votes; public String usersVoted; @Id public Long id; public Post (Category category, String title, String content) { this.category = category; this.title = title; this.content = content; this.usersVoted = ""; } //help initiate queries public static Finder<Long,Post> find = new Finder<Long,Post>(Long.class, Post.class); /*Implement the CRUD operations*/ public static List<Post> all(){ return find.all(); } public static void create(Category category, String title, String content, String username, Boolean isSticky, String usersVoted){ Post post = new Post(category, title, content); post.userName = username; post.comments = (long) 0; post.votes = (long) 0; post.isSticky = isSticky; post.usersVoted = usersVoted; post.save(); } public static List<Post> findAll(){ return find.all(); } public static void delete(Long id){ find.ref(id).delete(); } public static Post getPost (Long id) { return Post.find.byId(id); } //use to compare commentLists @Override public int compareTo(Post post) { //needs to be reversed to be in descending order if (this.commentList.size() < post.commentList.size()) { return 1; } if (this.commentList.size() > 
post.commentList.size()) { return -1; } else { return 0; } } public static List<Post> getSortedByComments(){ List<Post> allComments = Post.all(); Collections.sort(allComments); return allComments; } /** * Return a page of posts * * @param page Page to display * @param pageSize Number of posts per page * @param sortBy Posts property used for sorting * @param order Sort order (either or asc or desc) * @param filter Filter applied on the name column */ public static Page<Post> getPosts(Long cid, int page, int pageSize, String sortBy, String order, String filter) { return find.where() .eq("category_id", cid) .eq("isSticky", false) .ilike("title", "%" + filter + "%") .ilike("content", "%" + filter + "%") .orderBy(sortBy + " " + order) .findPagingList(pageSize) .getPage(page); } /** <script> function vote(id, voteAmount) { $("#"+id).text( parseInt($("#"+id).text().trim()) + voteAmount); $("#upvote"+id).addClass("btn-disabled"); $("#downvote"+id).addClass("btn-disabled"); if (voteAmount > 0) { $("#glyphup"+id).addClass("voted-color"); } else { $("#glyphdown"+id).addClass("voted-color"); } $("#"+id).addClass("voted-color"); } </script> */ // var listAction = #{jsAction @Post.upvote(currentPost.id, -1) /} }
apache-2.0
spring-projects/spring-data-examples
jpa/deferred/src/main/java/example/model/Customer1128.java
628
package example.model;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;

/**
 * Minimal JPA entity used by the deferred-bootstrap example: an auto-generated
 * id plus a first and last name.
 */
@Entity
public class Customer1128 {

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    private long id;

    private String firstName;
    private String lastName;

    /** No-arg constructor required by JPA; not meant for application code. */
    protected Customer1128() {}

    public Customer1128(String firstName, String lastName) {
        this.firstName = firstName;
        this.lastName = lastName;
    }

    @Override
    public String toString() {
        return "Customer1128[id=" + id + ", firstName='" + firstName + "', lastName='" + lastName + "']";
    }
}
apache-2.0
Geomatys/sis
core/sis-referencing/src/test/java/org/apache/sis/referencing/factory/GIGS2002.java
4297
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.sis.referencing.factory;

import org.opengis.util.FactoryException;
import org.apache.sis.internal.system.Loggers;

// Test imports
import org.junit.Rule;
import org.junit.Test;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.junit.runners.MethodSorters;
import org.apache.sis.test.DependsOn;
import org.apache.sis.test.LoggingWatcher;

/**
 * Tests {@link org.apache.sis.referencing.factory.sql.EPSGDataAccess#createEllipsoid(String)}.
 * This is part of <cite>Geospatial Integrity of Geoscience Software</cite> (GIGS) tests implemented in GeoAPI.
 *
 * <div class="note"><b>Note:</b>
 * this test is defined in this package instead of in the {@code sql} sub-package because of the need to access
 * package-private methods in {@link ConcurrentAuthorityFactory}, and for keeping all GIGS tests together.</div>
 *
 * @author  Martin Desruisseaux (Geomatys)
 * @version 0.7
 * @since   0.7
 * @module
 */
@DependsOn({
    GIGS2001.class,     // Units created from EPSG codes
    GIGS3002.class      // Ellipsoids created from properties
})
@RunWith(JUnit4.class)
@FixMethodOrder(MethodSorters.JVM)      // Intentionally want some randomness
public final strictfp class GIGS2002 extends org.opengis.test.referencing.gigs.GIGS2002 {
    /**
     * A JUnit {@link Rule} for listening to log events. This field is public because JUnit requires us to
     * do so, but should be considered as an implementation details (it should have been a private field).
     */
    @Rule
    public final LoggingWatcher loggings = new LoggingWatcher(Loggers.CRS_FACTORY);

    /**
     * Creates a new test using the default authority factory.
     */
    public GIGS2002() {
        super(TestFactorySource.factory);
    }

    /**
     * Creates the factory to use for all tests in this class.
     *
     * @throws FactoryException if an error occurred while creating the factory.
     */
    @BeforeClass
    public static void createFactory() throws FactoryException {
        TestFactorySource.createFactory();
    }

    /**
     * Forces release of JDBC connections after the tests in this class.
     *
     * @throws FactoryException if an error occurred while closing the connections.
     */
    @AfterClass
    public static void close() throws FactoryException {
        TestFactorySource.close();
    }

    /**
     * Overrides the GeoAPI test for verifying the log messages emitted during the construction of deprecated objects.
     *
     * @throws FactoryException if an error occurred while creating the object.
     */
    @Test
    @Override
    public void testClarkeMichigan() throws FactoryException {
        super.testClarkeMichigan();
        // The deprecated Clarke 1866 Michigan ellipsoid shall be reported in the logs.
        loggings.assertNextLogContains("EPSG:7009");
    }

    /**
     * Overrides the GeoAPI test for verifying the log messages emitted during the construction of deprecated objects.
     *
     * @throws FactoryException if an error occurred while creating the object.
     */
    @Test
    @Override
    public void testPopularVisualisationSphere() throws FactoryException {
        super.testPopularVisualisationSphere();
        // The deprecated Popular Visualisation Sphere shall be reported in the logs.
        loggings.assertNextLogContains("EPSG:7059");
    }

    /**
     * Verifies that no unexpected warning has been emitted in this test.
     */
    @After
    public void assertNoUnexpectedLog() {
        loggings.assertNoUnexpectedLog();
    }
}
apache-2.0
consulo/consulo-csharp
csharp-psi-impl/src/main/java/consulo/csharp/lang/parser/decl/TypeDeclarationParsing.java
2295
/*
 * Copyright 2013-2017 consulo.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package consulo.csharp.lang.parser.decl;

import consulo.csharp.lang.parser.CSharpBuilderWrapper;
import consulo.csharp.lang.parser.SharedParsingHelpers;
import consulo.csharp.lang.psi.CSharpSoftTokens;
import com.intellij.lang.PsiBuilder;
import com.intellij.psi.tree.TokenSet;

/**
 * Parses a C# type declaration header and body: the name, the optional generic
 * parameter list, the optional base-type list after ':', the optional
 * 'where' constraint list, and the brace-enclosed member list.
 *
 * @author VISTALL
 * @since 28.11.13.
 */
public class TypeDeclarationParsing extends SharedParsingHelpers
{
	// Soft keyword 'where' may legally appear between the header parts and '{'.
	private static final TokenSet WHERE_SET = TokenSet.create(CSharpSoftTokens.WHERE_KEYWORD);

	public static void parse(CSharpBuilderWrapper builder, PsiBuilder.Marker marker)
	{
		// Remember whether this is an 'enum' — forwarded to DeclarationParsing.parseAll below.
		boolean isEnum = builder.getTokenType() == ENUM_KEYWORD;
		builder.advanceLexer();

		expectOrReportIdentifier(builder, STUB_SUPPORT);

		// After the name, recover until a token that can start the next header part.
		reportErrorUntil(builder, "Expected ':', '<', '{' or 'where'", TokenSet.create(COLON, LT, LBRACE), WHERE_SET);

		GenericParameterParsing.parseList(builder);

		reportErrorUntil(builder, "Expected ':', '{' or 'where'", TokenSet.create(COLON, LBRACE), WHERE_SET);

		if(builder.getTokenType() == COLON)
		{
			PsiBuilder.Marker mark = builder.mark();
			builder.advanceLexer(); // colon

			// parseTypeList signals failure by returning true here.
			if(parseTypeList(builder, STUB_SUPPORT))
			{
				builder.error("Expected type");
			}
			mark.done(EXTENDS_LIST);
		}

		reportErrorUntil(builder, "Expected '{' or 'where'", TokenSet.create(LBRACE), WHERE_SET);

		GenericParameterParsing.parseGenericConstraintList(builder);

		reportErrorUntil(builder, "Expected '{'", TokenSet.create(LBRACE), TokenSet.EMPTY);

		if(expect(builder, LBRACE, "'{' expected"))
		{
			DeclarationParsing.parseAll(builder, false, isEnum);

			expect(builder, RBRACE, "'}' expected");
			// Trailing semicolon after '}' is optional, hence the null error message.
			expect(builder, SEMICOLON, null);
		}

		done(marker, TYPE_DECLARATION);
	}
}
apache-2.0
tangzhimang/Weather
app/src/main/java/com/example/moon/weather/Bean/Basic.java
1420
package com.example.moon.weather.Bean; /** * Created by Administrator on 2016/4/1. */ public class Basic { private String city; private String cnty; private String id; private String lat; private String lon; private Update update; public String getCity() { return city; } public void setCity(String city) { this.city = city; } public String getCnty() { return cnty; } public void setCnty(String cnty) { this.cnty = cnty; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getLat() { return lat; } public void setLat(String lat) { this.lat = lat; } public String getLon() { return lon; } public void setLon(String lon) { this.lon = lon; } public Update getUpdate() { return update; } public void setUpdate(Update update) { this.update = update; } public static class Update { private String loc; private String utc; public String getLoc() { return loc; } public void setLoc(String loc) { this.loc = loc; } public String getUtc() { return utc; } public void setUtc(String utc) { this.utc = utc; } } }
apache-2.0
andy-goryachev/JsonPretty
src/goryachev/common/io/PersisterStreamRef.java
480
// Copyright © 2013-2020 Andy Goryachev <andy@goryachev.com> package goryachev.common.io; /** * Internal reference class used in serialization of lists, maps and tables * to deduplicate strings and other large values. * Should not be seen outside of this package. */ public class PersisterStreamRef { private final int index; public PersisterStreamRef(int index) { this.index = index; } public int getIndex() { return index; } }
apache-2.0
coolgeng/leetcode
examples/PalindromePartitioning/PalindromePartitioning.java
1437
import java.util.List; import java.util.ArrayList; import java.util.Map; import java.util.HashMap; class PalindromePartitioning { Map<String, List<List<String>>> cache = new HashMap<String, List<List<String>>>(); private boolean isPalindrome(String s) { for (int i = 0 ;i < s.length()/ 2; i++) { if (s.charAt(i) != s.charAt(s.length() - 1 -i)) { return false; } } return true; } public List<List<String>> partition(String s) { List<List<String>> ans = cache.get(s); if (ans != null) { return ans; } ans = new ArrayList<List<String>>(); for (int i = 1 ; i < s.length() ; i ++) { String prefix = s.substring(0, i); if (isPalindrome(prefix)) { for (List<String> subbands : partition(s.substring(i))) { List<String> temp = new ArrayList<String>(); temp.add(prefix); temp.addAll(subbands); ans.add(temp); } } } if (isPalindrome(s)) { List<String> temp = new ArrayList<String>(); temp.add(s); ans.add(temp); } cache.put(s, ans); return ans; } public static void main(String[] args) { PalindromePartitioning pp = new PalindromePartitioning(); String s = "aaabaac"; List<List<String>> ans = pp.partition(s); for (List<String> list: ans) { for (String str: list) { System.out.print(str + " "); } System.out.println(" "); } // String str1 = "1234567"; // System.out.println(str1.substring(1)); } }
apache-2.0
garywong89/PetStoreAPI
src/gen/java/io/swagger/api/UserApiService.java
1504
package io.swagger.api;

import io.swagger.api.*;
import io.swagger.model.*;

import org.glassfish.jersey.media.multipart.FormDataContentDisposition;

import java.util.List;
import io.swagger.model.User;
import java.util.List;
import io.swagger.api.NotFoundException;

import java.io.InputStream;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.SecurityContext;

/**
 * Abstract service layer for the pet-store "user" resource: one method per
 * REST operation, to be implemented by a concrete subclass. Generated by
 * swagger-codegen (JavaJerseyServerCodegen) — prefer regenerating over
 * hand-editing.
 */
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaJerseyServerCodegen", date = "2016-12-21T20:13:35.180Z")
public abstract class UserApiService {

    /** Creates a single user. */
    public abstract Response createUser(User body, SecurityContext securityContext) throws NotFoundException;

    /** Creates users from an array payload. */
    public abstract Response createUsersWithArrayInput(List<User> body, SecurityContext securityContext) throws NotFoundException;

    /** Creates users from a list payload. */
    public abstract Response createUsersWithListInput(List<User> body, SecurityContext securityContext) throws NotFoundException;

    /** Deletes the user with the given username. */
    public abstract Response deleteUser(String username, SecurityContext securityContext) throws NotFoundException;

    /** Fetches the user with the given username. */
    public abstract Response getUserByName(String username, SecurityContext securityContext) throws NotFoundException;

    /** Logs a user in with the given credentials. */
    public abstract Response loginUser(String username, String password, SecurityContext securityContext) throws NotFoundException;

    /** Logs the current user out. */
    public abstract Response logoutUser(SecurityContext securityContext) throws NotFoundException;

    /** Updates the user with the given username. */
    public abstract Response updateUser(String username, User body, SecurityContext securityContext) throws NotFoundException;
}
apache-2.0
torakiki/sambox
src/main/java/org/sejda/sambox/pdmodel/font/FontFormat.java
1075
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.sejda.sambox.pdmodel.font; /** * Font file format. * * @author John Hewson */ public enum FontFormat { /** * TrueType font. */ TTF, /** * OpenType font. */ OTF, /** * Type 1 (binary) font. */ PFB }
apache-2.0
pkarmstr/NYBC
solr-4.2.1/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrStopwordsCarrot2LexicalDataFactory.java
5282
package org.apache.solr.handler.clustering.carrot2;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.Collection;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory;
import org.apache.lucene.analysis.core.StopFilterFactory;
import org.apache.solr.analysis.TokenizerChain;
import org.apache.solr.schema.IndexSchema;
import org.carrot2.core.LanguageCode;
import org.carrot2.core.attribute.Init;
import org.carrot2.core.attribute.Processing;
import org.carrot2.text.linguistic.DefaultLexicalDataFactory;
import org.carrot2.text.linguistic.ILexicalData;
import org.carrot2.text.linguistic.ILexicalDataFactory;
import org.carrot2.text.util.MutableCharArray;
import org.carrot2.util.attribute.Attribute;
import org.carrot2.util.attribute.Bindable;
import org.carrot2.util.attribute.Input;
import org.slf4j.Logger;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

/**
 * An implementation of Carrot2's {@link ILexicalDataFactory} that adds stop
 * words from a field's StopFilter to the default stop words used in Carrot2,
 * for all languages Carrot2 supports. Completely replacing Carrot2 stop words
 * with Solr's wouldn't make much sense because clustering needs more aggressive
 * stop words removal. In other words, if something is a stop word during
 * indexing, then it should also be a stop word during clustering, but not the
 * other way round.
 */
@Bindable
public class SolrStopwordsCarrot2LexicalDataFactory implements
    ILexicalDataFactory {
  final static Logger logger = org.slf4j.LoggerFactory
      .getLogger(SolrStopwordsCarrot2LexicalDataFactory.class);

  // Injected once at component initialization time (Carrot2 @Init binding).
  @Init
  @Input
  @Attribute(key = "solrIndexSchema")
  private IndexSchema schema;

  // Injected per clustering request (Carrot2 @Processing binding): the Solr
  // fields whose analyzers should contribute stop words.
  @Processing
  @Input
  @Attribute(key = "solrFieldNames")
  private Set<String> fieldNames;

  /**
   * A lazily-built cache of stop words per field. A multimap because a single
   * field's analyzer chain may contribute several word sets (stop words and
   * common-grams words).
   */
  private Multimap<String, CharArraySet> solrStopWords = HashMultimap.create();

  /**
   * Carrot2's default lexical resources to use in addition to Solr's stop
   * words.
   */
  private DefaultLexicalDataFactory carrot2LexicalDataFactory = new DefaultLexicalDataFactory();

  /**
   * Obtains stop words for a field from the associated
   * {@link StopFilterFactory}, if any.
   */
  private Collection<CharArraySet> getSolrStopWordsForField(String fieldName) {
    // No need to synchronize here, Carrot2 ensures that instances
    // of this class are not used by multiple threads at a time.
    if (!solrStopWords.containsKey(fieldName)) {
      final Analyzer fieldAnalyzer = schema.getFieldType(fieldName)
          .getAnalyzer();
      if (fieldAnalyzer instanceof TokenizerChain) {
        final TokenFilterFactory[] filterFactories = ((TokenizerChain) fieldAnalyzer)
            .getTokenFilterFactories();
        for (TokenFilterFactory factory : filterFactories) {
          if (factory instanceof StopFilterFactory) {
            // StopFilterFactory holds the stop words in a CharArraySet
            solrStopWords.put(fieldName,
                ((StopFilterFactory) factory).getStopWords());
          }

          if (factory instanceof CommonGramsFilterFactory) {
            // Common-grams words are treated as stop words for clustering too.
            solrStopWords.put(fieldName,
                ((CommonGramsFilterFactory) factory)
                    .getCommonWords());
          }
        }
      }
    }
    // May be empty if the field's analyzer is not a TokenizerChain or has no
    // stop/common-grams filters; an empty collection is the multimap default.
    return solrStopWords.get(fieldName);
  }

  @Override
  public ILexicalData getLexicalData(LanguageCode languageCode) {
    final ILexicalData carrot2LexicalData = carrot2LexicalDataFactory
        .getLexicalData(languageCode);

    // Wrap Carrot2's default lexical data so Solr's per-field stop words are
    // consulted first for common-word checks.
    return new ILexicalData() {
      @Override
      public boolean isStopLabel(CharSequence word) {
        // Nothing in Solr maps to the concept of a stop label,
        // so return Carrot2's default here.
        return carrot2LexicalData.isStopLabel(word);
      }

      @Override
      public boolean isCommonWord(MutableCharArray word) {
        // Loop over the fields involved in clustering first
        for (String fieldName : fieldNames) {
          for (CharArraySet stopWords : getSolrStopWordsForField(fieldName)) {
            if (stopWords.contains(word)) {
              return true;
            }
          }
        }
        // Check default Carrot2 stop words too
        return carrot2LexicalData.isCommonWord(word);
      }
    };
  }
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-route53resolver/src/main/java/com/amazonaws/services/route53resolver/model/transform/DisassociateResolverQueryLogConfigRequestMarshaller.java
2604
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.route53resolver.model.transform;

import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.route53resolver.model.*;

import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * DisassociateResolverQueryLogConfigRequestMarshaller
 *
 * <p>Maps each field of {@code DisassociateResolverQueryLogConfigRequest} onto its JSON
 * payload location via the SDK's {@code ProtocolMarshaller}. Generated code — do not hand-edit.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DisassociateResolverQueryLogConfigRequestMarshaller {

    // Payload binding for the ResolverQueryLogConfigId member.
    private static final MarshallingInfo<String> RESOLVERQUERYLOGCONFIGID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ResolverQueryLogConfigId").build();
    // Payload binding for the ResourceId member.
    private static final MarshallingInfo<String> RESOURCEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ResourceId").build();

    // Stateless, so a single shared instance suffices.
    private static final DisassociateResolverQueryLogConfigRequestMarshaller instance = new DisassociateResolverQueryLogConfigRequestMarshaller();

    public static DisassociateResolverQueryLogConfigRequestMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     */
    public void marshall(DisassociateResolverQueryLogConfigRequest disassociateResolverQueryLogConfigRequest, ProtocolMarshaller protocolMarshaller) {

        if (disassociateResolverQueryLogConfigRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(disassociateResolverQueryLogConfigRequest.getResolverQueryLogConfigId(), RESOLVERQUERYLOGCONFIGID_BINDING);
            protocolMarshaller.marshall(disassociateResolverQueryLogConfigRequest.getResourceId(), RESOURCEID_BINDING);
        } catch (Exception e) {
            // Generated pattern: wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }

}
apache-2.0
resty-gwt/resty-gwt
restygwt/src/main/java/org/fusesource/restygwt/client/AbstractNestedRequestCallback.java
2266
/** * Copyright (C) 2009-2012 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.fusesource.restygwt.client; import com.google.gwt.http.client.Request; import com.google.gwt.http.client.RequestCallback; import com.google.gwt.http.client.Response; public abstract class AbstractNestedRequestCallback implements RequestCallback { protected final Method method; protected RequestCallback requestCallback; public AbstractNestedRequestCallback(Method method, RequestCallback callback) { this.method = method; requestCallback = callback; } @Override public void onResponseReceived(Request request, Response response) { method.request = request; method.response = response; if (response == null) { requestCallback.onError(request, Defaults.getExceptionMapper().createNoResponseException()); } else if (isFailedStatus(response)) { doError(request, response); } else { doReceive(request, response); } } protected void doError(Request request, Response response) { requestCallback.onError(request, Defaults.getExceptionMapper().createFailedStatusException(method, response)); } protected abstract void doReceive(Request request, Response response); @Override public void onError(Request request, Throwable exception) { method.request = request; requestCallback.onError(request, exception); } protected boolean isFailedStatus(Response response) { return 
!method.isExpected(response.getStatusCode()); } }
apache-2.0
mobgen/halo-android
sdk/halo-sdk/src/main/java/com/mobgen/halo/android/sdk/core/management/device/DeviceRepository.java
6987
package com.mobgen.halo.android.sdk.core.management.device;

import android.support.annotation.NonNull;
import android.support.annotation.Nullable;

import com.mobgen.halo.android.framework.common.exceptions.HaloParsingException;
import com.mobgen.halo.android.framework.common.helpers.logger.Halog;
import com.mobgen.halo.android.framework.common.utils.AssertionUtils;
import com.mobgen.halo.android.framework.network.client.response.Parser;
import com.mobgen.halo.android.framework.network.exceptions.HaloNetException;
import com.mobgen.halo.android.framework.network.exceptions.HaloNotFoundException;
import com.mobgen.halo.android.sdk.core.management.models.Device;
import com.mobgen.halo.android.sdk.core.management.segmentation.HaloSegmentationTag;

import java.util.List;

/**
 * The device repository that interacts with all the device elements.
 */
public class DeviceRepository {

    /**
     * The parser.
     */
    private Parser.Factory mParser;

    /**
     * The device local data source.
     */
    private DeviceLocalDatasource mDeviceLocalDatasource;

    /**
     * The device remote data source.
     */
    private DeviceRemoteDatasource mDeviceRemoteDatasource;

    /**
     * The cached device unique instance.
     */
    private Device mCachedDevice;

    /**
     * Constructor for the repository.
     *
     * @param deviceLocalDatasource  The local repository.
     * @param deviceRemoteDatasource The remote repository.
     */
    public DeviceRepository(@NonNull Parser.Factory parser, @NonNull DeviceRemoteDatasource deviceRemoteDatasource, @NonNull DeviceLocalDatasource deviceLocalDatasource) {
        AssertionUtils.notNull(parser, "parser");
        AssertionUtils.notNull(deviceRemoteDatasource, "deviceRemoteDatasource");
        AssertionUtils.notNull(deviceLocalDatasource, "deviceLocalDatasource");
        mParser = parser;
        mDeviceRemoteDatasource = deviceRemoteDatasource;
        mDeviceLocalDatasource = deviceLocalDatasource;
    }

    /**
     * Syncs the current device with the one in the cloud. Intended to be called on startup.
     *
     * @param tags The tags.
     * @return The device synchronized.
     * @throws HaloNetException     Error while performing the network request.
     * @throws HaloParsingException Error while serializing the device.
     */
    @NonNull
    public synchronized Device syncDevice(@NonNull List<HaloSegmentationTag> tags) throws HaloNetException, HaloParsingException {
        AssertionUtils.notNull(tags, "tags");
        mCachedDevice = getCachedDevice();
        if (!mCachedDevice.isAnonymous()) {
            try {
                mCachedDevice = mDeviceRemoteDatasource.getDevice(mCachedDevice);
            } catch (HaloNotFoundException e) {
                // The server no longer knows this device: drop the cache and retry once
                // (the recursive call takes the anonymous branch because the cache is cleared).
                Halog.w(getClass(), "There is a cached device that is not present in the server. Creating a new one");
                Halog.e(getClass(), "Creating new device", e);
                clearCachedDevice();
                return syncDevice(tags);
            }
        }
        mCachedDevice.addTags(tags, true);
        return sendDevice();
    }

    /**
     * Updates the device with the one in the server.
     *
     * @return The device returned.
     * @throws HaloNetException     Network exception.
     * @throws HaloParsingException Parsing exception.
     */
    @NonNull
    public synchronized Device sendDevice() throws HaloParsingException, HaloNetException {
        mCachedDevice = getCachedDevice();
        try {
            mCachedDevice = mDeviceRemoteDatasource.updateDevice(mCachedDevice);
        } catch (HaloNotFoundException e) {
            // Device id unknown on the server: fall back to an anonymous device and retry once.
            Halog.w(getClass(), "There is a cached device that is not present in the server. Creating a new one");
            Halog.e(getClass(), "Making the device anonymous", e);
            mCachedDevice.makeAnonymous();
            return sendDevice();
        }
        // Persist the server's view of the device locally after a successful update.
        mDeviceLocalDatasource.cacheDevice(Device.serialize(mCachedDevice, mParser));
        return mCachedDevice;
    }

    /**
     * Provides the cached device. If in memory this one is brought. Otherwise, the new device will be provided.
     *
     * @return The device.
     */
    @NonNull
    public synchronized Device getCachedDevice() {
        if (mCachedDevice == null) {
            mCachedDevice = getAlwaysDevice();
        }
        return mCachedDevice;
    }

    /**
     * Provides the in memory device. This is the only device that can be gotten from the memory.
     *
     * @return The device provided. May be null if nothing has been cached yet.
     */
    @Nullable
    public Device getDeviceInMemory() {
        return mCachedDevice;
    }

    /**
     * Sets in a synchronized way the notifications token into the current device.
     *
     * @param notificationToken The notifications token.
     * @return True if the token has changed
     */
    public synchronized boolean pushNotificationToken(@Nullable String notificationToken) {
        mCachedDevice = getCachedDevice();
        // Null-safe change detection: both null, or equal non-null, means unchanged.
        boolean changed = mCachedDevice.getNotificationsToken() == null ? notificationToken != null : !mCachedDevice.getNotificationsToken().equals(notificationToken);
        mCachedDevice.setNotificationsToken(notificationToken);
        return changed;
    }

    /**
     * Adds the tags to the device.
     *
     * @param tags The tags.
     */
    public synchronized void addTags(@Nullable List<HaloSegmentationTag> tags, boolean shouldOverrideTags) {
        mCachedDevice = getCachedDevice();
        if (tags != null) {
            mCachedDevice.addTags(tags, shouldOverrideTags);
        }
    }

    /**
     * Remove the tags given the names.
     *
     * @param tagNames The tag names.
     */
    public synchronized void removeTags(@Nullable List<String> tagNames) {
        mCachedDevice = getCachedDevice();
        if (tagNames != null) {
            for (String tag : tagNames) {
                if (tag != null) {
                    mCachedDevice.removeTag(new HaloSegmentationTag(tag, null));
                }
            }
        }
    }

    /**
     * Provides always a device, even if it is an empty one.
     *
     * @return The device provided.
     */
    private Device getAlwaysDevice() {
        Device resultingDevice = null;
        try {
            //Bring cached
            String cached = mDeviceLocalDatasource.getCachedDevice();
            if (cached != null) {
                resultingDevice = Device.deserialize(cached, mParser);
            }
        } catch (HaloParsingException e) {
            //Just log the case; a corrupt cache falls through to a fresh Device below.
            Halog.e(getClass(), "The device stored was malformed. Overload it", e);
        }
        //Initialize if null for some reason
        if (resultingDevice == null) {
            resultingDevice = new Device();
        }
        return resultingDevice;
    }

    /**
     * Clears the cached device.
     */
    private void clearCachedDevice() {
        mCachedDevice = null;
        mDeviceLocalDatasource.clearCurrentDevice();
    }
}
apache-2.0
5hawnknight/hpa-design-example
src/main/java/com/knight/exmaple/handlers/StatusCodesDashboardHandler.java
282
package com.knight.exmaple.handlers;

import com.knight.exmaple.pageobjects.StatusCodesDashboard;

/**
 * Created by shawn knight on 5/29/15.
 * Email: shawn.knight.work@gmail.com
 */
public interface StatusCodesDashboardHandler {

    /**
     * Provides access to the {@link StatusCodesDashboard} page object.
     *
     * @return the status-codes dashboard page object
     */
    StatusCodesDashboard statusCodesDashboard();
}
apache-2.0
hamboomger/hackanton
src/main/java/com/hackanton/application/config/ApplicationStartConfig.java
778
package com.hackanton.application.config;

import com.hackanton.user.Role;
import com.hackanton.user.RoleRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;

/**
 * Startup configuration that guarantees the baseline roles ("USER", "ADMIN")
 * exist in the role repository once the application context is ready.
 *
 * @author ddorochov
 */
@Component
public class ApplicationStartConfig {

    private String[] roles = {"USER", "ADMIN"};

    private final RoleRepository roleRepository;

    @Autowired
    public ApplicationStartConfig(RoleRepository roleRepository) {
        this.roleRepository = roleRepository;
    }

    /**
     * Inserts each required role unless the repository already contains one
     * with the same name; runs once after dependency injection completes.
     */
    @PostConstruct
    public void addRolesIfNeeded() {
        for (String name : roles) {
            if (roleRepository.findByName(name) == null) {
                roleRepository.save(new Role(name));
            }
        }
    }
}
apache-2.0
456838/usefulCode
YHamburgGit/app/src/main/java/com/salton123/mengmei/controller/activity/EditConsigneeAty.java
6045
package com.salton123.mengmei.controller.activity;

import android.os.Bundle;
import android.view.View;
import android.widget.CheckBox;
import android.widget.EditText;

import com.salton123.base.ActivityFrameIOS;
import com.salton123.mengmei.model.bean.User;
import com.salton123.common.net.HttpResponseHandler;
import com.salton123.mengmei.R;
import com.salton123.mengmei.model.engine.BmobDataEngine;
import com.salton123.mengmei.model.bean.Consignee;

import cn.bmob.v3.exception.BmobException;
import cn.bmob.v3.listener.SaveListener;
import cn.bmob.v3.listener.UpdateListener;

/**
 * User: 巫金生(newSalton@outlook.com)
 * Date: 2016/2/13 14:12
 * Time: 14:12
 * Description: Screen for creating or editing a consignee (delivery address).
 * When launched with a "consignee" extra it edits/deletes that record;
 * otherwise it creates a new one. Sets RESULT_OK on any successful change.
 */
public class EditConsigneeAty extends ActivityFrameIOS {
    private EditText et_name, et_phoneNum, et_address;
    private CheckBox cb_default;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        AppendMainBody(R.layout.aty_edit_consignee);
    }

    @Override
    public void InitView() {
        et_name = (EditText) findViewById(R.id.et_name);
        et_phoneNum = (EditText) findViewById(R.id.et_phoneNum);
        et_address = (EditText) findViewById(R.id.et_address);
        cb_default = (CheckBox) findViewById(R.id.cb_default);
        // Top-bar delete button: only meaningful in edit mode (when _Consignee is set).
        SetTopAdditionImageListener(R.drawable.ic_delete, new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (_Consignee != null) {
                    _Consignee.delete(new UpdateListener() {
                        @Override
                        public void done(BmobException e) {
                            // Bmob convention: a null exception means success.
                            if(e ==null){
                                setResult(RESULT_OK);
                                EditConsigneeAty.this.finish();
                            }else {
                                ShowToast("删除失败:" + e.getMessage());
                            }
                        }
                    });
                }
            }
        });
    }

    @Override
    public void InitListener() {
        findViewById(R.id.btn_save).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (_Consignee != null) {
                    // Edit mode: copy the form fields onto the existing record.
                    _Consignee.setAddress(et_address.getText().toString().trim());
                    _Consignee.setPhoneNum(et_phoneNum.getText().toString().trim());
                    _Consignee.setReceiverName(et_name.getText().toString().trim());
                    _Consignee.setIsdefault(cb_default.isChecked());
                    // Mark the user's other consignees as non-default before updating this one.
                    BmobDataEngine.SetConsigneeListNotDefault(User.getCurrentUser().getObjectId(), new HttpResponseHandler<String>() {
                        @Override
                        public void onSuccess(String content) {
                            _Consignee.update(new UpdateListener() {
                                @Override
                                public void done(BmobException e) {
                                    if (e ==null){
                                        ShowToast("修改成功");
                                        setResult(RESULT_OK);
                                        EditConsigneeAty.this.finish();
                                    }else{
                                        ShowToast("修改失败:" + e.getMessage());
                                    }
                                }
                            });
                        }

                        @Override
                        public void onFailure(String content) {
                            // NOTE(review): failures to clear other defaults are silently ignored;
                            // the update above then never runs — confirm this is intended.
                        }
                    });
                } else {
                    // Create mode: build a new consignee owned by the current user.
                    Consignee consignee = new Consignee();
                    consignee.setAddress(et_address.getText().toString().trim());
                    consignee.setPhoneNum(et_phoneNum.getText().toString().trim());
                    consignee.setReceiverName(et_name.getText().toString().trim());
                    consignee.setIsdefault(cb_default.isChecked());
                    consignee.setOwnerId(User.getCurrentUser().getObjectId());
                    // Clear other defaults; result intentionally ignored (best-effort).
                    BmobDataEngine.SetConsigneeListNotDefault(User.getCurrentUser().getObjectId(), new HttpResponseHandler<String>() {
                        @Override
                        public void onSuccess(String content) {
//                            ShowToast("修改成功");
                        }

                        @Override
                        public void onFailure(String content) {
//                            ShowToast("修改失败:" + content);
                        }
                    });
                    consignee.save(new SaveListener<String>() {
                        @Override
                        public void done(String s, BmobException e) {
                            if (e ==null){
                                ShowToast("保存成功");
                                setResult(RESULT_OK);
                                EditConsigneeAty.this.finish();
                            }else {
                                ShowToast("保存失败:" + e.getMessage());
                            }
                        }
                    });
                }
            }
        });
    }

    // The consignee being edited; null when this screen is in create mode.
    Consignee _Consignee;

    @Override
    public void InitData() {
        try {
            _Consignee = (Consignee) getIntent().getExtras().get("consignee");
            if (_Consignee != null) {
                // Edit mode: pre-fill the form from the record.
                et_name.setText(_Consignee.getReceiverName());
                et_phoneNum.setText(_Consignee.getPhoneNum());
                et_address.setText(_Consignee.getAddress());
                HideTopAdditionalImage();
            }
        } catch (Exception e) {
            // Missing extras means create mode; deliberately swallowed.
        }
    }
}
apache-2.0
rabix/bunny
rabix-bindings/src/main/java/org/rabix/bindings/BindingWrongVersionException.java
428
package org.rabix.bindings;

/**
 * Thrown by the bindings layer when a version mismatch is detected.
 * NOTE(review): the precise condition is defined by the callers that throw
 * this — confirm against {@link BindingException} usage elsewhere.
 */
public class BindingWrongVersionException extends BindingException {

  private static final long serialVersionUID = -5493500881508866987L;

  /** Wraps an underlying cause. */
  public BindingWrongVersionException(Throwable t) {
    super(t);
  }

  /** Carries a descriptive message only. */
  public BindingWrongVersionException(String message) {
    super(message);
  }

  /** Carries both a descriptive message and the underlying cause. */
  public BindingWrongVersionException(String message, Throwable t) {
    super(message, t);
  }
}
apache-2.0
Uni-Sol/batik
sources/org/apache/batik/parser/FragmentIdentifierHandler.java
3449
/*

   Copyright 2000-2001  The Apache Software Foundation 

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */
package org.apache.batik.parser;

/**
 * This interface must be implemented and then registred as the
 * handler of a <code>PreserveAspectRatioParser</code> instance
 * in order to be notified of parsing events.
 *
 * @author <a href="mailto:stephane@hillion.org">Stephane Hillion</a>
 * @version $Id$
 */
public interface FragmentIdentifierHandler
    extends PreserveAspectRatioHandler,
            TransformListHandler {

    /**
     * Invoked when the fragment identifier starts.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void startFragmentIdentifier() throws ParseException;

    /**
     * Invoked when an ID has been parsed.
     * @param s The string that represents the parsed ID.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void idReference(String s) throws ParseException;

    /**
     * Invoked when 'viewBox(x,y,width,height)' has been parsed.
     * @param x x coordinate of the viewbox
     * @param y y coordinate of the viewbox
     * @param width width of the viewbox
     * @param height height of the viewbox
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void viewBox(float x, float y, float width, float height)
        throws ParseException;

    /**
     * Invoked when a view target specification starts.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void startViewTarget() throws ParseException;

    /**
     * Invoked when a identifier has been parsed within a view target
     * specification.
     * @param name the target name.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void viewTarget(String name) throws ParseException;

    /**
     * Invoked when a view target specification ends.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void endViewTarget() throws ParseException;

    /**
     * Invoked when a 'zoomAndPan' specification has been parsed.
     * @param magnify true if 'magnify' has been parsed.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    // NOTE(review): unlike the other callbacks, this method declares no
    // throws clause although its javadoc documents ParseException — harmless
    // if ParseException is unchecked here; confirm and align either the
    // declaration or the doc.
    void zoomAndPan(boolean magnify);

    /**
     * Invoked when the fragment identifier ends.
     * @exception ParseException if an error occured while processing the
     *                           fragment identifier
     */
    void endFragmentIdentifier() throws ParseException;
}
apache-2.0
googleads/googleads-java-lib
examples/admanager_axis/src/main/java/admanager/axis/v202111/publisherquerylanguageservice/GetLineItemsNamedLike.java
6605
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package admanager.axis.v202111.publisherquerylanguageservice;

import static com.google.api.ads.common.lib.utils.Builder.DEFAULT_CONFIGURATION_FILENAME;

import com.google.api.ads.admanager.axis.factory.AdManagerServices;
import com.google.api.ads.admanager.axis.utils.v202111.Pql;
import com.google.api.ads.admanager.axis.utils.v202111.StatementBuilder;
import com.google.api.ads.admanager.axis.v202111.ApiError;
import com.google.api.ads.admanager.axis.v202111.ApiException;
import com.google.api.ads.admanager.axis.v202111.PublisherQueryLanguageServiceInterface;
import com.google.api.ads.admanager.axis.v202111.ResultSet;
import com.google.api.ads.admanager.lib.client.AdManagerSession;
import com.google.api.ads.common.lib.auth.OfflineCredentials;
import com.google.api.ads.common.lib.auth.OfflineCredentials.Api;
import com.google.api.ads.common.lib.conf.ConfigurationLoadException;
import com.google.api.ads.common.lib.exception.OAuthException;
import com.google.api.ads.common.lib.exception.ValidationException;
import com.google.api.ads.common.lib.utils.CsvFiles;
import com.google.api.client.auth.oauth2.Credential;
import java.io.File;
import java.io.IOException;
import java.rmi.RemoteException;

/**
 * This example gets all line items which have a name beginning with "line item". This example may
 * take a while to run.
 *
 * <p>Credentials and properties in {@code fromFile()} are pulled from the "ads.properties" file.
 * See README for more info.
 */
public class GetLineItemsNamedLike {

  /**
   * Runs the example.
   *
   * @param adManagerServices the services factory.
   * @param session the session.
   * @throws ApiException if the API request failed with one or more service errors.
   * @throws RemoteException if the API request failed due to other errors.
   * @throws IOException if unable to write the response to a file.
   */
  public static void runExample(AdManagerServices adManagerServices, AdManagerSession session)
      throws IOException {
    // Get the PublisherQueryLanguageService.
    PublisherQueryLanguageServiceInterface pqlService =
        adManagerServices.get(session, PublisherQueryLanguageServiceInterface.class);

    // Create statement to select all line items.
    StatementBuilder statementBuilder =
        new StatementBuilder()
            .select("Id, Name, Status")
            .from("Line_Item")
            .where("Name LIKE 'line item%'")
            .orderBy("Id ASC")
            .offset(0)
            .limit(StatementBuilder.SUGGESTED_PAGE_LIMIT);

    // Default for result sets.
    ResultSet combinedResultSet = null;
    ResultSet resultSet;
    int i = 0;

    // Page through results, accumulating every page into combinedResultSet.
    do {
      // Get line items like 'line item%'.
      resultSet = pqlService.select(statementBuilder.toStatement());

      // Combine result sets with previous ones.
      combinedResultSet =
          combinedResultSet == null
              ? resultSet
              : Pql.combineResultSets(combinedResultSet, resultSet);

      System.out.printf(
          "%d) %d line items beginning at offset %d were found.%n",
          i++,
          resultSet.getRows() == null ? 0 : resultSet.getRows().length,
          statementBuilder.getOffset());

      statementBuilder.increaseOffsetBy(StatementBuilder.SUGGESTED_PAGE_LIMIT);
    } while (resultSet.getRows() != null && resultSet.getRows().length > 0);

    // Change to your file location.
    String filePath = File.createTempFile("Line-Items-Named-Like-", ".csv").toString();

    // Write the result set to a CSV.
    CsvFiles.writeCsv(Pql.resultSetToStringArrayList(combinedResultSet), filePath);

    System.out.printf("Line items saved to: %s%n", filePath);
  }

  public static void main(String[] args) {
    AdManagerSession session;
    try {
      // Generate a refreshable OAuth2 credential.
      Credential oAuth2Credential =
          new OfflineCredentials.Builder()
              .forApi(Api.AD_MANAGER)
              .fromFile()
              .build()
              .generateCredential();

      // Construct a AdManagerSession.
      session =
          new AdManagerSession.Builder().fromFile().withOAuth2Credential(oAuth2Credential).build();
    } catch (ConfigurationLoadException cle) {
      System.err.printf(
          "Failed to load configuration from the %s file. Exception: %s%n",
          DEFAULT_CONFIGURATION_FILENAME, cle);
      return;
    } catch (ValidationException ve) {
      System.err.printf(
          "Invalid configuration in the %s file. Exception: %s%n",
          DEFAULT_CONFIGURATION_FILENAME, ve);
      return;
    } catch (OAuthException oe) {
      System.err.printf(
          "Failed to create OAuth credentials. Check OAuth settings in the %s file. "
              + "Exception: %s%n",
          DEFAULT_CONFIGURATION_FILENAME, oe);
      return;
    }

    AdManagerServices adManagerServices = new AdManagerServices();

    try {
      runExample(adManagerServices, session);
    } catch (ApiException apiException) {
      // ApiException is the base class for most exceptions thrown by an API request. Instances
      // of this exception have a message and a collection of ApiErrors that indicate the
      // type and underlying cause of the exception. Every exception object in the admanager.axis
      // packages will return a meaningful value from toString
      //
      // ApiException extends RemoteException, so this catch block must appear before the
      // catch block for RemoteException.
      System.err.println("Request failed due to ApiException. Underlying ApiErrors:");
      if (apiException.getErrors() != null) {
        int i = 0;
        for (ApiError apiError : apiException.getErrors()) {
          System.err.printf("  Error %d: %s%n", i++, apiError);
        }
      }
    } catch (RemoteException re) {
      System.err.printf("Request failed unexpectedly due to RemoteException: %s%n", re);
    } catch (IOException ioe) {
      System.err.printf("Example failed due to IOException: %s%n", ioe);
    }
  }
}
apache-2.0
OpenNTF/org.openntf.domino
domino/externals/tinkerpop/src/main/java/com/tinkerpop/pipes/util/AbstractMetaPipe.java
638
package com.tinkerpop.pipes.util; import com.tinkerpop.pipes.AbstractPipe; import com.tinkerpop.pipes.Pipe; /** * @author Marko A. Rodriguez (http://markorodriguez.com) */ @SuppressWarnings({ "rawtypes" }) public abstract class AbstractMetaPipe<S, E> extends AbstractPipe<S, E> implements MetaPipe { public void enablePath(final boolean enable) { for (final Pipe pipe : this.getPipes()) { pipe.enablePath(enable); } super.enablePath(enable); } public void reset() { for (final Pipe pipe : this.getPipes()) { pipe.reset(); } super.reset(); } }
apache-2.0
Wikidata/Wikidata-Toolkit
wdtk-datamodel/src/test/java/org/wikidata/wdtk/datamodel/implementation/PropertyUpdateImplTest.java
4703
/* * #%L * Wikidata Toolkit Data Model * %% * Copyright (C) 2014 Wikidata Toolkit Developers * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package org.wikidata.wdtk.datamodel.implementation; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.wikidata.wdtk.datamodel.implementation.JsonTestUtils.producesJson; import static org.wikidata.wdtk.datamodel.implementation.JsonTestUtils.toJson; import java.util.Collections; import java.util.Map; import org.junit.jupiter.api.Test; import org.wikidata.wdtk.datamodel.helpers.Datamodel; import org.wikidata.wdtk.datamodel.helpers.PropertyUpdateBuilder; import org.wikidata.wdtk.datamodel.helpers.StatementUpdateBuilder; import org.wikidata.wdtk.datamodel.interfaces.AliasUpdate; import org.wikidata.wdtk.datamodel.interfaces.PropertyIdValue; import org.wikidata.wdtk.datamodel.interfaces.PropertyUpdate; import org.wikidata.wdtk.datamodel.interfaces.StatementUpdate; import org.wikidata.wdtk.datamodel.interfaces.TermUpdate; public class PropertyUpdateImplTest { private static final PropertyIdValue P1 = Datamodel.makeWikidataPropertyIdValue("P1"); private static final StatementUpdate STATEMENTS = StatementUpdateBuilder.create().remove("ID123").build(); private static final TermUpdate 
LABELS = LabeledDocumentUpdateImplTest.LABELS; private static final TermUpdate DESCRIPTIONS = TermedDocumentUpdateImplTest.DESCRIPTIONS; private static final AliasUpdate ALIAS = TermedDocumentUpdateImplTest.ALIAS; private static final Map<String, AliasUpdate> ALIASES = TermedDocumentUpdateImplTest.ALIASES; @Test public void testFields() { PropertyUpdate update = new PropertyUpdateImpl(P1, 123, LABELS, DESCRIPTIONS, ALIASES, STATEMENTS); assertEquals(P1, update.getEntityId()); assertEquals(123, update.getBaseRevisionId()); assertSame(LABELS, update.getLabels()); assertSame(DESCRIPTIONS, update.getDescriptions()); assertEquals(ALIASES, update.getAliases()); assertSame(STATEMENTS, update.getStatements()); } @Test public void testEmpty() { PropertyUpdate empty = new PropertyUpdateImpl(P1, 123, TermUpdate.EMPTY, TermUpdate.EMPTY, Collections.emptyMap(), StatementUpdate.EMPTY); assertTrue(empty.isEmpty()); PropertyUpdate nonempty = new PropertyUpdateImpl(P1, 123, TermUpdate.EMPTY, DESCRIPTIONS, Collections.emptyMap(), StatementUpdate.EMPTY); assertFalse(nonempty.isEmpty()); } @Test @SuppressWarnings("unlikely-arg-type") public void testEquality() { PropertyUpdate update = new PropertyUpdateImpl(P1, 123, LABELS, DESCRIPTIONS, ALIASES, STATEMENTS); assertFalse(update.equals(null)); assertFalse(update.equals(this)); assertTrue(update.equals(update)); assertTrue(update.equals(new PropertyUpdateImpl(P1, 123, LABELS, DESCRIPTIONS, ALIASES, STATEMENTS))); assertFalse(update.equals(new PropertyUpdateImpl(P1, 123, LABELS, TermUpdate.EMPTY, ALIASES, STATEMENTS))); } @Test public void testHashCode() { assertEquals( new PropertyUpdateImpl(P1, 123, LABELS, DESCRIPTIONS, ALIASES, STATEMENTS).hashCode(), new PropertyUpdateImpl(P1, 123, LABELS, DESCRIPTIONS, ALIASES, STATEMENTS).hashCode()); } @Test public void testJson() { assertThat( new PropertyUpdateImpl( P1, 123, TermUpdate.EMPTY, TermUpdate.EMPTY, Collections.emptyMap(), StatementUpdate.EMPTY), producesJson("{}")); 
assertThat(PropertyUpdateBuilder.forEntityId(P1).updateLabels(LABELS).build(), producesJson("{'labels':" + toJson(LABELS) + "}")); assertThat(PropertyUpdateBuilder.forEntityId(P1).updateDescriptions(DESCRIPTIONS).build(), producesJson("{'descriptions':" + toJson(LABELS) + "}")); assertThat(PropertyUpdateBuilder.forEntityId(P1).updateAliases("en", ALIAS).build(), producesJson("{'aliases':{'en':" + toJson(ALIAS) + "}}")); assertThat(PropertyUpdateBuilder.forEntityId(P1).updateStatements(STATEMENTS).build(), producesJson("{'claims':" + toJson(STATEMENTS) + "}")); } }
apache-2.0
OurFriendIrony/MediaNotifier
app/src/main/java/uk/co/ourfriendirony/medianotifier/clients/rawg/game/search/GameSearchPlatform.java
3203
package uk.co.ourfriendirony.medianotifier.clients.rawg.game.search; import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonPropertyOrder; import java.util.HashMap; import java.util.Map; @JsonInclude(JsonInclude.Include.NON_NULL) @JsonPropertyOrder({ "id", "name", "slug", "image", "year_end", "year_start", "games_count", "image_background" }) public class GameSearchPlatform { @JsonProperty("id") private Integer id; @JsonProperty("name") private String name; @JsonProperty("slug") private String slug; @JsonProperty("image") private Object image; @JsonProperty("year_end") private Object yearEnd; @JsonProperty("year_start") private Object yearStart; @JsonProperty("games_count") private Integer gamesCount; @JsonProperty("image_background") private String imageBackground; @JsonIgnore private Map<String, Object> additionalProperties = new HashMap<String, Object>(); @JsonProperty("id") public Integer getId() { return id; } @JsonProperty("id") public void setId(Integer id) { this.id = id; } @JsonProperty("name") public String getName() { return name; } @JsonProperty("name") public void setName(String name) { this.name = name; } @JsonProperty("slug") public String getSlug() { return slug; } @JsonProperty("slug") public void setSlug(String slug) { this.slug = slug; } @JsonProperty("image") public Object getImage() { return image; } @JsonProperty("image") public void setImage(Object image) { this.image = image; } @JsonProperty("year_end") public Object getYearEnd() { return yearEnd; } @JsonProperty("year_end") public void setYearEnd(Object yearEnd) { this.yearEnd = yearEnd; } @JsonProperty("year_start") public Object getYearStart() { return yearStart; } @JsonProperty("year_start") public void setYearStart(Object yearStart) 
{ this.yearStart = yearStart; } @JsonProperty("games_count") public Integer getGamesCount() { return gamesCount; } @JsonProperty("games_count") public void setGamesCount(Integer gamesCount) { this.gamesCount = gamesCount; } @JsonProperty("image_background") public String getImageBackground() { return imageBackground; } @JsonProperty("image_background") public void setImageBackground(String imageBackground) { this.imageBackground = imageBackground; } @JsonAnyGetter public Map<String, Object> getAdditionalProperties() { return this.additionalProperties; } @JsonAnySetter public void setAdditionalProperty(String name, Object value) { this.additionalProperties.put(name, value); } }
apache-2.0
sayan801/snomedct-for-India-experiments
src-csno/Source/CSNOLib/ForCSNOLib/src/in/cdac/medinfo/csnotk/csnolib/agents/SNOMEDAgent.java
21219
/******************************************************************************* * Copyright 2014 Centre for Development of Advanced Computing(C-DAC), Pune * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package in.cdac.medinfo.csnotk.csnolib.agents; import in.cdac.medinfo.csnotk.csnolib.api.ISNOMEDAgent; import in.cdac.medinfo.csnotk.csnolib.api.SearchAgent; import in.cdac.medinfo.csnotk.csnolib.commons.Constants; import in.cdac.medinfo.csnotk.csnolib.commons.EnumSuffix; import in.cdac.medinfo.csnotk.csnolib.db.QueryManager; import in.cdac.medinfo.csnotk.csnolib.model.Concept; import in.cdac.medinfo.csnotk.csnolib.model.Description; import in.cdac.medinfo.csnotk.csnolib.util.CSNOLogger; import in.cdac.medinfo.csnotk.csnolib.util.PropertyReader; import java.io.IOException; import java.sql.SQLException; import java.util.LinkedHashSet; import java.util.List; import java.util.Properties; import java.util.Set; /** * A concrete implementation of {@link ISNOMEDAgent} * */ public class SNOMEDAgent implements ISNOMEDAgent { private static final long serialVersionUID = 1L; private SearchAgent searchAgent = new LuceneSearchAgent(); /** * @param matchTerm * @return both active and inactive descriptions */ @Override public Set<Description> search(String matchTerm) { //matchTerm = applyAsciiFoldingFilter(matchTerm); Properties properties = null; int allHits = 10; try { properties = 
PropertyReader.loadSystemProperties(); allHits = Integer.parseInt(properties.getProperty("all.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Set<Description> searchedDescriptions = searchAgent.search(matchTerm,allHits); return searchedDescriptions; } /** * @param matchTerm * @return active concepts which have active descriptions only */ @Override public Set<Description> searchActiveTerm(String matchTerm) { Set<Description> output = new LinkedHashSet<Description>(); //matchTerm = applyAsciiFoldingFilter(matchTerm); Properties properties = null; int allHits = 10; try { properties = PropertyReader.loadSystemProperties(); allHits = Integer.parseInt(properties.getProperty("all.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Set<Description> searchedDescriptions = searchAgent.search(matchTerm,allHits); try{ if(null != searchedDescriptions) { for(Description description : searchedDescriptions) { if(1==description.getStatus()&& 1 == new Concept(description.getConceptId()).getActiveStatus()){ output.add(description); } } } } catch (SQLException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } return output; } /** * @param id *@return concept */ @Override public Concept getConcept(String id) { if(null == id) return null; Concept concept = null; try { concept = new Concept(id); } catch (SQLException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); return null; } return concept; } /** * @param matchTerm * @param suffix * @return descriptions */ @Override public Set<Description> search(String matchTerm, EnumSuffix suffix) { Properties properties = null; int allHits = 10; try { properties = PropertyReader.loadSystemProperties(); allHits = Integer.parseInt(properties.getProperty("all.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage);; } Set<Description> searchedDescriptions = searchAgent.search(matchTerm, suffix, allHits); return searchedDescriptions; } /** * @param suffix * @return descriptions */ @Override public Set<Description> search(EnumSuffix suffix) { Properties properties = null; int allHits = 10; try { properties = PropertyReader.loadSystemProperties(); allHits = Integer.parseInt(properties.getProperty("all.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Set<Description> searchedDescriptions = searchAgent.search(suffix, allHits); return searchedDescriptions; } /** * @param suffix * @param root * @return concepts */ @Override public List<Concept> search(EnumSuffix suffix, int root) { return null; } /** *@param matchTerm *@return both active and inactive suggestions */ @Override public Set<String> getSuggestions(String matchTerm) { //matchTerm = applyAsciiFoldingFilter(matchTerm); Set<String> output = new LinkedHashSet<String>(); //This block added for not returning any suggestions if ConceptID/Desc ID is input by user /** * JAVA SCRIPT LONG LIMIT: +/-9007199254740992 * JAVA LONG LIMIT: +/- 9223372036854775807 * Sample SNOMED ID: 900000000000441000 */ try { if(null != matchTerm ) { matchTerm = matchTerm.trim(); if(!matchTerm.isEmpty()) { Long.parseLong(matchTerm); return output; } } } catch (NumberFormatException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Properties properties = null; int topHits = -1; int allHits = -1; try { properties = PropertyReader.loadSystemProperties(); topHits = Integer.parseInt(properties.getProperty("top.suggest.hits")); allHits = Integer.parseInt(properties.getProperty("all.suggest.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Set<Description> descriptions = searchAgent.search(matchTerm, allHits); if(null != descriptions) { for(Description description : descriptions) { if(output.size()>= topHits) { break; } output.add(description.getTerm()); } } return output; } /** *@param matchTerm *@return active suggestions only */ @Override public Set<String> getActiveSuggestions(String matchTerm) { //matchTerm = applyAsciiFoldingFilter(matchTerm); Set<String> output = new LinkedHashSet<String>(); //This block added for not returning any suggestions if ConceptID/Desc ID is input by user /** * JAVA SCRIPT LONG LIMIT: +/-9007199254740992 * JAVA LONG LIMIT: +/- 
9223372036854775807 * Sample SNOMED ID: 900000000000441000 */ try { if(null != matchTerm ) { matchTerm = matchTerm.trim(); if(!matchTerm.isEmpty()) { Long.parseLong(matchTerm); return output; } } } catch (NumberFormatException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Properties properties = null; int topHits = -1; int allHits = -1; try { properties = PropertyReader.loadSystemProperties(); topHits = Integer.parseInt(properties.getProperty("top.suggest.hits")); allHits = Integer.parseInt(properties.getProperty("all.suggest.hits")); } catch (IOException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } Set<Description> descriptions = searchAgent.search(matchTerm, allHits); try { if(null != descriptions) { for(Description description : descriptions) { if(output.size()>= topHits) { break; } if(1== description.getStatus() && 1== new Concept(description.getConceptId()).getActiveStatus()) { output.add(description.getTerm()); } } } } catch (SQLException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } return output; } /** * @param ids * @return concepts */ @Override public List<Concept> getConcepts(List<String> ids) { if(null == ids) return null; List<Concept> concepts = null; QueryManager qm = new QueryManager(); try { concepts = qm.getConcepts(ids); } catch (SQLException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } return concepts; } /** * @param ids * @return concepts */ @Override public List<Concept> getSortedConcepts(List<String> ids) { if(null == ids) return null; List<Concept> concepts = null; QueryManager qm = new QueryManager(); try { concepts = qm.getSortedConcepts(ids); } catch (SQLException e) { //Get the Stack trace and form the exception message. StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } return concepts; } @Override public Set<Concept> getAllDescendentConcepts(String id) { if(null == id) return null; else { try { QueryManager qm = new QueryManager(); return qm.getAllDescendentConcepts(id); } catch (SQLException e) { //Get the Stack trace and form the exception message. 
StackTraceElement arrStackTraceElement[]; arrStackTraceElement = e.getStackTrace(); String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); //Log the Exception CSNOLogger.logException(strMessage); } } return null; } // @Override // public Set<Concept> getAllDescendentConcepts(String id) // { // if(null == id) // return null; // Set<Concept> lstMasterconcepts = new HashSet<Concept>(); // // Concept concept = getConcept(id.trim()); // Set<Relationship> sets = concept.getRelationshipsByType(RelationshipTypes.IS_A.getValue(),true); // if(sets.size() > 0) // { // List<String> relationConceptId = new ArrayList<String>(); // for(Relationship relationship : sets) // { // relationConceptId.add(relationship.getSourceConceptId()); // } // List<Concept> tmpConceptlst = getConcepts(relationConceptId); // ExecutorService service = Executors.newCachedThreadPool();//.newFixedThreadPool(tmpConceptlst.size());//.newSingleThreadExecutor(); // //callableDescedants objDescedants; // List<Future<Set<Concept>>> list = new ArrayList<Future<Set<Concept>>>(); // // for(Concept chdConcept : tmpConceptlst) // { // list.add(service.submit(new callableDescedants(chdConcept.getId()))); // } // // // for (Future<Set<Concept>> future : list) // { // try // { // lstMasterconcepts.addAll(future.get()); // //System.out.println(lstMasterconcepts.size()); // } // catch (InterruptedException e) // { // e.printStackTrace(); // } // catch (ExecutionException e) // { // e.printStackTrace(); // } // } // lstMasterconcepts.addAll(tmpConceptlst); // service.shutdown(); // return lstMasterconcepts; // } // else // return lstMasterconcepts; // // } // class callableDescedants implements Callable<Set<Concept>> 
// { // private String id = null; // // public callableDescedants(String id){ // this.id = id; // } // // @Override // public Set<Concept> call() throws Exception // { // //Concept concept = ; // return _getAllDescendentConcepts(getConcept(id.trim())); // } // // Set<Concept> _getAllDescendentConcepts(Concept concept) // { // Set<Concept> lstMasterconcepts = new HashSet<Concept>(); // Set<Relationship> sets = concept.getRelationshipsByType(RelationshipTypes.IS_A.getValue(),true); // if(sets.size() > 0) // { // List<String> relationConceptId = new ArrayList<String>(); // for(Relationship relationship : sets) // { // relationConceptId.add(relationship.getSourceConceptId()); // } // // List<Concept> tmpConceptlst = getConcepts(relationConceptId); // for (Concept chdConcept : tmpConceptlst) // { // lstMasterconcepts.addAll(_getAllDescendentConcepts(chdConcept)); // } // lstMasterconcepts.addAll(tmpConceptlst); // // //System.out.println("Returning : " + lstMasterconcepts.size() + " Thread " +Thread.currentThread().toString() ); // return lstMasterconcepts; // } // else // return lstMasterconcepts; // } // // } // private String applyAsciiFoldingFilter(String matchTerm) // { // String targetMatchTerm = null; // Analyzer objAnalyzer = new Analyzer() // { // @Override // protected TokenStreamComponents createComponents(String arg0, Reader reader) // { // Tokenizer source = new StandardTokenizer(Version.LUCENE_47, reader); // TokenStream filter = new ASCIIFoldingFilter(source);//new StandardTokenizer(Version.LUCENE_47, reader); // filter = new StandardFilter(Version.LUCENE_47, filter); // return new TokenStreamComponents(source, filter); // } // }; // // try // { // // TokenStream stream = objAnalyzer.tokenStream(null, new StringReader(matchTerm)); // // get the CharTermAttribute from the TokenStream // CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class); // stream.reset(); // // print all tokens until stream is exhausted // while 
(stream.incrementToken()) // { // if(targetMatchTerm == null) // targetMatchTerm = termAtt.toString(); // else // targetMatchTerm +=" "+termAtt.toString(); // } // stream.end(); // stream.close(); // } // catch (IOException e) // { // //Get the Stack trace and form the exception message. // StackTraceElement arrStackTraceElement[]; // arrStackTraceElement = e.getStackTrace(); // String strMessage = e.getClass() + Constants.NEW_LINE + Constants.CLASS_NAME + arrStackTraceElement[0].getClassName() + Constants.NEW_LINE + Constants.METHOD_NAME + arrStackTraceElement[0].getMethodName() + Constants.NEW_LINE + Constants.LINE_NUMBER + arrStackTraceElement[0].getLineNumber()+ Constants.NEW_LINE + Constants.MESSAGE_DESCRIPTION + e.getMessage(); // //Log the Exception // CSNOLogger.logException(strMessage); // return null; // } // return targetMatchTerm; // } }
apache-2.0
mangobin/SSNOC
src/main/java/edu/cmu/sv/ws/ssnoc/data/dao/IUserDAO.java
995
package edu.cmu.sv.ws.ssnoc.data.dao;

import java.util.List;

import edu.cmu.sv.ws.ssnoc.data.po.UserPO;

/**
 * Interface specifying the contract that all implementations will implement to
 * provide persistence of User information in the system.
 *
 */
public interface IUserDAO {
    /**
     * This method will save the information of the user into the database.
     *
     * @param userPO
     *            - User information to be saved.
     *
     * @return - Identifier (row ID) assigned to the saved user record.
     */
    long save(UserPO userPO);

    /**
     * This method will load all the users in the
     * database.
     *
     * @return - List of all users.
     */
    List<UserPO> loadUsers();

    /**
     * This method will search for a user by his userName in the database. The
     * search performed is a case insensitive search to allow case mismatch
     * situations.
     *
     * @param userName
     *            - User name to search for.
     *
     * @return - UserPO with the user information if a match is found.
     */
    UserPO findByName(String userName);

    /**
     * This method will search for a user by his numeric user ID in the
     * database.
     *
     * @param userID
     *            - User ID to search for.
     *
     * @return - UserPO with the user information if a match is found.
     */
    UserPO findByUserID(long userID);
}
apache-2.0
Orange-OpenSource/elpaaso-core
cloud-paas-db/src/test/java/liquibase/database/core/DbaasPostgresDatabase.java
1982
/** * Copyright (C) 2015 Orange * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package liquibase.database.core; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Set; /** * This class extends liquibase support of Postgresql Database for postgresql databases created with DBaaS<br> * By default liquibase assumes that all objects created in public schema are owned by the connected user<br> * But in postgresql databases created by DBaaS, some 'system' objects are created in public schema but are are not owned by the user<br> * This implementation enables to avoid those objects from being dropped by liquibase dropAll command used in tests<br> */ public class DbaasPostgresDatabase extends PostgresDatabase { private static Logger LOGGER = LoggerFactory.getLogger(DbaasPostgresDatabase.class); public DbaasPostgresDatabase() { super(); LOGGER.info("Creating DbaasPostgresDatabase"); } @Override public int getPriority() { return getHigherPriorityToReplaceDefaultPosgresqlDbImpl(); } private int getHigherPriorityToReplaceDefaultPosgresqlDbImpl() { return PRIORITY_DATABASE; } @Override public Set<String> getSystemViews() { LOGGER.debug("getSystemViews from DbaasPostgresDatabase - adding pg_stat_statements") ; Set<String> systemViews = super.getSystemViews(); systemViews.add("pg_stat_statements"); return systemViews; } }
apache-2.0
sdinot/hipparchus
hipparchus-geometry/src/test/java/org/hipparchus/geometry/spherical/twod/SubCircleTest.java
6924
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This is not the original file distributed by the Apache Software Foundation * It has been modified by the Hipparchus project */ package org.hipparchus.geometry.spherical.twod; import org.hipparchus.geometry.euclidean.threed.Rotation; import org.hipparchus.geometry.euclidean.threed.Vector3D; import org.hipparchus.geometry.partitioning.RegionFactory; import org.hipparchus.geometry.partitioning.Side; import org.hipparchus.geometry.partitioning.SubHyperplane.SplitSubHyperplane; import org.hipparchus.geometry.spherical.oned.ArcsSet; import org.hipparchus.geometry.spherical.oned.Sphere1D; import org.hipparchus.util.MathUtils; import org.junit.Assert; import org.junit.Test; public class SubCircleTest { @Test public void testFullCircle() { Circle circle = new Circle(Vector3D.PLUS_K, 1.0e-10); SubCircle set = circle.wholeHyperplane(); Assert.assertEquals(MathUtils.TWO_PI, set.getSize(), 1.0e-10); Assert.assertTrue(circle == set.getHyperplane()); Assert.assertTrue(circle != set.copySelf().getHyperplane()); } @Test public void testSide() { Circle xzPlane = new Circle(Vector3D.PLUS_J, 1.0e-10); SubCircle sc1 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 1.0e-10, 1.0, 3.0, 5.0, 6.0); 
Assert.assertEquals(Side.BOTH, sc1.split(xzPlane).getSide()); SubCircle sc2 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 1.0e-10, 1.0, 3.0); Assert.assertEquals(Side.MINUS, sc2.split(xzPlane).getSide()); SubCircle sc3 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 1.0e-10, 5.0, 6.0); Assert.assertEquals(Side.PLUS, sc3.split(xzPlane).getSide()); SubCircle sc4 = create(Vector3D.PLUS_J, Vector3D.PLUS_K, Vector3D.PLUS_I, 1.0e-10, 5.0, 6.0); Assert.assertEquals(Side.HYPER, sc4.split(xzPlane).getSide()); SubCircle sc5 = create(Vector3D.MINUS_J, Vector3D.PLUS_I, Vector3D.PLUS_K, 1.0e-10, 5.0, 6.0); Assert.assertEquals(Side.HYPER, sc5.split(xzPlane).getSide()); } @Test public void testSPlit() { Circle xzPlane = new Circle(Vector3D.PLUS_J, 1.0e-10); SubCircle sc1 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 1.0e-10, 1.0, 3.0, 5.0, 6.0); SplitSubHyperplane<Sphere2D> split1 = sc1.split(xzPlane); ArcsSet plus1 = (ArcsSet) ((SubCircle) split1.getPlus()).getRemainingRegion(); ArcsSet minus1 = (ArcsSet) ((SubCircle) split1.getMinus()).getRemainingRegion(); Assert.assertEquals(1, plus1.asList().size()); Assert.assertEquals(5.0, plus1.asList().get(0).getInf(), 1.0e-10); Assert.assertEquals(6.0, plus1.asList().get(0).getSup(), 1.0e-10); Assert.assertEquals(1, minus1.asList().size()); Assert.assertEquals(1.0, minus1.asList().get(0).getInf(), 1.0e-10); Assert.assertEquals(3.0, minus1.asList().get(0).getSup(), 1.0e-10); SubCircle sc2 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 1.0e-10, 1.0, 3.0); SplitSubHyperplane<Sphere2D> split2 = sc2.split(xzPlane); Assert.assertNull(split2.getPlus()); ArcsSet minus2 = (ArcsSet) ((SubCircle) split2.getMinus()).getRemainingRegion(); Assert.assertEquals(1, minus2.asList().size()); Assert.assertEquals(1.0, minus2.asList().get(0).getInf(), 1.0e-10); Assert.assertEquals(3.0, minus2.asList().get(0).getSup(), 1.0e-10); SubCircle sc3 = create(Vector3D.PLUS_K, Vector3D.PLUS_I, Vector3D.PLUS_J, 
1.0e-10, 5.0, 6.0); SplitSubHyperplane<Sphere2D> split3 = sc3.split(xzPlane); ArcsSet plus3 = (ArcsSet) ((SubCircle) split3.getPlus()).getRemainingRegion(); Assert.assertEquals(1, plus3.asList().size()); Assert.assertEquals(5.0, plus3.asList().get(0).getInf(), 1.0e-10); Assert.assertEquals(6.0, plus3.asList().get(0).getSup(), 1.0e-10); Assert.assertNull(split3.getMinus()); SubCircle sc4 = create(Vector3D.PLUS_J, Vector3D.PLUS_K, Vector3D.PLUS_I, 1.0e-10, 5.0, 6.0); SplitSubHyperplane<Sphere2D> split4 = sc4.split(xzPlane); Assert.assertEquals(Side.HYPER, sc4.split(xzPlane).getSide()); Assert.assertNull(split4.getPlus()); Assert.assertNull(split4.getMinus()); SubCircle sc5 = create(Vector3D.MINUS_J, Vector3D.PLUS_I, Vector3D.PLUS_K, 1.0e-10, 5.0, 6.0); SplitSubHyperplane<Sphere2D> split5 = sc5.split(xzPlane); Assert.assertEquals(Side.HYPER, sc5.split(xzPlane).getSide()); Assert.assertNull(split5.getPlus()); Assert.assertNull(split5.getMinus()); } @Test public void testSideSplitConsistency() { double tolerance = 1.0e-6; Circle hyperplane = new Circle(new Vector3D(9.738804529764676E-5, -0.6772824575010357, -0.7357230887208355), tolerance); SubCircle sub = new SubCircle(new Circle(new Vector3D(2.1793884139073498E-4, 0.9790647032675541, -0.20354915700704285), tolerance), new ArcsSet(4.7121441684170700, 4.7125386635004760, tolerance)); SplitSubHyperplane<Sphere2D> split = sub.split(hyperplane); Assert.assertNotNull(split.getMinus()); Assert.assertNull(split.getPlus()); Assert.assertEquals(Side.MINUS, sub.split(hyperplane).getSide()); } private SubCircle create(Vector3D pole, Vector3D x, Vector3D y, double tolerance, double ... 
limits) { RegionFactory<Sphere1D> factory = new RegionFactory<Sphere1D>(); Circle circle = new Circle(pole, tolerance); Circle phased = (Circle) Circle.getTransform(new Rotation(circle.getXAxis(), circle.getYAxis(), x, y)).apply(circle); ArcsSet set = (ArcsSet) factory.getComplement(new ArcsSet(tolerance)); for (int i = 0; i < limits.length; i += 2) { set = (ArcsSet) factory.union(set, new ArcsSet(limits[i], limits[i + 1], tolerance)); } return new SubCircle(phased, set); } }
apache-2.0
markhobson/microbrowser
spi/src/main/java/org/hobsoft/microbrowser/spi/CheckableControl.java
1047
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.hobsoft.microbrowser.spi; import org.hobsoft.microbrowser.Control; /** * Defines an HTML form checkbox or radio control. */ public interface CheckableControl extends Control { /** * Gets the value of this control in its checked state. * * @return the form control checked value */ String getCheckedValue(); /** * Gets whether this control can be unchecked. * * @return {@code true} if this control can be unchecked */ boolean isUncheckable(); }
apache-2.0
arnozhang/Android-SlideSupport-ListLayouts
sources/android-slidesupport-listlayouts/src/main/java/com/straw/library/slide/handler/DelayTimeSlideHandler.java
2202
/* * Copyright (C) 2015 Arno Zhang * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.straw.library.slide.handler; import android.os.Handler; import android.os.Looper; import com.straw.library.slide.support.SlideMode; import com.straw.library.slide.support.SlideSupportLayout; public class DelayTimeSlideHandler extends SlideBaseHandler { private int mDelayMillSeconds; private Handler mUIHandler; public DelayTimeSlideHandler() { } public DelayTimeSlideHandler(int delayMillSeconds) { mDelayMillSeconds = delayMillSeconds; } public void setDelayMillSeconds(int delayMillSeconds) { mDelayMillSeconds = delayMillSeconds; } private void ensureUIHandler() { if (mUIHandler == null) { mUIHandler = new Handler(Looper.getMainLooper()); } } @Override public void onSlide(SlideSupportLayout layout, SlideMode mode) { super.onSlide(layout, mode); ensureUIHandler(); mUIHandler.postDelayed(new Runnable() { @Override public void run() { notifySlideFinish(); } }, mDelayMillSeconds); } @Override public void onUnSlide(SlideSupportLayout layout, SlideMode mode, boolean immediately) { super.onUnSlide(layout, mode, immediately); ensureUIHandler(); mUIHandler.postDelayed(new Runnable() { @Override public void run() { notifyUnSlideFinish(); } }, mDelayMillSeconds); } @Override public boolean needHandleThisTouch(SlideSupportLayout layout, float x, float y) { return false; } }
apache-2.0
molnarp/civsim
src/civsim/tree/Filter.java
254
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package civsim.tree; /** * * @author Peter Molnar <mp@nanasoft.hu> */ public interface Filter<T> { public boolean filterNode(Node<T> node); }
apache-2.0
kocherovms/metracer
src/it/java/com/develorium/metracer/BaseClass.java
7175
/* * Copyright 2015-2016 Michael Kocherov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.develorium.metracer; import java.lang.*; import java.util.*; import java.io.*; import java.util.regex.*; import org.junit.*; import org.junit.Assert; public class BaseClass { protected static final String TestProgramMainClassName = "com.develorium.metracertest.Main"; protected static Process testProgramProcess = null; protected static String pid = null; protected abstract static class Scenario extends InputStream implements Environment { public String pid = null; public ByteArrayOutputStream stdout = new ByteArrayOutputStream(); public ByteArrayOutputStream stderr = new ByteArrayOutputStream(); public Date startTime = new Date(); public Integer exitCode = null; private PrintStream stdoutStream = new PrintStream(stdout); private PrintStream stderrStream = new PrintStream(stderr); private int[] extractedStdoutSize = { 0 }; private int[] extractedStderrSize = { 0 }; public abstract String[] getLaunchArguments(); public int process() { return 0; } public long getDuration() { Date currentTime = new Date(); return currentTime.getTime() - startTime.getTime(); } @Override public int read() { try { Thread.currentThread().sleep(50); } catch(InterruptedException e) { } return process(); } @Override public InputStream getStdin() { return this; } @Override public PrintStream getStdout() { return stdoutStream; } @Override public PrintStream getStderr() { return stderrStream; } @Override 
public void exit(int theExitCode) { exitCode = theExitCode; } public void printNewStdout() { String newStdout = getNewPortionOfBuffer(stdout, extractedStdoutSize); if(!newStdout.isEmpty()) System.out.println(newStdout); } public void printNewStderr() { String newStderr = getNewPortionOfBuffer(stderr, extractedStderrSize); if(!newStderr.isEmpty()) System.out.println(newStderr); } public void dump() { System.out.format("Captured stdout output:\n%s\n", stdout.toString()); System.out.format("Captured stderr output:\n%s\n", stderr.toString()); } private String getNewPortionOfBuffer(ByteArrayOutputStream theBuffer, int[] theExtractedSize) { String text = theBuffer.toString(); if(text.length() <= theExtractedSize[0]) return ""; String rv = text.substring(extractedStdoutSize[0]); theExtractedSize[0] = text.length(); return rv; } } protected static class JvmListingScenario extends Scenario { @Override public String[] getLaunchArguments() { return new String[] { "-l" }; } } @BeforeClass public static void launchTestProgram() throws Throwable { String[] args = { "java", "-cp", String.format("%s/target/test-classes", System.getProperty("basedir")), TestProgramMainClassName }; Process p = java.lang.Runtime.getRuntime().exec(args); // wait for program to start String magicMessage = "kms@ sample program started"; InputStream stdout = p.getInputStream(); BufferedReader reader = new BufferedReader(new InputStreamReader(stdout)); String line = null; while((line = reader.readLine()) != null) { if(line.startsWith(magicMessage)) { System.out.println("Test program started"); testProgramProcess = p; break; } } Assert.assertTrue(testProgramProcess != null); Scenario scenario = new JvmListingScenario(); runMetracerScenario(scenario); String capturedOutput = scenario.stdout.toString(); System.out.println(capturedOutput); for (String capturedOutputLine : capturedOutput.split("\n", 1000)){ if(capturedOutputLine.contains(TestProgramMainClassName)) { if(pid != null) throw new 
RuntimeException(String.format("More that one running test program detected (%s): can't decide to which to connect to", TestProgramMainClassName)); Scanner scanner = new Scanner(capturedOutputLine); System.out.format("Searching for PID within \"%s\"\n", capturedOutputLine); Assert.assertTrue(scanner.hasNextInt()); pid = "" + scanner.nextInt(); } } Assert.assertTrue(pid != null && !pid.isEmpty()); System.out.format("Resolved PID is %s\n\n", pid); } @AfterClass public static void shutdownTestProgram() throws Throwable { if(testProgramProcess != null) { testProgramProcess.destroy(); testProgramProcess = null; System.out.println("Test program destroyed"); } if(pid != null && !pid.isEmpty()) System.out.format("Waiting for PID %s is gone\n", pid); int attemptsCount = 20; // 250 * 20 -> 5 seconds for a termination while(pid != null && !pid.isEmpty()) { Scenario scenario = new JvmListingScenario(); runMetracerScenario(scenario); String capturedOutput = scenario.stdout.toString(); System.out.println(capturedOutput); boolean isPidStillPresent = false; for (String capturedOutputLine : capturedOutput.split("\n", 1000)) { if(capturedOutputLine.contains(TestProgramMainClassName)) { Scanner scanner = new Scanner(capturedOutputLine); System.out.format("Searching for PID within \"%s\"\n", capturedOutputLine); Assert.assertTrue(scanner.hasNextInt()); String localPid = "" + scanner.nextInt(); isPidStillPresent = isPidStillPresent || localPid.equals(pid); } } if(!isPidStillPresent) { System.out.format("PID %s is gone, finishing test suite!\n", pid); pid = null; } else { Assert.assertTrue(--attemptsCount > 0); System.out.format("PID %s is still present, waiting\n", pid); try { Thread.currentThread().sleep(250); } catch(InterruptedException e) { } } } } @Before public void printStartSeparator() { System.out.println("------TEST STARTED------"); } @After public void printEndSeparator() { System.out.println("------TEST FINISHED------\n"); } protected static void 
runMetracerScenario(Scenario theScenario) throws Throwable { String[] launchArguments = theScenario.getLaunchArguments(); StringBuilder launchArgumentsStringified = new StringBuilder(); for(String launchArgument: launchArguments) launchArgumentsStringified.append(launchArgument + " "); System.out.format(">>> Launching scenario %s with arguments: %s\n", theScenario.getClass().getSimpleName(), launchArgumentsStringified.toString()); boolean isFinishedOk = false; try { Main.main(launchArguments, theScenario); isFinishedOk = true; } finally { if(!isFinishedOk) theScenario.dump(); System.out.format("<<< Scenario %s finished in %d ms\n", theScenario.getClass().getSimpleName(), theScenario.getDuration()); } } }
apache-2.0
kermitt2/grobid
grobid-core/src/main/java/org/grobid/core/engines/tagging/DummyTagger.java
1066
package org.grobid.core.engines.tagging; import com.google.common.base.Joiner; import org.grobid.core.GrobidModel; import org.grobid.core.GrobidModels; import org.grobid.core.exceptions.GrobidException; import java.io.IOException; import java.util.ArrayList; import java.util.List; /** * This tagger just return one label <dummy> */ public class DummyTagger implements GenericTagger { public static final String DUMMY_LABEL = "<dummy>"; public DummyTagger(GrobidModel model) { if(!model.equals(GrobidModels.DUMMY)) { throw new GrobidException("Cannot use a non-dummy model with the dummy tagger. All dummies or no dummies. "); } } @Override public String label(Iterable<String> data) { final List<String> output = new ArrayList<>(); data.forEach(d -> output.add(d + "\t" + DUMMY_LABEL)); return Joiner.on('\n').join(output); } @Override public String label(String data) { return "<dummy>"; } @Override public void close() throws IOException { } }
apache-2.0
waans11/incubator-asterixdb
hyracks-fullstack/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMIndexAccessor.java
5989
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.hyracks.storage.am.lsm.common.api; import java.util.List; import org.apache.hyracks.api.exceptions.HyracksDataException; import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference; import org.apache.hyracks.storage.am.common.api.IIndexAccessor; import org.apache.hyracks.storage.am.common.api.IndexException; import org.apache.hyracks.storage.am.common.api.TreeIndexException; /** * Client handle for performing operations * (insert/delete/update/search/diskorderscan/merge/flush) on an {@link ILSMHarness}. * An {@link ILSMIndexAccessor} is not thread safe, but different {@link ILSMIndexAccessor}s * can concurrently operate on the same {@link ILSMIndex} (i.e., the {@link ILSMIndex} must allow * concurrent operations). */ public interface ILSMIndexAccessor extends IIndexAccessor { void scheduleFlush(ILSMIOOperationCallback callback) throws HyracksDataException; void scheduleMerge(ILSMIOOperationCallback callback, List<ILSMDiskComponent> components) throws HyracksDataException, IndexException; void scheduleFullMerge(ILSMIOOperationCallback callback) throws HyracksDataException, IndexException; /** * Deletes the tuple from the memory component only. 
* * @throws HyracksDataException * @throws IndexException */ void physicalDelete(ITupleReference tuple) throws HyracksDataException, IndexException; /** * Attempts to insert the given tuple. * If the insert would have to wait for a flush to complete, then this method returns false to * allow the caller to avoid potential deadlock situations. * Otherwise, returns true (insert was successful). * * @param tuple * Tuple to be inserted. * @throws HyracksDataException * If the BufferCache throws while un/pinning or un/latching. * @throws IndexException * If an index-specific constraint is violated, e.g., the key * already exists. */ boolean tryInsert(ITupleReference tuple) throws HyracksDataException, IndexException; /** * Attempts to delete the given tuple. * If the delete would have to wait for a flush to complete, then this method returns false to * allow the caller to avoid potential deadlock situations. * Otherwise, returns true (delete was successful). * * @param tuple * Tuple to be deleted. * @throws HyracksDataException * If the BufferCache throws while un/pinning or un/latching. * @throws IndexException * If there is no matching tuple in the index. */ boolean tryDelete(ITupleReference tuple) throws HyracksDataException, IndexException; /** * Attempts to update the given tuple. * If the update would have to wait for a flush to complete, then this method returns false to * allow the caller to avoid potential deadlock situations. * Otherwise, returns true (update was successful). * * @param tuple * Tuple whose match in the index is to be update with the given * tuples contents. * @throws HyracksDataException * If the BufferCache throws while un/pinning or un/latching. * @throws IndexException * If there is no matching tuple in the index. */ boolean tryUpdate(ITupleReference tuple) throws HyracksDataException, IndexException; /** * This operation is only supported by indexes with the notion of a unique key. 
* If tuple's key already exists, then this operation attempts to performs an update. * Otherwise, it attempts to perform an insert. * If the operation would have to wait for a flush to complete, then this method returns false to * allow the caller to avoid potential deadlock situations. * Otherwise, returns true (insert/update was successful). * * @param tuple * Tuple to be deleted. * @throws HyracksDataException * If the BufferCache throws while un/pinning or un/latching. * @throws IndexException * If there is no matching tuple in the index. */ boolean tryUpsert(ITupleReference tuple) throws HyracksDataException, IndexException; void forcePhysicalDelete(ITupleReference tuple) throws HyracksDataException, IndexException; void forceInsert(ITupleReference tuple) throws HyracksDataException, IndexException; void forceDelete(ITupleReference tuple) throws HyracksDataException, IndexException; void scheduleReplication(List<ILSMDiskComponent> diskComponents, boolean bulkload, LSMOperationType opType) throws HyracksDataException; /** * Force a flush of the in-memory component. * * @throws HyracksDataException * @throws TreeIndexException */ void flush(ILSMIOOperation operation) throws HyracksDataException, IndexException; /** * Merge all on-disk components. * * @throws HyracksDataException * @throws TreeIndexException */ void merge(ILSMIOOperation operation) throws HyracksDataException, IndexException; }
apache-2.0
Blazebit/blaze-faces
blaze-faces/src/test/java/com/blazebit/blazefaces/component/calendar/CalendarTest.java
2539
/* * Copyright 2011-2012 Blazebit * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.blazebit.blazefaces.component.calendar; import static org.junit.Assert.*; import org.junit.After; import org.junit.Before; import org.junit.Test; public class CalendarTest { // private CalendarRenderer renderer; // // @Before // public void setup() { // renderer = new CalendarRenderer(); // } // // @After // public void teardown() { // renderer = null; // } // // @Test // public void dateAsStringShouldBeNullIfValueIsNull() { // Calendar calendar = new Calendar(); // String dateAsString = CalendarUtils.getValueAsString(null, calendar); // // assertEquals(null, dateAsString); // } // // @Test // public void dateAsStringShouldBeSubmittedValueIfExists() { // Calendar calendar = new Calendar(); // calendar.setSubmittedValue("05.07.2010"); // String dateAsString = CalendarUtils.getValueAsString(null, calendar); // // assertEquals("05.07.2010", dateAsString); // } // // @Test // public void convertedValueShouldBeNullWhenEmptyStringIsSubmitted() { // Calendar calendar = new Calendar(); // // Object convertedValue = renderer.getConvertedValue(null, calendar, ""); // assertNull(convertedValue); // // convertedValue = renderer.getConvertedValue(null, calendar, " "); // assertNull(convertedValue); // } // // @Test // public void shouldConvertPattern() { // String pattern = "dd.MM.yyyy"; // assertEquals("dd.mm.yy", CalendarUtils.convertPattern(pattern)); // // pattern = "dd/MM/yy"; // 
assertEquals("dd/mm/y", CalendarUtils.convertPattern(pattern)); // // pattern = "d, MMM, yyyy"; // assertEquals("d, M, yy", CalendarUtils.convertPattern(pattern)); // // pattern = "dd-MMMMMM-yyyy"; // assertEquals("dd-MM-yy", CalendarUtils.convertPattern(pattern)); // // pattern = "dd-MM-yyyy EEE"; // assertEquals("dd-mm-yy D", CalendarUtils.convertPattern(pattern)); // // pattern = "dd-MM-yyyy EEEEEE"; // assertEquals("dd-mm-yy DD", CalendarUtils.convertPattern(pattern)); // } }
apache-2.0
requery/sqlite-android
sqlite-android/src/main/java/io/requery/android/database/sqlite/SQLiteDatabaseConfiguration.java
6660
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // modified from original source see README at the top level of this project package io.requery.android.database.sqlite; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.regex.Pattern; /** * Describes how to configure a database. * <p> * The purpose of this object is to keep track of all of the little * configuration settings that are applied to a database after it * is opened so that they can be applied to all connections in the * connection pool uniformly. * </p><p> * Each connection maintains its own copy of this object so it can * keep track of which settings have already been applied. * </p> * * @hide */ public final class SQLiteDatabaseConfiguration { // The pattern we use to strip email addresses from database paths // when constructing a label to use in log messages. private static final Pattern EMAIL_IN_DB_PATTERN = Pattern.compile("[\\w\\.\\-]+@[\\w\\.\\-]+"); /** * Special path used by in-memory databases. */ public static final String MEMORY_DB_PATH = ":memory:"; /** * The database path. */ public final String path; /** * The label to use to describe the database when it appears in logs. * This is derived from the path but is stripped to remove PII. */ public final String label; /** * The flags used to open the database. 
*/ public @SQLiteDatabase.OpenFlags int openFlags; /** * The maximum size of the prepared statement cache for each database connection. * Must be non-negative. * * Default is 25. */ public int maxSqlCacheSize; /** * The database locale. * * Default is the value returned by {@link Locale#getDefault()}. */ public Locale locale; /** * True if foreign key constraints are enabled. * * Default is false. */ public boolean foreignKeyConstraintsEnabled; /** * The custom functions to register. * * This interface is deprecated; see {@link SQLiteFunction} */ @Deprecated public final List<SQLiteCustomFunction> customFunctions = new ArrayList<>(); /** * The {@link SQLiteFunction}s to register. */ public final List<SQLiteFunction> functions = new ArrayList<>(); /** * The custom extensions to register. */ public final List<SQLiteCustomExtension> customExtensions = new ArrayList<>(); /** * Creates a database configuration with the required parameters for opening a * database and default values for all other parameters. * * @param path The database path. * @param openFlags Open flags for the database, such as {@link SQLiteDatabase#OPEN_READWRITE}. */ public SQLiteDatabaseConfiguration(String path, @SQLiteDatabase.OpenFlags int openFlags) { if (path == null) { throw new IllegalArgumentException("path must not be null."); } this.path = path; label = stripPathForLogs(path); this.openFlags = openFlags; // Set default values for optional parameters. maxSqlCacheSize = 25; locale = Locale.getDefault(); } /** * Creates a database configuration with the required parameters for opening a * database and default values for all other parameters. * * @param path The database path. * @param openFlags Open flags for the database, such as {@link SQLiteDatabase#OPEN_READWRITE}. * @param functions custom functions to use. * @param extensions custom extensions to use. 
*/ public SQLiteDatabaseConfiguration(String path, @SQLiteDatabase.OpenFlags int openFlags, List<SQLiteCustomFunction> customFunctions, List<SQLiteFunction> functions, List<SQLiteCustomExtension> extensions) { this(path, openFlags); this.customFunctions.addAll(customFunctions); this.customExtensions.addAll(extensions); this.functions.addAll(functions); } /** * Creates a database configuration as a copy of another configuration. * * @param other The other configuration. */ SQLiteDatabaseConfiguration(SQLiteDatabaseConfiguration other) { if (other == null) { throw new IllegalArgumentException("other must not be null."); } this.path = other.path; this.label = other.label; updateParametersFrom(other); } /** * Updates the non-immutable parameters of this configuration object * from the other configuration object. * * @param other The object from which to copy the parameters. */ void updateParametersFrom(SQLiteDatabaseConfiguration other) { if (other == null) { throw new IllegalArgumentException("other must not be null."); } if (!path.equals(other.path)) { throw new IllegalArgumentException("other configuration must refer to " + "the same database."); } openFlags = other.openFlags; maxSqlCacheSize = other.maxSqlCacheSize; locale = other.locale; foreignKeyConstraintsEnabled = other.foreignKeyConstraintsEnabled; customFunctions.clear(); customFunctions.addAll(other.customFunctions); customExtensions.clear(); customExtensions.addAll(other.customExtensions); functions.clear(); functions.addAll(other.functions); } /** * Returns true if the database is in-memory. * @return True if the database is in-memory. */ public boolean isInMemoryDb() { return path.equalsIgnoreCase(MEMORY_DB_PATH); } private static String stripPathForLogs(String path) { if (path.indexOf('@') == -1) { return path; } return EMAIL_IN_DB_PATTERN.matcher(path).replaceAll("XX@YY"); } }
apache-2.0
goodwinnk/intellij-community
platform/lang-impl/src/com/intellij/codeInsight/daemon/impl/DefaultHighlightInfoProcessor.java
8628
/*
 * Copyright 2000-2015 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.codeInsight.daemon.impl;

import com.intellij.codeHighlighting.Pass;
import com.intellij.codeHighlighting.TextEditorHighlightingPass;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.colors.EditorColorsScheme;
import com.intellij.openapi.editor.ex.MarkupModelEx;
import com.intellij.openapi.editor.impl.DocumentMarkupModel;
import com.intellij.openapi.editor.impl.EditorMarkupModelImpl;
import com.intellij.openapi.editor.markup.MarkupModel;
import com.intellij.openapi.project.DumbService;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.ProperTextRange;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.psi.PsiFile;
import com.intellij.psi.util.PsiUtilBase;
import com.intellij.util.Alarm;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.util.List;

/**
 * Default {@link HighlightInfoProcessor} implementation: applies produced highlight infos
 * to the document markup model on the EDT, removes abandoned highlighters, and keeps the
 * editor's traffic-light icon / error stripe in sync with highlighting progress.
 */
public class DefaultHighlightInfoProcessor extends HighlightInfoProcessor {
  @Override
  public void highlightsInsideVisiblePartAreProduced(@NotNull final HighlightingSession session,
                                                     @Nullable Editor editor,
                                                     @NotNull final List<? extends HighlightInfo> infos,
                                                     @NotNull TextRange priorityRange,
                                                     @NotNull TextRange restrictRange,
                                                     final int groupId) {
    final PsiFile psiFile = session.getPsiFile();
    final Project project = psiFile.getProject();
    final Document document = PsiDocumentManager.getInstance(project).getDocument(psiFile);
    if (document == null) return;
    // Captured before scheduling to EDT; used below to detect that the document changed
    // in the meantime, in which case these infos are stale and must not be applied.
    final long modificationStamp = document.getModificationStamp();
    final TextRange priorityIntersection = priorityRange.intersection(restrictRange);
    ShowAutoImportPassFactory autoImportPassFactory = project.getComponent(ShowAutoImportPassFactory.class);
    ((HighlightingSessionImpl)session).applyInEDT(() -> {
      // Document was edited after these infos were computed — drop them.
      if (modificationStamp != document.getModificationStamp()) return;

      if (priorityIntersection != null) {
        MarkupModel markupModel = DocumentMarkupModel.forDocument(document, project, true);

        EditorColorsScheme scheme = session.getColorsScheme();
        UpdateHighlightersUtil.setHighlightersInRange(project, document, priorityIntersection, scheme,
                                                      infos, (MarkupModelEx)markupModel, groupId);
      }
      if (editor != null && !editor.isDisposed()) {
        // usability: show auto import popup as soon as possible
        if (!DumbService.isDumb(project)) {
          TextEditorHighlightingPass highlightingPass = autoImportPassFactory.createHighlightingPass(psiFile, editor);
          if (highlightingPass != null) highlightingPass.doApplyInformationToEditor();
        }

        ErrorStripeUpdateManager.getInstance(project).repaintErrorStripePanel(editor);
      }
    });
  }

  @Override
  public void highlightsOutsideVisiblePartAreProduced(@NotNull final HighlightingSession session,
                                                      @Nullable Editor editor,
                                                      @NotNull final List<? extends HighlightInfo> infos,
                                                      @NotNull final TextRange priorityRange,
                                                      @NotNull final TextRange restrictedRange,
                                                      final int groupId) {
    final PsiFile psiFile = session.getPsiFile();
    final Project project = psiFile.getProject();
    final Document document = PsiDocumentManager.getInstance(project).getDocument(psiFile);
    if (document == null) return;
    final long modificationStamp = document.getModificationStamp();
    ((HighlightingSessionImpl)session).applyInEDT(() -> {
      // Skip if the project is gone or the document changed since computation.
      if (project.isDisposed() || modificationStamp != document.getModificationStamp()) return;

      EditorColorsScheme scheme = session.getColorsScheme();
      UpdateHighlightersUtil.setHighlightersOutsideRange(project, document, psiFile, infos, scheme,
                                                         restrictedRange.getStartOffset(), restrictedRange.getEndOffset(),
                                                         ProperTextRange.create(priorityRange),
                                                         groupId);
      if (editor != null) {
        ErrorStripeUpdateManager.getInstance(project).repaintErrorStripePanel(editor);
      }
    });
  }

  @Override
  public void allHighlightsForRangeAreProduced(@NotNull HighlightingSession session,
                                               @NotNull TextRange elementRange,
                                               @Nullable List<? extends HighlightInfo> infos) {
    PsiFile psiFile = session.getPsiFile();
    killAbandonedHighlightsUnder(psiFile, elementRange, infos, session);
  }

  /**
   * Queues disposal of UPDATE_ALL highlighters that exactly span {@code range} but were not
   * re-created in {@code infos} — i.e. highlights that are about to disappear anyway; removing
   * them early avoids showing stale highlighting.
   */
  private static void killAbandonedHighlightsUnder(@NotNull PsiFile psiFile,
                                                   @NotNull final TextRange range,
                                                   @Nullable final List<? extends HighlightInfo> infos,
                                                   @NotNull final HighlightingSession highlightingSession) {
    final Project project = psiFile.getProject();
    final Document document = PsiDocumentManager.getInstance(project).getDocument(psiFile);
    if (document == null) return;
    DaemonCodeAnalyzerEx.processHighlights(document, project, null, range.getStartOffset(), range.getEndOffset(), existing -> {
      if (existing.isBijective() &&
          existing.getGroup() == Pass.UPDATE_ALL &&
          range.equalsToRange(existing.getActualStartOffset(), existing.getActualEndOffset())) {
        if (infos != null) {
          for (HighlightInfo created : infos) {
            // Still present in the freshly produced infos — keep it.
            if (existing.equalsByActualOffset(created)) return true;
          }
        }
        // seems that highlight info "existing" is going to disappear
        // remove it earlier
        ((HighlightingSessionImpl)highlightingSession).queueDisposeHighlighterFor(existing);
      }
      return true;
    });
  }

  @Override
  public void infoIsAvailable(@NotNull HighlightingSession session,
                              @NotNull HighlightInfo info,
                              @NotNull TextRange priorityRange,
                              @NotNull TextRange restrictedRange,
                              int groupId) {
    ((HighlightingSessionImpl)session).queueHighlightInfo(info, restrictedRange, groupId);
  }

  @Override
  public void progressIsAdvanced(@NotNull HighlightingSession highlightingSession,
                                 @Nullable Editor editor,
                                 double progress) {
    PsiFile file = highlightingSession.getPsiFile();
    repaintTrafficIcon(file, editor, progress);
  }

  // Coalesces traffic-light repaints on the Swing thread.
  private final Alarm repaintIconAlarm = new Alarm(Alarm.ThreadToUse.SWING_THREAD);

  /**
   * Schedules a (throttled) repaint of the traffic-light icon and error stripe.
   * A new request is queued only when none is pending, or when highlighting finished
   * (progress >= 1) so the final state is always painted.
   */
  private void repaintTrafficIcon(@NotNull final PsiFile file, @Nullable Editor editor, double progress) {
    if (ApplicationManager.getApplication().isCommandLine()) return;

    if (repaintIconAlarm.isEmpty() || progress >= 1) {
      repaintIconAlarm.addRequest(() -> {
        Project myProject = file.getProject();
        if (myProject.isDisposed()) return;
        Editor myeditor = editor;
        if (myeditor == null) {
          // No editor passed in — find one showing this file.
          myeditor = PsiUtilBase.findEditor(file);
        }
        if (myeditor == null || myeditor.isDisposed()) return;
        EditorMarkupModelImpl markup = (EditorMarkupModelImpl)myeditor.getMarkupModel();

        markup.repaintTrafficLightIcon();
        ErrorStripeUpdateManager.getInstance(myProject).repaintErrorStripePanel(myeditor);
      }, 50, null);
    }
  }
}
apache-2.0
wayne8668/axon-ddd
dms-root/dms-oauth/src/main/java/com/dms/oauth/inf/user/view/UserView.java
1623
package com.dms.oauth.inf.user.view;

import java.util.Date;

import org.springframework.data.annotation.Id;

/**
 * Read-side view of a user account, mapped by Spring Data.
 * Plain mutable bean: every field is exposed through a getter/setter pair.
 */
public class UserView {

    /** Document identifier. */
    @Id
    private String idUser;
    private String idClient;
    private String userName;
    private String realName;
    private String tel;
    private String status;
    private Date creationDate;
    private Date updateDate;
    private String pwd;
    private String salt;

    public String getIdUser() {
        return this.idUser;
    }

    public void setIdUser(String idUser) {
        this.idUser = idUser;
    }

    public String getIdClient() {
        return this.idClient;
    }

    public void setIdClient(String idClient) {
        this.idClient = idClient;
    }

    public String getUserName() {
        return this.userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    public String getRealName() {
        return this.realName;
    }

    public void setRealName(String realName) {
        this.realName = realName;
    }

    public String getTel() {
        return this.tel;
    }

    public void setTel(String tel) {
        this.tel = tel;
    }

    public String getStatus() {
        return this.status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public Date getCreationDate() {
        return this.creationDate;
    }

    public void setCreationDate(Date creationDate) {
        this.creationDate = creationDate;
    }

    public Date getUpdateDate() {
        return this.updateDate;
    }

    public void setUpdateDate(Date updateDate) {
        this.updateDate = updateDate;
    }

    public String getPwd() {
        return this.pwd;
    }

    public void setPwd(String pwd) {
        this.pwd = pwd;
    }

    public String getSalt() {
        return this.salt;
    }

    public void setSalt(String salt) {
        this.salt = salt;
    }
}
apache-2.0
jamespedwards42/jedipus
src/main/java/com/fabahaba/jedipus/pubsub/SingleSubscriber.java
7170
package com.fabahaba.jedipus.pubsub;

import com.fabahaba.jedipus.client.RedisClient;
import com.fabahaba.jedipus.executor.RedisClientExecutor;

import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

/**
 * {@link RedisSubscriber} backed by a single {@link MsgConsumer}.
 *
 * Tracks the set of channel/pattern subscriptions so they can be replayed onto a new
 * {@link RedisClient} after a reconnect (see {@link #subscribeNewClient}). The consume loop
 * in {@link #run()} keeps pulling pub/sub messages until the server-reported subscription
 * count drops to zero.
 */
public class SingleSubscriber implements RedisSubscriber {

  // Receives all messages and subscribe/unsubscribe callbacks.
  protected final MsgConsumer defaultConsumer;
  private final RedisClientExecutor clientExecutor;
  private final int soTimeoutMillis;
  // Invoked when consumePubSub times out without receiving anything.
  private final Consumer<RedisSubscriber> onSocketTimeout;
  private final Consumer<String> pongConsumer;
  // Concurrent sets: mutated by subscribe/unsubscribe callers, read when resubscribing a new client.
  private final Set<String> subscriptions = Collections.newSetFromMap(new ConcurrentHashMap<>());
  private final Set<String> psubscriptions = Collections.newSetFromMap(new ConcurrentHashMap<>());
  // Last subscription count reported by the server; starts at MAX_VALUE so run() enters its loop
  // before the first onSubscribed callback arrives.
  private long subCount = Long.MAX_VALUE;
  // Last client we subscribed with; a different client instance signals a reconnect.
  private volatile RedisClient previousClient = null;

  protected SingleSubscriber(final RedisClientExecutor clientExecutor, final int soTimeoutMillis,
      final Consumer<RedisSubscriber> onSocketTimeout, final MsgConsumer defaultConsumer,
      final Consumer<String> pongConsumer) {
    this.clientExecutor = clientExecutor;
    this.soTimeoutMillis = soTimeoutMillis;
    this.onSocketTimeout = onSocketTimeout;
    this.defaultConsumer = defaultConsumer;
    this.pongConsumer = pongConsumer;
  }

  /** Message pump: consume pub/sub replies until every subscription has been dropped. */
  @Override
  public final void run() {
    while (subCount > 0) {
      final boolean consumedMsg = clientExecutor.apply(client -> {
        // Replay subscriptions first in case the executor handed us a fresh connection.
        subscribeNewClient(client);
        return client.consumePubSub(soTimeoutMillis, this) ? Boolean.TRUE : Boolean.FALSE;
      }).booleanValue();
      if (!consumedMsg) {
        // Nothing arrived within soTimeoutMillis — let the callback decide (e.g. ping, abort).
        onSocketTimeout.accept(this);
      }
    }
  }

  @Override
  public long getSubCount() {
    return subCount;
  }

  @Override
  public final void subscribe(final MsgConsumer msgConsumer, final String... channels) {
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.subscribe(channels);
      if (msgConsumer != null) {
        registerConsumer(msgConsumer, channels);
      }
      client.flush();
    });
    // Record after the command is sent so a reconnect replays them.
    for (final String channel : channels) {
      subscriptions.add(channel);
    }
  }

  @Override
  public void subscribe(final MsgConsumer msgConsumer, final Collection<String> channels) {
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.subscribe(channels);
      if (msgConsumer != null) {
        registerConsumer(msgConsumer, channels);
      }
      client.flush();
    });
    for (final String channel : channels) {
      subscriptions.add(channel);
    }
  }

  @Override
  public final void psubscribe(final MsgConsumer msgConsumer, final String... patterns) {
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.psubscribe(patterns);
      if (msgConsumer != null) {
        registerPConsumer(msgConsumer, patterns);
      }
      client.flush();
    });
    for (final String pattern : patterns) {
      psubscriptions.add(pattern);
    }
  }

  @Override
  public void psubscribe(final MsgConsumer msgConsumer, final Collection<String> patterns) {
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.psubscribe(patterns);
      if (msgConsumer != null) {
        registerPConsumer(msgConsumer, patterns);
      }
      client.flush();
    });
    for (final String pattern : patterns) {
      psubscriptions.add(pattern);
    }
  }

  @Override
  public final void unsubscribe(final String... channels) {
    // Empty argument list means "unsubscribe from everything" (Redis UNSUBSCRIBE semantics).
    if (channels.length == 0) {
      subscriptions.clear();
    } else {
      for (final String channel : channels) {
        subscriptions.remove(channel);
      }
    }
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.unsubscribe(channels);
      client.flush();
    });
  }

  @Override
  public void unsubscribe(final Collection<String> channels) {
    if (channels.isEmpty()) {
      subscriptions.clear();
    } else {
      for (final String channel : channels) {
        subscriptions.remove(channel);
      }
    }
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.unsubscribe(channels);
      client.flush();
    });
  }

  @Override
  public final void punsubscribe(final String... patterns) {
    if (patterns.length == 0) {
      psubscriptions.clear();
    } else {
      for (final String pattern : patterns) {
        psubscriptions.remove(pattern);
      }
    }
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.punsubscribe(patterns);
      client.flush();
    });
  }

  @Override
  public void punsubscribe(final Collection<String> patterns) {
    if (patterns.isEmpty()) {
      psubscriptions.clear();
    } else {
      for (final String pattern : patterns) {
        psubscriptions.remove(pattern);
      }
    }
    clientExecutor.accept(client -> {
      subscribeNewClient(client);
      client.punsubscribe(patterns);
      client.flush();
    });
  }

  /**
   * If {@code client} differs from the one used last time (i.e. the executor reconnected),
   * replay all tracked channel and pattern subscriptions onto it.
   */
  private void subscribeNewClient(final RedisClient client) {
    if (previousClient == null) {
      // First client ever seen — callers issue their own subscribe commands.
      this.previousClient = client;
      return;
    }
    if (previousClient == client) {
      return;
    }
    if (!subscriptions.isEmpty()) {
      client.subscribe(subscriptions);
    }
    if (!psubscriptions.isEmpty()) {
      client.psubscribe(psubscriptions);
    }
    this.previousClient = client;
  }

  @Override
  public final void onSubscribed(final String channel, final long subCount) {
    // Server-confirmed count; run() exits once this reaches zero.
    this.subCount = subCount;
    onSubscribed(channel);
  }

  @Override
  public final void onUnsubscribed(final String channel, final long subCount) {
    this.subCount = subCount;
    onUnsubscribed(channel);
  }

  @Override
  public void ping() {
    clientExecutor.accept(client -> {
      client.pubsubPing();
      client.flush();
    });
  }

  @Override
  public void ping(final String pong) {
    clientExecutor.accept(client -> {
      client.pubsubPing(pong);
      client.flush();
    });
  }

  @Override
  public void onPong(final String pong) {
    pongConsumer.accept(pong);
  }

  @Override
  public void close() {
    clientExecutor.close();
  }

  protected void onSubscribed(final String channel) {
    defaultConsumer.onSubscribed(channel);
  }

  public void onUnsubscribed(final String channel) {
    defaultConsumer.onUnsubscribed(channel);
  }

  @Override
  public void onMsg(final String channel, final byte[] payload) {
    defaultConsumer.accept(channel, payload);
  }

  @Override
  public void onPMsg(final String pattern, final String channel, final byte[] payload) {
    defaultConsumer.accept(pattern, channel, payload);
  }

  // Per-channel consumer registration is a no-op here: this implementation routes
  // everything to defaultConsumer. Subclasses may override to multiplex.
  @Override
  public void registerConsumer(final MsgConsumer msgConsumer, final String... channels) {
  }

  @Override
  public void unRegisterConsumer(final MsgConsumer msgConsumer, final String... channels) {
  }

  @Override
  public void registerConsumer(final MsgConsumer msgConsumer, final Collection<String> channels) {
  }

  @Override
  public void unRegisterConsumer(final MsgConsumer msgConsumer, final Collection<String> channels) {
  }
}
apache-2.0
PathVisio/pathvisio
modules/org.pathvisio.desktop/src/org/pathvisio/desktop/data/DBConnDerby.java
3637
/*******************************************************************************
 * PathVisio, a tool for data visualization and analysis using biological pathways
 * Copyright 2006-2019 BiGCaT Bioinformatics
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 ******************************************************************************/
package org.pathvisio.desktop.data;

import java.awt.Component;

import javax.swing.JFileChooser;

import org.bridgedb.gui.SimpleFileFilter;
import org.bridgedb.rdb.construct.DataDerby;
import org.pathvisio.core.preferences.GlobalPreference;
import org.pathvisio.core.preferences.PreferenceManager;

/**
 * user interface functions for single-file Derby databases.
 */
public class DBConnDerby extends DataDerby implements DBConnectorSwing
{
	static final String DB_EXT_NAME_GEX = "Expression datasets";
	static final String DB_EXT_NAME_GDB = "Synonym databases";

	/**
	 * Shows an "open existing database" chooser and returns the selected path,
	 * or null if the dialog was cancelled.
	 *
	 * @param parent component to anchor the dialog on
	 * @return chosen database file path, or null on cancel
	 */
	public String openChooseDbDialog(Component parent)
	{
		// Existing synonym databases may carry either the new .bridge or the legacy .pgdb extension.
		return showDbDialog(parent, JFileChooser.OPEN_DIALOG, "Open database", "*.bridge|*.pgdb");
	}

	/**
	 * Shows a "choose filename for new database" chooser and returns the selected path,
	 * or null if the dialog was cancelled.
	 *
	 * @param parent component to anchor the dialog on
	 * @param defaultName suggested name (currently unused, kept for interface compatibility)
	 * @return chosen database file path, or null on cancel
	 */
	public String openNewDbDialog(Component parent, String defaultName)
	{
		// New synonym databases are always created with the canonical extension.
		return showDbDialog(parent, JFileChooser.SAVE_DIALOG, "Choose filename for database", "*." + DB_FILE_EXT_GDB);
	}

	/**
	 * Shared implementation for both dialogs (resolves the old TODO about the
	 * redundancy between openChooseDbDialog and openNewDbDialog).
	 *
	 * Picks filter and remembered directory based on the connector type (gene
	 * database vs expression dataset), shows the chooser, and on approval stores
	 * the directory back into the preferences.
	 *
	 * @param parent component to anchor the dialog on
	 * @param dialogType JFileChooser.OPEN_DIALOG or SAVE_DIALOG
	 * @param approveText label for the approve button
	 * @param gdbPattern file-filter pattern used when this connector is of type TYPE_GDB
	 * @return selected file path, or null on cancel
	 */
	private String showDbDialog(Component parent, int dialogType, String approveText, String gdbPattern)
	{
		JFileChooser jfc = new JFileChooser();
		jfc.setDialogType(dialogType);

		boolean isGdb = getDbType() == TYPE_GDB;
		if (isGdb)
		{
			jfc.setCurrentDirectory(PreferenceManager.getCurrent().getFile(GlobalPreference.DIR_LAST_USED_PGDB));
			jfc.addChoosableFileFilter(new SimpleFileFilter(DB_EXT_NAME_GDB, gdbPattern, true));
		}
		else
		{
			jfc.setCurrentDirectory(PreferenceManager.getCurrent().getFile(GlobalPreference.DIR_LAST_USED_PGEX));
			jfc.addChoosableFileFilter(new SimpleFileFilter(DB_EXT_NAME_GEX, "*." + DB_FILE_EXT_GEX, true));
		}

		int status = jfc.showDialog(parent, approveText);
		if (status == JFileChooser.APPROVE_OPTION)
		{
			// Remember the directory for the next time a dialog of this kind is opened.
			if (isGdb)
			{
				PreferenceManager.getCurrent().setFile(GlobalPreference.DIR_LAST_USED_PGDB, jfc.getCurrentDirectory());
			}
			else
			{
				PreferenceManager.getCurrent().setFile(GlobalPreference.DIR_LAST_USED_PGEX, jfc.getCurrentDirectory());
			}
			return jfc.getSelectedFile().toString();
		}
		return null;
	}
}
apache-2.0
aranhakki/experimental-performance
java/src/memorymanagement/FunctionArrayMemory.java
2524
/* * Copyright 2014 Aran Hakki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package memorymanagement; // The FunctionArrayMemory which will provide a way to manage the memory of java Lambdas. // See ByteArrayMemory for applications. public final class FunctionArrayMemory { // encapsulate data access behind functions for value safety. private static byte _1 = 1; private static byte _2 = 2; private static byte _3= 3; private static byte _4 = 4; private static byte _5 = 5; private static byte _6 = 6; private static byte _7 = 7; private static byte _8 = 8; private static byte _9 = 9; private static byte _10 = 10; private static byte _11 = 11; private static byte _12 = 12; private static byte _13 = 13; private static byte _14 = 14; private static byte _15 = 15; private static byte _16 = 16; private Object[] objects = new Object[Integer.MAX_VALUE]; static public class Ref{ private int ref; public Ref(int offsetReference){ ref = offsetReference; } } public int alloc(int size){ int sizePlus1 = size++; int freeSlots = 0; int start = 0; int end = 0; for (int i=0;i<Integer.MAX_VALUE;++i){ if (objects[i]!=null){ freeSlots++; if (freeSlots==sizePlus1){ end = i; objects[start]=new Ref(end); return start; } } else { freeSlots = 0; start = i; } } throw new IllegalStateException("cannot alloc, not enough space"); } public void free(int offsetReference){ Object o = objects[offsetReference]; if (o instanceof Ref){ Ref r = (Ref)o; for (int i=1;i<=r.ref;++i){ free(offsetReference+i); } } 
objects[offsetReference] = null; } public <T> void set(int offsetReference, int fieldId, Class<T> clazz, T object){ if (!clazz.isInstance(object)){ throw new IllegalStateException("not expected instance"); } objects[offsetReference+fieldId] = object; } public <T> T get(int offsetReference, int fieldId, Class<T> clazz){ return (T) objects[offsetReference+fieldId]; } }
apache-2.0
stokito/gag
gag-agent/src/main/java/com/github/stokito/gag/agent/AnswerToLifeGenerator.java
5042
/**
 * Copyright 2010 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.stokito.gag.agent;

import com.google.common.collect.ImmutableList;
import com.github.stokito.gag.annotation.enforceable.AnswerToTheUltimateQuestionOfLifeTheUniverseAndEverything;
import com.github.stokito.gag.instrument.AnnotationStateError;
import com.github.stokito.gag.instrument.ClassGenerator;
import com.github.stokito.gag.instrument.info.AnnoInfo;
import com.github.stokito.gag.instrument.info.ClassInfo;
import com.github.stokito.gag.instrument.info.LocalVarInfo;
import com.github.stokito.gag.instrument.info.MethodInfo;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Type;

import java.util.List;

import static org.objectweb.asm.Opcodes.*;

/**
 * Bytecode generator that, at method entry, overwrites every local variable annotated with
 * {@code @AnswerToTheUltimateQuestionOfLifeTheUniverseAndEverything} with the value 42,
 * using the load/store instruction pair appropriate for the variable's type.
 */
public class AnswerToLifeGenerator extends ClassGenerator {
    private static final Type ANSWER_TYPE =
            Type.getType(AnswerToTheUltimateQuestionOfLifeTheUniverseAndEverything.class);

    private static final int FORTY_TWO = 42;

    @Override
    protected boolean canInstrument(ClassInfo classInfo) {
        // Only instrument classes that actually use the annotation somewhere.
        return classInfo.hasLocalVarAnnoAnywhere(ANSWER_TYPE);
    }

    /** Supported types that have a valueOf(String) method. */
    private final List<Type> supportedValueOfTypes = ImmutableList.of(
            Type.getType(Integer.class),
            Type.getType(Long.class),
            Type.getType(Double.class),
            Type.getType(Float.class),
            Type.getType(Byte.class),
            Type.getType(Short.class));

    @Override
    public MethodVisitor visitMethod(int access, String name, String desc,
                                     String sig, String[] exceptions) {
        MethodVisitor mv = writer().visitMethod(access, name, desc, sig, exceptions);
        mv.visitCode();
        MethodInfo method = classInfo().getMethod(name, desc);
        for (LocalVarInfo param : method.getLocalVars()) {
            AnnoInfo anno = param.getAnnoFor(ANSWER_TYPE);
            if (anno == null) {
                continue;
            }
            Type paramType = param.getType();
            // Push 42 as a constant of the matching primitive width, then store it
            // into the annotated local variable slot.
            switch (paramType.getSort()) {
                case Type.INT:
                case Type.BYTE:
                case Type.CHAR:
                case Type.SHORT:
                    // byte/char/short all live in int-sized slots, so ISTORE applies.
                    mv.visitLdcInsn(FORTY_TWO);
                    mv.visitVarInsn(ISTORE, param.getIndex());
                    break;
                case Type.LONG:
                    mv.visitLdcInsn((long) FORTY_TWO);
                    mv.visitVarInsn(LSTORE, param.getIndex());
                    break;
                case Type.DOUBLE:
                    mv.visitLdcInsn((double) FORTY_TWO);
                    mv.visitVarInsn(DSTORE, param.getIndex());
                    break;
                case Type.FLOAT:
                    mv.visitLdcInsn((float) FORTY_TWO);
                    mv.visitVarInsn(FSTORE, param.getIndex());
                    break;
                case Type.OBJECT:
                    // Boxed types: emit a valueOf(...) call — see visitObject.
                    visitObject(mv, param);
                    break;
                default:
                    throwUnsupportedException(param);
            }
            setInstrumented(true);
        }
        mv.visitEnd();
        return mv;
    }

    /** TODO: Support BigDecimal and BigInteger. */
    private void visitObject(MethodVisitor mv, LocalVarInfo param) {
        Type paramType = param.getType();
        if (Type.getType(Character.class).equals(paramType)) {
            // Character has valueOf(char) but not valueOf(String); handled specially.
            mv.visitLdcInsn((char) FORTY_TWO);
            mv.visitMethodInsn(INVOKESTATIC, paramType.getInternalName(),
                    "valueOf", "(C)L" + paramType.getInternalName() + ";");
            mv.visitVarInsn(ASTORE, param.getIndex());
        } else if (supportedValueOfTypes.contains(paramType)) {
            // All other supported wrappers are built via valueOf(String).
            mv.visitLdcInsn(String.valueOf(FORTY_TWO));
            mv.visitMethodInsn(INVOKESTATIC, paramType.getInternalName(),
                    "valueOf", "(Ljava/lang/String;)L" + paramType.getInternalName() + ";");
            mv.visitVarInsn(ASTORE, param.getIndex());
        } else {
            throwUnsupportedException(param);
        }
    }

    private void throwUnsupportedException(LocalVarInfo param) {
        throw new AnnotationStateError("Unsupported parameter type ("
                + param.getType() + ") for "
                + AnswerToTheUltimateQuestionOfLifeTheUniverseAndEverything.class);
    }
}
apache-2.0
normanmaurer/niosmtp
src/main/java/me/normanmaurer/niosmtp/SMTPByteArrayMessage.java
2366
/**
 * Licensed to niosmtp developers ('niosmtp') under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * niosmtp licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package me.normanmaurer.niosmtp;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

/**
 * Abstract base implementation of {@link SMTPMessage} which allows to access the raw <code>byte</code> array of the {@link SMTPMessage}. This
 * should only be used if you are sure that the whole {@link SMTPMessage} fit into memory.
 *
 * The {@link #get7BitAsByteArray()} and {@link #get8BitAsByteArray()} methods can be used by the Transport implementation for better performing
 * write operations.
 *
 *
 * @author Norman Maurer
 *
 */
public abstract class SMTPByteArrayMessage implements SMTPMessage {

    // Lazily created stream views over the subclass-supplied byte arrays.
    // Each is built at most once and then handed back on every later call.
    private InputStream sevenBitStream;
    private InputStream eightBitStream;

    @Override
    public InputStream get7bit() throws IOException {
        InputStream stream = sevenBitStream;
        if (stream == null) {
            stream = new ByteArrayInputStream(get7BitAsByteArray());
            sevenBitStream = stream;
        }
        return stream;
    }

    @Override
    public InputStream get8Bit() throws IOException {
        InputStream stream = eightBitStream;
        if (stream == null) {
            stream = new ByteArrayInputStream(get8BitAsByteArray());
            eightBitStream = stream;
        }
        return stream;
    }

    /**
     * Return the <code>byte</code> array which should be used for the 7Bit message input.
     *
     * @return 7bit
     */
    public abstract byte[] get7BitAsByteArray();

    /**
     * Return the <code>byte</code> array which should be used for the 8Bit message input.
     *
     * @return 8bit
     */
    public abstract byte[] get8BitAsByteArray();
}
apache-2.0
TheLimeGlass/Skellett
src/main/java/com/gmail/thelimeglass/Npcs/ExprNewNpc.java
2050
package com.gmail.thelimeglass.Npcs; import org.bukkit.entity.EntityType; import org.bukkit.event.Event; import org.eclipse.jdt.annotation.Nullable; import com.gmail.thelimeglass.Utils.Annotations.Config; import com.gmail.thelimeglass.Utils.Annotations.FullConfig; import com.gmail.thelimeglass.Utils.Annotations.MainConfig; import com.gmail.thelimeglass.Utils.Annotations.PropertyType; import com.gmail.thelimeglass.Utils.Annotations.Syntax; import ch.njol.skript.lang.Expression; import ch.njol.skript.lang.ExpressionType; import ch.njol.skript.lang.SkriptParser.ParseResult; import ch.njol.skript.lang.util.SimpleExpression; import ch.njol.util.Kleenean; import net.citizensnpcs.api.CitizensAPI; import net.citizensnpcs.api.npc.NPC; import net.citizensnpcs.api.npc.NPCRegistry; @Syntax({"[a] [new] (npc|citizen) [with] (name[d]|id|string) %string% [and] [with] [entity [type]] %string%", "[a] [new] npc [with] [entity [type]] %string% [and] [with] (name[d]|id|string) %string%"}) @Config("PluginHooks.Npc") @FullConfig @MainConfig @PropertyType(ExpressionType.SIMPLE) public class ExprNewNpc extends SimpleExpression<NPC>{ private Expression<String> name, type; @Override public Class<? extends NPC> getReturnType() { return NPC.class; } @Override public boolean isSingle() { return true; } @SuppressWarnings("unchecked") @Override public boolean init(Expression<?>[] e, int matchedPattern, Kleenean isDelayed, ParseResult parser) { if (matchedPattern == 0) { name = (Expression<String>) e[0]; type = (Expression<String>) e[1]; } else { type = (Expression<String>) e[0]; name = (Expression<String>) e[1]; } return true; } @Override public String toString(@Nullable Event e, boolean arg1) { return "[a] [new] npc [with] (name[d]|id|string) %string% [and] [with] [entity [type]] %string%"; } @Override @Nullable protected NPC[] get(Event e) { NPCRegistry registry = CitizensAPI.getNPCRegistry(); return new NPC[]{registry.createNPC(EntityType.valueOf(type.getSingle(e)), name.getSingle(e))}; } }
apache-2.0
FasterXML/jackson-core
src/test/java/com/fasterxml/jackson/core/read/ParserClosingTest.java
5970
package com.fasterxml.jackson.core.read;

import static org.junit.Assert.*;

import com.fasterxml.jackson.core.*;

import java.io.*;

/**
 * Set of basic unit tests that verify that the closing (or not) of
 * the underlying source occurs as expected and specified
 * by documentation.
 */
public class ParserClosingTest
    extends BaseTest
{
    /**
     * This unit test checks the default behaviour; with no auto-close, no
     * automatic closing should occur, nor explicit one unless specific
     * forcing method is used.
     */
    public void testNoAutoCloseReader() throws Exception
    {
        final String DOC = "[ 1 ]";

        // Check the default settings
        assertTrue(sharedStreamFactory().isEnabled(StreamReadFeature.AUTO_CLOSE_SOURCE));
        // then change
        JsonFactory f = JsonFactory.builder()
                .disable(StreamReadFeature.AUTO_CLOSE_SOURCE)
                .build();
        assertFalse(f.isEnabled(StreamReadFeature.AUTO_CLOSE_SOURCE));
        {
            // Feature flag must agree with the StreamReadFeature view.
            assertFalse(f.isEnabled(JsonParser.Feature.AUTO_CLOSE_SOURCE));
        }

        @SuppressWarnings("resource")
        MyReader input = new MyReader(DOC);
        JsonParser jp = f.createParser(input);

        // shouldn't be closed to begin with...
        assertFalse(input.isClosed());
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
        assertToken(JsonToken.END_ARRAY, jp.nextToken());
        assertNull(jp.nextToken());
        // normally would be closed now
        assertFalse(input.isClosed());
        // regular close won't close it either:
        jp.close();
        assertFalse(input.isClosed());
    }

    @SuppressWarnings("resource")
    public void testAutoCloseReader() throws Exception
    {
        final String DOC = "[ 1 ]";

        JsonFactory f = JsonFactory.builder()
                .enable(StreamReadFeature.AUTO_CLOSE_SOURCE)
                .build();
        MyReader input = new MyReader(DOC);
        JsonParser jp = f.createParser(input);
        assertFalse(input.isClosed());
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        // but can close half-way through
        jp.close();
        assertTrue(input.isClosed());

        // And then let's test implicit close at the end too:
        input = new MyReader(DOC);
        jp = f.createParser(input);
        assertFalse(input.isClosed());
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
        assertToken(JsonToken.END_ARRAY, jp.nextToken());
        // reaching end-of-input should auto-close the source when the feature is on
        assertNull(jp.nextToken());
        assertTrue(input.isClosed());
    }

    @SuppressWarnings("resource")
    public void testNoAutoCloseInputStream() throws Exception
    {
        final String DOC = "[ 1 ]";
        JsonFactory f = JsonFactory.builder()
                .disable(StreamReadFeature.AUTO_CLOSE_SOURCE)
                .build();
        MyStream input = new MyStream(DOC.getBytes("UTF-8"));
        JsonParser jp = f.createParser(input);

        // shouldn't be closed to begin with...
        assertFalse(input.isClosed());
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
        assertToken(JsonToken.END_ARRAY, jp.nextToken());
        assertNull(jp.nextToken());
        // normally would be closed now
        assertFalse(input.isClosed());
        // regular close won't close it either:
        jp.close();
        assertFalse(input.isClosed());
    }

    // [JACKSON-287]
    public void testReleaseContentBytes() throws Exception
    {
        // trailing "foobar" is content buffered past the JSON value
        byte[] input = "[1]foobar".getBytes("UTF-8");
        JsonParser jp = sharedStreamFactory().createParser(input);
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
        assertToken(JsonToken.END_ARRAY, jp.nextToken());
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // theoretically could have only read subset; but current impl is more greedy
        assertEquals(6, jp.releaseBuffered(out));
        assertArrayEquals("foobar".getBytes("UTF-8"), out.toByteArray());
        // also will "drain" so can not release twice
        assertEquals(0, jp.releaseBuffered(out));
        jp.close();
    }

    public void testReleaseContentChars() throws Exception
    {
        // trailing "xyz" is content buffered past the JSON value
        JsonParser jp = sharedStreamFactory().createParser("[true]xyz");
        assertToken(JsonToken.START_ARRAY, jp.nextToken());
        assertToken(JsonToken.VALUE_TRUE, jp.nextToken());
        assertToken(JsonToken.END_ARRAY, jp.nextToken());
        StringWriter sw = new StringWriter();
        // theoretically could have only read subset; but current impl is more greedy
        assertEquals(3, jp.releaseBuffered(sw));
        assertEquals("xyz", sw.toString());
        // also will "drain" so can not release twice
        assertEquals(0, jp.releaseBuffered(sw));
        jp.close();
    }

    /*
    /**********************************************************
    /* Helper classes
    /**********************************************************
     */

    // Reader that records whether close() has been called.
    final static class MyReader extends StringReader
    {
        boolean mIsClosed = false;

        public MyReader(String contents) {
            super(contents);
        }

        @Override
        public void close() {
            mIsClosed = true;
            super.close();
        }

        public boolean isClosed() { return mIsClosed; }
    }

    // InputStream that records whether close() has been called.
    final static class MyStream extends ByteArrayInputStream
    {
        boolean mIsClosed = false;

        public MyStream(byte[] data) {
            super(data);
        }

        @Override
        public void close() throws IOException {
            mIsClosed = true;
            super.close();
        }

        public boolean isClosed() { return mIsClosed; }
    }
}
apache-2.0
kochedykov/jlibmodbus
src/com/intelligt/modbus/jlibmodbus/serial/SerialPortJSerialComm.java
3904
package com.intelligt.modbus.jlibmodbus.serial;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/*
 * Copyright (C) 2017 Vladislav Y. Kochedykov
 *
 * [http://jlibmodbus.sourceforge.net]
 *
 * This file is part of JLibModbus.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Authors: Vladislav Y. Kochedykov, software engineer.
 * email: vladislav.kochedykov@gmail.com
 */

/**
 * Serial port transport backed by the jSerialComm library.
 * <p>
 * Wraps a {@code com.fazecast.jSerialComm.SerialPort} and exposes the
 * blocking read/write interface expected by the abstract {@link SerialPort}.
 * Not thread-safe: callers are expected to serialize access externally.
 */
public class SerialPortJSerialComm extends SerialPort {

    private com.fazecast.jSerialComm.SerialPort port;
    private InputStream in;
    private OutputStream out;
    /** Single-byte scratch buffer reused by {@link #read()}. */
    final byte[] b = new byte[1];

    public SerialPortJSerialComm(SerialParameters sp) {
        super(sp);
    }

    /**
     * Writes a single byte to the port.
     *
     * @param b the byte to write (low 8 bits are used)
     * @throws IOException if the port is not opened or the write fails
     */
    @Override
    public void write(int b) throws IOException {
        if (!isOpened()) {
            throw new IOException("Port not opened");
        }
        try {
            out.write((byte) b);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    /**
     * Writes the whole byte array to the port.
     *
     * @throws IOException if the port is not opened or the write fails
     */
    @Override
    public void write(byte[] bytes) throws IOException {
        if (!isOpened()) {
            throw new IOException("Port not opened");
        }
        try {
            out.write(bytes);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    /**
     * Opens the underlying port with the configured serial parameters and a
     * blocking read timeout taken from {@link #getReadTimeout()}.
     */
    @Override
    public void open() throws SerialPortException {
        SerialParameters sp = getSerialParameters();
        port = com.fazecast.jSerialComm.SerialPort.getCommPort(sp.getDevice());
        port.openPort();
        port.setComPortParameters(sp.getBaudRate(), sp.getDataBits(), sp.getStopBits(), sp.getParity().getValue());
        port.setFlowControl(com.fazecast.jSerialComm.SerialPort.FLOW_CONTROL_DISABLED);
        in = port.getInputStream();
        out = port.getOutputStream();
        port.setComPortTimeouts(com.fazecast.jSerialComm.SerialPort.TIMEOUT_READ_BLOCKING, getReadTimeout(), 0);
    }

    /**
     * Updates the read timeout; if the port is already open the change is
     * applied immediately.
     */
    @Override
    public void setReadTimeout(int readTimeout) {
        super.setReadTimeout(readTimeout);
        if (isOpened()) {
            // FIX: use the same blocking read mode as open(); the original
            // switched an open port to TIMEOUT_NONBLOCKING, silently changing
            // read semantics after a timeout update.
            port.setComPortTimeouts(com.fazecast.jSerialComm.SerialPort.TIMEOUT_READ_BLOCKING, getReadTimeout(), 0);
        }
    }

    /**
     * Reads a single byte.
     *
     * @return the byte read, as an unsigned value in the range 0..255
     * @throws IOException if the port is not opened, the read fails, or the
     *                     read times out without delivering a byte
     */
    @Override
    public int read() throws IOException {
        if (!isOpened()) {
            throw new IOException("Port not opened");
        }
        int c;
        try {
            c = in.read(b, 0, b.length);
        } catch (Exception e) {
            throw new IOException(e);
        }
        if (c > 0)
            // FIX: mask to an unsigned value. Returning b[0] directly
            // sign-extends bytes >= 0x80 to negative ints, violating the
            // InputStream.read() contract of returning 0..255.
            return b[0] & 0xFF;
        else
            throw new IOException("Read timeout");
    }

    /**
     * Reads up to {@code len} bytes into {@code b} starting at {@code off}.
     *
     * @return the number of bytes actually read
     * @throws IOException if the port is not opened, the read fails, or the
     *                     underlying stream reports a timeout
     */
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        if (!isOpened()) {
            throw new IOException("Port not opened");
        }
        int c;
        try {
            c = in.read(b, off, len);
        } catch (Exception e) {
            throw new IOException(e);
        }
        if (c > -1)
            return c;
        else
            throw new IOException("Read timeout");
    }

    /**
     * Closes the streams and the underlying port. Errors during close are
     * logged but deliberately not propagated (best-effort cleanup).
     */
    @Override
    public void close() {
        try {
            if (isOpened()) {
                in.close();
                out.close();
                port.closePort();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * @return true when the underlying port exists and is open. The null
     *         guard makes this safe to call before {@link #open()}; the
     *         original dereferenced {@code port} unconditionally and threw
     *         NullPointerException instead of reporting "not opened".
     */
    @Override
    public boolean isOpened() {
        return port != null && port.isOpen();
    }
}
apache-2.0
haoch/kylin
engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/CuboidJob.java
9526
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kylin.engine.mr.steps;

import java.io.IOException;

import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.kylin.common.KylinConfig;
import org.apache.kylin.cube.CubeInstance;
import org.apache.kylin.cube.CubeManager;
import org.apache.kylin.cube.CubeSegment;
import org.apache.kylin.cube.cuboid.CuboidCLI;
import org.apache.kylin.cube.model.CubeDesc;
import org.apache.kylin.engine.mr.CubingJob;
import org.apache.kylin.engine.mr.IMRInput.IMRTableInputFormat;
import org.apache.kylin.engine.mr.MRUtil;
import org.apache.kylin.engine.mr.common.AbstractHadoopJob;
import org.apache.kylin.engine.mr.common.BatchConstants;
import org.apache.kylin.job.exception.JobException;
import org.apache.kylin.job.manager.ExecutableManager;
import org.apache.kylin.metadata.model.SegmentStatusEnum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * MapReduce driver that builds one layer of cuboids for a cube segment.
 * Each invocation computes the cuboids of a single level (level 0 being the
 * base cuboid built from the flat table); higher levels consume the output
 * of the previous level.
 *
 * @author ysong1
 */
public class CuboidJob extends AbstractHadoopJob {

    protected static final Logger logger = LoggerFactory.getLogger(CuboidJob.class);

    // Hadoop configuration key controlling the reducer count for this job.
    private static final String MAPRED_REDUCE_TASKS = "mapred.reduce.tasks";

    // Mapper implementation; must be injected via setMapperClass() before run().
    @SuppressWarnings("rawtypes")
    private Class<? extends Mapper> mapperClass;

    // Set by checkSkip(): true when the owning cubing job decided not to use
    // layered cubing, in which case this step is a no-op.
    private boolean skipped = false;

    @Override
    public boolean isSkipped() {
        return skipped;
    }

    // Looks up the parent cubing job and skips this layer-cubing step when
    // the job is configured for a different cubing algorithm.
    private boolean checkSkip(String cubingJobId) {
        if (cubingJobId == null)
            return false;

        ExecutableManager execMgr = ExecutableManager.getInstance(KylinConfig.getInstanceFromEnv());
        CubingJob cubingJob = (CubingJob) execMgr.getJob(cubingJobId);
        skipped = cubingJob.isLayerCubing() == false;
        return skipped;
    }

    /**
     * Parses CLI options, wires up mapper/combiner/reducer and input/output
     * formats, sizes the reducer count for this cuboid level, and submits the
     * job, blocking until completion.
     *
     * @return the job's exit code (0 on success, also 0 when skipped)
     * @throws Exception if the mapper class was not set, option parsing
     *                   fails, or job submission/execution fails
     */
    @Override
    public int run(String[] args) throws Exception {
        if (this.mapperClass == null)
            throw new Exception("Mapper class is not set!");

        Options options = new Options();

        try {
            options.addOption(OPTION_JOB_NAME);
            options.addOption(OPTION_CUBE_NAME);
            options.addOption(OPTION_SEGMENT_NAME);
            options.addOption(OPTION_INPUT_PATH);
            options.addOption(OPTION_OUTPUT_PATH);
            options.addOption(OPTION_NCUBOID_LEVEL);
            options.addOption(OPTION_INPUT_FORMAT);
            options.addOption(OPTION_CUBING_JOB_ID);
            parseOptions(options, args);

            Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
            String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
            int nCuboidLevel = Integer.parseInt(getOptionValue(OPTION_NCUBOID_LEVEL));
            String segmentName = getOptionValue(OPTION_SEGMENT_NAME);
            String cubingJobId = getOptionValue(OPTION_CUBING_JOB_ID);

            KylinConfig config = KylinConfig.getInstanceFromEnv();
            CubeManager cubeMgr = CubeManager.getInstance(config);
            CubeInstance cube = cubeMgr.getCube(cubeName);

            // Bail out early if the parent job chose a non-layered algorithm.
            if (checkSkip(cubingJobId)) {
                logger.info("Skip job " + getOptionValue(OPTION_JOB_NAME) + " for " + cubeName + "[" + segmentName + "]");
                return 0;
            }

            job = Job.getInstance(getConf(), getOptionValue(OPTION_JOB_NAME));
            logger.info("Starting: " + job.getJobName());

            setJobClasspath(job);

            // Mapper
            configureMapperInputFormat(cube.getSegment(segmentName, SegmentStatusEnum.NEW));
            job.setMapperClass(this.mapperClass);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setCombinerClass(CuboidReducer.class); // for base cuboid shuffle skew, some rowkey aggregates far more records than others

            // Reducer
            job.setReducerClass(CuboidReducer.class);
            job.setOutputFormatClass(SequenceFileOutputFormat.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);

            FileOutputFormat.setOutputPath(job, output);

            // set job configuration
            job.getConfiguration().set(BatchConstants.CFG_CUBE_NAME, cubeName);
            job.getConfiguration().set(BatchConstants.CFG_CUBE_SEGMENT_NAME, segmentName);
            job.getConfiguration().setInt(BatchConstants.CFG_CUBE_CUBOID_LEVEL, nCuboidLevel);

            // add metadata to distributed cache
            attachKylinPropsAndMetadata(cube, job.getConfiguration());

            setReduceTaskNum(job, config, cubeName, nCuboidLevel);

            // Clear a previous run's output so FileOutputFormat does not fail.
            this.deletePath(job.getConfiguration(), output);

            return waitForCompletion(job);
        } catch (Exception e) {
            logger.error("error in CuboidJob", e);
            printUsage(options);
            throw e;
        } finally {
            if (job != null)
                cleanupTempConfFile(job.getConfiguration());
        }
    }

    // Chooses the mapper input: the flat table (base cuboid, level 0) or the
    // previous level's cuboid files, whose format may be text or sequence file.
    private void configureMapperInputFormat(CubeSegment cubeSeg) throws IOException {
        String input = getOptionValue(OPTION_INPUT_PATH);

        if ("FLAT_TABLE".equals(input)) {
            // base cuboid case
            IMRTableInputFormat flatTableInputFormat = MRUtil.getBatchCubingInputSide(cubeSeg).getFlatTableInputFormat();
            flatTableInputFormat.configureJob(job);
        } else {
            // n-dimension cuboid case
            FileInputFormat.setInputPaths(job, new Path(input));
            if (hasOption(OPTION_INPUT_FORMAT) && ("textinputformat".equalsIgnoreCase(getOptionValue(OPTION_INPUT_FORMAT)))) {
                job.setInputFormatClass(TextInputFormat.class);
            } else {
                job.setInputFormatClass(SequenceFileInputFormat.class);
            }
        }
    }

    /**
     * Estimates and sets the reducer count for this cuboid level.
     * The estimate scales the total map input size by the cuboid-count ratio
     * between this level and the previous one, divides by the configured
     * per-reducer input size, and clamps the result to [1, max reducers].
     *
     * @param job      the job whose configuration is updated
     * @param config   Kylin config used to look up the cube descriptor
     * @param cubeName name of the cube being built
     * @param level    cuboid level (0 = base cuboid)
     */
    protected void setReduceTaskNum(Job job, KylinConfig config, String cubeName, int level) throws ClassNotFoundException, IOException, InterruptedException, JobException {
        Configuration jobConf = job.getConfiguration();
        KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();

        CubeDesc cubeDesc = CubeManager.getInstance(config).getCube(cubeName).getDescriptor();

        double perReduceInputMB = kylinConfig.getDefaultHadoopJobReducerInputMB();
        double reduceCountRatio = kylinConfig.getDefaultHadoopJobReducerCountRatio();

        // total map input MB
        double totalMapInputMB = this.getTotalMapInputMB();

        // output / input ratio
        int preLevelCuboids, thisLevelCuboids;
        if (level == 0) { // base cuboid
            preLevelCuboids = thisLevelCuboids = 1;
        } else { // n-cuboid
            int[] allLevelCount = CuboidCLI.calculateAllLevelCount(cubeDesc);
            preLevelCuboids = allLevelCount[level - 1];
            thisLevelCuboids = allLevelCount[level];
        }

        // total reduce input MB
        double totalReduceInputMB = totalMapInputMB * thisLevelCuboids / preLevelCuboids;

        // number of reduce tasks
        int numReduceTasks = (int) Math.round(totalReduceInputMB / perReduceInputMB * reduceCountRatio);

        // adjust reducer number for cube which has DISTINCT_COUNT measures for better performance
        if (cubeDesc.hasMemoryHungryMeasures()) {
            numReduceTasks = numReduceTasks * 4;
        }

        // at least 1 reducer
        numReduceTasks = Math.max(1, numReduceTasks);
        // no more than 5000 reducer by default
        numReduceTasks = Math.min(kylinConfig.getHadoopJobMaxReducerNumber(), numReduceTasks);

        jobConf.setInt(MAPRED_REDUCE_TASKS, numReduceTasks);

        logger.info("Having total map input MB " + Math.round(totalMapInputMB));
        logger.info("Having level " + level + ", pre-level cuboids " + preLevelCuboids + ", this level cuboids " + thisLevelCuboids);
        logger.info("Having per reduce MB " + perReduceInputMB + ", reduce count ratio " + reduceCountRatio);
        logger.info("Setting " + MAPRED_REDUCE_TASKS + "=" + numReduceTasks);
    }

    /**
     * @param mapperClass
     *            the mapperClass to set
     */
    @SuppressWarnings("rawtypes")
    public void setMapperClass(Class<? extends Mapper> mapperClass) {
        this.mapperClass = mapperClass;
    }
}
apache-2.0
OmniKryptec/OmniKryptec-Engine
src/main/java/de/omnikryptec/old/event/input/InputState.java
1257
/* * Copyright 2017 - 2019 Roman Borris (pcfreak9000), Paul Hagedorn (Panzer1119) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.omnikryptec.old.event.input; import org.lwjgl.glfw.GLFW; /** * InputState * * @author Panzer1119 */ public enum InputState { NOTHING(-1), RELEASED(GLFW.GLFW_RELEASE), PRESSED(GLFW.GLFW_PRESS), REPEATED(GLFW.GLFW_REPEAT); private final int state; InputState(int state) { this.state = state; } public static final InputState ofState(int state) { for (InputState inputState : values()) { if (inputState.getState() == state) { return inputState; } } return null; } public final int getState() { return state; } }
apache-2.0
muhd7rosli/desmoj
src/desmoj/core/dist/Distribution.java
12890
package desmoj.core.dist; import desmoj.core.simulator.Model; /** * Base class for all pseudo random number distributions used in this package. * Defines a set of methods usefull for all kinds of random distributions that * can be based upon a stream of uniform distributed pseudo random numbers. * Prefabricated distributions implemented in this package can handle uniform, * normal (gaussian), bernoulli, poisson and heuristic distributions with return * values of the primitive data types double (floating point), long (integer) * and boolean (true or false). Inherit from this class if you want to implement * new types of distributions handing back values of other types than those * listed above. Basic idea is to use a pseudo random generator which produces a * uniformly distributed stream of double numbers between 0 and 1 use inverse * transformation to generate the desired distribution. See also [Page91, p. * 107] Note that although this class implements all methods, it is set to be * abstract, since instantiating this class would not produce any meaningfull * distribution to be used by a client. * * @see desmoj.core.dist.UniformRandomGenerator * @see desmoj.core.dist.LinearCongruentialRandomGenerator * * @version DESMO-J, Ver. 2.4.1 copyright (c) 2014 * @author Tim Lechler * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You may * obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. 
* */ public abstract class Distribution extends desmoj.core.simulator.Reportable { /** * The underlying uniform pseudo random generator available to every * distribution inheriting from this abstract class. Valid generators have * to implement the <code>desmoj.dist.UniformRandomGenerator</code> * interface. By default the <code>desmoj.dist.DefaultRandomGenerator</code> * is used. * * @see desmoj.core.dist.UniformRandomGenerator * @see desmoj.core.dist.LinearCongruentialRandomGenerator */ protected desmoj.core.dist.UniformRandomGenerator randomGenerator; /** * The status of the random number generation. If set to true, antithetic * values are delivered. These depend upon the kind of distribution, so this * value here will probably be most useful to switch the algorithm in the * implementation of the abstract <code>sample()</code> method between * "normal" and "antithetic" value generation. This feature is not * associated to the pseudo random generator since the algorithm for * calculating antithetic values might not require antithetic uniformly * distributed values. */ protected boolean antithetic; /** * The seed of the underlying pseudorandom generator. The seed value is * passed on to the underlying <code>UniformRandomGenerator</code> but since * those generators are not supposed to keep track of their initial seed * value it is stored here to make sure they are not lost. */ protected long initialSeed; /** * This flag shows, if a distribution may produce negative samples or not. * This is important, if the value of a distribution's sample is to be used * for creating a TimeSpan object, which allows positive values only. If * this switch is set to <code>true</code>, the distribution will only * return positive samples. If a negative sample is drawn, it will be * dismissed and new samples will be drawn until a positive is produced, * which will be returned. 
*/ protected boolean nonNegative; /** * Creates a RandomDistribution object which gets its initial seed from the * experiment's seedgenerator. The * <code>LinearCongruentialRandomGenerator</code> is used as the underlying * uniform pseudo random number generator for all pseudo random distribution * . * * @param owner * Model : The distribution's owner * @param name * java.lang.String : The distribution's name * @param showInReport * boolean : Flag to show distribution in report * @param showInTrace * boolean : Flag to show distribution in trace */ public Distribution(Model owner, String name, boolean showInReport, boolean showInTrace) { super(owner, name, showInReport, showInTrace); // construct the // reportable if (randomGenerator == null) { try { randomGenerator = owner.getExperiment() .getDistributionManager().getRandomNumberGenerator() .newInstance(); // default RandomGenerator } catch (InstantiationException e) { randomGenerator = new LinearCongruentialRandomGenerator(); } catch (IllegalAccessException e) { randomGenerator = new LinearCongruentialRandomGenerator(); } } owner.getExperiment().getDistributionManager().register(this); // set seed in case experiment running // (for not yet running experiments, this happens automatically // when the experiment is started) if (owner.getExperiment().isRunning()) { randomGenerator.setSeed(initialSeed); } } /** * Changes the underlying random generator to the one given as a parameter. * Custom random generators have to implement the * desmoj.dist.UniormRandomGenerator interface. Note that changing the * underlying random generator forces a reset, since a new generator might * produce a completely different stream of pseudo random numbers that won't * enable us to reproduce the stream of numbers probably delivered by the * previously used generator. 
* * @param randomGenerator * java.util.Random : the random generator used for creating * distributions */ public void changeRandomGenerator( desmoj.core.dist.UniformRandomGenerator randomGenerator) { this.randomGenerator = randomGenerator; reset(); } /** * Creates the default reporter associated with this distribution. The basic * <code>DistributionReporter</code> returned as a default implementation of * this method simply reports the distribution's name, number of * observations (samples given), seed and point of simulation time of the * last reset. * * @return Reportable : The reporter associated with this distribution * @see desmoj.core.report.DistributionReporter */ public desmoj.core.report.Reporter createReporter() { return new desmoj.core.report.DistributionReporter(this); } /** * Returns the seed value since last reset. * * @return long : The initial seed value */ public long getInitialSeed() { return initialSeed; } /** * Tells if this distribution can return negative samples. * * @return boolean : If <code>true</code> it returns positive samples only */ public boolean getNonNegative() { return nonNegative; } /** * Returns the number of Samples given by this distribution. The number of * samples is increased whenever the sample() method is called. It is based * on the random numbers of the distribution, not on the number of random * numbers produced by the underlying random generator, since some * distributions use algorithms consuming more than one uniformly * distributed random number to produce one sample following the desired * distribution. * * @return long : the number of samples given to clients */ public long getNumSamples() { return getObservations(); } /** * Returns the current status for antithetic random number generation in * this distribution. 
* * @return boolean : The status of antithetic pseudo random number * generation * @see desmoj.core.dist.Distribution#setAntithetic */ public boolean isAntithetic() { return antithetic; } /** * Resets the pseudo random generator's seed and the number of samples given * to zero. The field antithetic keeps the value it has had before the * reset. */ public void reset() { if (randomGenerator == null) { try { randomGenerator = this.getModel().getExperiment() .getDistributionManager().getRandomNumberGenerator() .newInstance(); // default RandomGenerator } catch (InstantiationException e) { randomGenerator = new LinearCongruentialRandomGenerator(); } catch (IllegalAccessException e) { randomGenerator = new LinearCongruentialRandomGenerator(); } } // sets seed to the seed specified in constructor or by call to // setSeed(long) randomGenerator.setSeed(initialSeed); // initialSeed stays unchanged // here // antithetic = false; /* * no need to change this to false. If this distribution has delivered * antithetic random number than it will do so after the reset, too. */ super.reset(); // reset the Reportable, too. } /** * Resets the pseudo random generator's seed to the value passed, the number * of samples given to zero and sets antithetic to false for this * distribution. Acts the same as a call of method <code>reset()</code> and * a consecutive call to <code>setSeed(long)</code>. * * @param newSeed * long : new seed to be used by underlying random number * generator after reset */ public void reset(long newSeed) { randomGenerator.setSeed(newSeed); this.initialSeed = newSeed; // initialSeed is changed here // antithetic = false; /* * no need to change this to false. If this distribution has delivered * antithetic random number than it will do so after the reset, too. */ super.reset(); // reset the Reportable, too. } /** * Convenience method to return the distribution's sample as <code>Object</code>. * For type safety, method <code>sample()</code> should be preferred. 
However, * this method is useful for environments requiring a non-genetic access point * to obtain samples from any distribution. * * @return Object : A sample from this this distribution wrapped as <code>Object</code>. */ public abstract Object sampleObject(); /** * Switches this distribution to produce antithetic samples. To obtain * antithetic random numbers, call this method with the parameter * <code>true</code>. Antithetic random numbers are used to minimize the * standard deviation of a series of simulation runs. The results of a run * with normal random numbers has to be standardized with the results of a * run using antithetic random numbers, thus doubling the number of samples * needed, but also lowering the standard deviation of the results of that * simulation. See [Page91, p.139]. * * @param newAntiStatus * boolean : Parameter <code>true</code> switches antithetic mode * on, <code>false</code> switches antithetic mode off */ public void setAntithetic(boolean newAntiStatus) { antithetic = newAntiStatus; reset(); } /** * Sets the nonNegative switch to the given value. If nonNegative is set to * <code>true</code> the distribution returns positive samples only, * otherwise it also produces negative samples, if possible. * * @param newValue * boolean : If <code>true</code> the distribution is set to * return positive samples only, otherwise it also produces * negative samples, if possible. */ public void setNonNegative(boolean newValue) { this.nonNegative = newValue; } /** * Sets the underlying pseudo random number generator's seed to the value * given. The seed controls the starting value of the random generators and * all following generated pseudo random numbers. Resetting the seed between * two simulation runs will let you use identical streams of random numbers. * That will enable you to compare different strategies within your model * based on the same random number stream produced by the random generator. 
* * @param newSeed * long : new seed used by underlying pseudo random number * generator */ public void setSeed(long newSeed) { randomGenerator.setSeed(newSeed); // well, the seed is passed on... // ;-) initialSeed = newSeed; // remember new seed for next reset() reset(); // and do a reset of statistics to display when a new seed // was // set } /** * Generates the trace output of each sample. This method is called by * sample(). * * @param sample * String : The last sample, converted to a String */ protected void traceLastSample(String sample) { if (this.currentlySendTraceNotes()) this.sendTraceNote("samples " + sample + " from " + this.getName()); } }
apache-2.0
mmaro/giraph
giraph-core/src/main/java/org/apache/giraph/io/formats/IdWithValueTextOutputFormat.java
3342
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.giraph.io.formats;

import java.io.IOException;

import org.apache.giraph.graph.Vertex;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * Write out Vertices' IDs and values, but not their edges nor edges' values.
 * This is a useful output format when the final value of the vertex is
 * all that's needed. The boolean configuration parameter reverse.id.and.value
 * allows reversing the output of id and value.
 *
 * @param <I> Vertex index value
 * @param <V> Vertex value
 * @param <E> Edge value
 */
@SuppressWarnings("rawtypes")
public class IdWithValueTextOutputFormat<I extends WritableComparable,
    V extends Writable, E extends Writable>
    extends TextVertexOutputFormat<I, V, E> {

  /** Specify the output delimiter */
  public static final String LINE_TOKENIZE_VALUE = "output.delimiter";
  /** Default output delimiter */
  public static final String LINE_TOKENIZE_VALUE_DEFAULT = "\t";
  /** Reverse id and value order? */
  public static final String REVERSE_ID_AND_VALUE = "reverse.id.and.value";
  /** Default is to not reverse id and value order. */
  public static final boolean REVERSE_ID_AND_VALUE_DEFAULT = false;

  @Override
  public TextVertexWriter createVertexWriter(TaskAttemptContext context) {
    return new IdWithValueVertexWriter();
  }

  /**
   * Vertex writer used with {@link IdWithValueTextOutputFormat}.
   */
  protected class IdWithValueVertexWriter extends TextVertexWriterToEachLine {
    /** Delimiter placed between the two output fields */
    private String delimiter;
    /** Whether to emit value before id instead of id before value */
    private boolean reverseOutput;

    @Override
    public void initialize(TaskAttemptContext context) throws IOException,
        InterruptedException {
      super.initialize(context);
      // Read both options once; per-vertex calls only use the cached values.
      delimiter = getConf().get(LINE_TOKENIZE_VALUE,
          LINE_TOKENIZE_VALUE_DEFAULT);
      reverseOutput = getConf().getBoolean(REVERSE_ID_AND_VALUE,
          REVERSE_ID_AND_VALUE_DEFAULT);
    }

    @Override
    protected Text convertVertexToLine(Vertex<I, V, E> vertex)
      throws IOException {
      // Render both fields, then order them according to the config flag.
      String idText = vertex.getId().toString();
      String valueText = vertex.getValue().toString();
      StringBuilder lineBuilder = new StringBuilder();
      if (reverseOutput) {
        lineBuilder.append(valueText).append(delimiter).append(idText);
      } else {
        lineBuilder.append(idText).append(delimiter).append(valueText);
      }
      return new Text(lineBuilder.toString());
    }
  }
}
apache-2.0
li1993hao/JDI
app/src/main/java/com/tiptimes/tp/widget/XListViewFooter.java
2816
/** * @file XFooterView.java * @create Mar 31, 2012 9:33:43 PM * @author Maxwin * @description XListView's footer */ package com.tiptimes.tp.widget; import android.content.Context; import android.util.AttributeSet; import android.view.LayoutInflater; import android.view.View; import android.widget.LinearLayout; import android.widget.TextView; import com.tiptimes.R; public class XListViewFooter extends LinearLayout { public final static int STATE_NORMAL = 0; public final static int STATE_READY = 1; public final static int STATE_LOADING = 2; private Context mContext; private View mContentView; private View mProgressBar; private TextView mHintView; public XListViewFooter(Context context) { super(context); initView(context); } public XListViewFooter(Context context, AttributeSet attrs) { super(context, attrs); initView(context); } public void setState(int state) { mHintView.setVisibility(View.INVISIBLE); mProgressBar.setVisibility(View.INVISIBLE); mHintView.setVisibility(View.INVISIBLE); if (state == STATE_READY) { mHintView.setVisibility(View.VISIBLE); mHintView.setText(R.string.xlistview_footer_hint_ready); } else if (state == STATE_LOADING) { mProgressBar.setVisibility(View.VISIBLE); } else { mHintView.setVisibility(View.VISIBLE); mHintView.setText(R.string.xlistview_footer_hint_normal); } } public void setBottomMargin(int height) { if (height < 0) { return; } LayoutParams lp = (LayoutParams) mContentView .getLayoutParams(); lp.bottomMargin = height; mContentView.setLayoutParams(lp); } public int getBottomMargin() { LayoutParams lp = (LayoutParams) mContentView .getLayoutParams(); return lp.bottomMargin; } /** * normal status */ public void normal() { mHintView.setVisibility(View.VISIBLE); mProgressBar.setVisibility(View.GONE); } /** * loading status */ public void loading() { mHintView.setVisibility(View.GONE); mProgressBar.setVisibility(View.VISIBLE); } /** * hide footer when disable pull load more */ public void hide() { this.setVisibility(View.GONE); } /** * 
show footer */ public void show() { this.setVisibility(View.VISIBLE); } private void initView(Context context) { mContext = context; LinearLayout moreView = (LinearLayout) LayoutInflater.from(mContext) .inflate(R.layout.library_xlistview_footer, null); addView(moreView); moreView.setLayoutParams(new LayoutParams( android.view.ViewGroup.LayoutParams.MATCH_PARENT, android.view.ViewGroup.LayoutParams.WRAP_CONTENT)); mContentView = moreView.findViewById(R.id.xlistview_footer_content); mProgressBar = moreView.findViewById(R.id.xlistview_footer_progressbar); mHintView = (TextView) moreView .findViewById(R.id.xlistview_footer_hint_textview); } }
apache-2.0
atsolakid/jena-examples
src/main/java/org/apache/jena/examples/ExampleLARQ_02.java
1986
package org.apache.jena.examples; import java.io.InputStream; import org.apache.jena.larq.assembler.AssemblerLARQ; import com.hp.hpl.jena.query.Query; import com.hp.hpl.jena.query.QueryExecution; import com.hp.hpl.jena.query.QueryExecutionFactory; import com.hp.hpl.jena.query.QueryFactory; import com.hp.hpl.jena.query.QuerySolution; import com.hp.hpl.jena.query.ResultSet; import com.hp.hpl.jena.tdb.TDBFactory; import com.hp.hpl.jena.tdb.TDBLoader; import com.hp.hpl.jena.tdb.base.file.Location; import com.hp.hpl.jena.tdb.store.DatasetGraphTDB; import com.hp.hpl.jena.util.FileManager; public class ExampleLARQ_02 { public static void main(String[] args) throws Exception { FileManager fm = FileManager.get(); fm.addLocatorClassLoader(ExampleTDB_01.class.getClassLoader()); InputStream in = fm.open("data/data.nt"); Location location = new Location ("tmp/TDB"); DatasetGraphTDB dsg = (DatasetGraphTDB)TDBFactory.createDatasetGraph(location); TDBLoader.load(dsg, in, false); // load data into TDB // build the Lucene index when pointed to a non existing directory AssemblerLARQ.make(dsg.toDataset(), "tmp/lucene"); String queryString = "PREFIX pf: <http://jena.hpl.hp.com/ARQ/property#>" + "PREFIX foaf: <http://xmlns.com/foaf/0.1/> " + "SELECT ?name ?email WHERE { " + " ?person foaf:name ?name . " + " ?name pf:textMatch '*:*' . " + // use the Lucene syntax here " OPTIONAL { ?person foaf:mbox ?email . }" + "}"; Query query = QueryFactory.create(queryString); QueryExecution qexec = QueryExecutionFactory.create(query, dsg.toDataset()); try { ResultSet results = qexec.execSelect(); while ( results.hasNext() ) { QuerySolution soln = results.nextSolution(); System.out.println(soln); } } finally { qexec.close(); } } }
apache-2.0
haikuowuya/android_system_code
src/com/sun/org/apache/xerces/internal/dom/ElementNSImpl.java
17660
/* * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. */ /* * Copyright 1999-2002,2004,2005 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sun.org.apache.xerces.internal.dom; import com.sun.org.apache.xerces.internal.xs.XSSimpleTypeDefinition; import com.sun.org.apache.xerces.internal.xs.XSTypeDefinition; import com.sun.org.apache.xerces.internal.impl.dv.xs.XSSimpleTypeDecl; import com.sun.org.apache.xerces.internal.impl.xs.XSComplexTypeDecl; import com.sun.org.apache.xerces.internal.util.URI; import com.sun.org.apache.xerces.internal.xni.NamespaceContext; import org.w3c.dom.Attr; import org.w3c.dom.DOMException; /** * ElementNSImpl inherits from ElementImpl and adds namespace support. * <P> * The qualified name is the node name, and we store localName which is also * used in all queries. On the other hand we recompute the prefix when * necessary. * * @xerces.internal * * @author Elena litani, IBM * @author Neeraj Bajaj, Sun Microsystems * @version $Id: ElementNSImpl.java,v 1.7 2010-11-01 04:39:39 joehw Exp $ */ public class ElementNSImpl extends ElementImpl { // // Constants // /** Serialization version. */ static final long serialVersionUID = -9142310625494392642L; static final String xmlURI = "http://www.w3.org/XML/1998/namespace"; // // Data // /** DOM2: Namespace URI. */ protected String namespaceURI; /** DOM2: localName. 
*/ protected String localName; /** DOM3: type information */ // REVISIT: we are losing the type information in DOM during serialization transient XSTypeDefinition type; protected ElementNSImpl() { super(); } /** * DOM2: Constructor for Namespace implementation. */ protected ElementNSImpl(CoreDocumentImpl ownerDocument, String namespaceURI, String qualifiedName) throws DOMException { super(ownerDocument, qualifiedName); setName(namespaceURI, qualifiedName); } private void setName(String namespaceURI, String qname) { String prefix; // DOM Level 3: namespace URI is never empty string. this.namespaceURI = namespaceURI; if (namespaceURI != null) { //convert the empty string to 'null' this.namespaceURI = (namespaceURI.length() == 0) ? null : namespaceURI; } int colon1, colon2 ; //NAMESPACE_ERR: //1. if the qualified name is 'null' it is malformed. //2. or if the qualifiedName is null and the namespaceURI is different from null, // We dont need to check for namespaceURI != null, if qualified name is null throw DOMException. if(qname == null){ String msg = DOMMessageFormatter.formatMessage( DOMMessageFormatter.DOM_DOMAIN, "NAMESPACE_ERR", null); throw new DOMException(DOMException.NAMESPACE_ERR, msg); } else{ colon1 = qname.indexOf(':'); colon2 = qname.lastIndexOf(':'); } ownerDocument.checkNamespaceWF(qname, colon1, colon2); if (colon1 < 0) { // there is no prefix localName = qname; if (ownerDocument.errorChecking) { ownerDocument.checkQName(null, localName); if (qname.equals("xmlns") && (namespaceURI == null || !namespaceURI.equals(NamespaceContext.XMLNS_URI)) || (namespaceURI!=null && namespaceURI.equals(NamespaceContext.XMLNS_URI) && !qname.equals("xmlns"))) { String msg = DOMMessageFormatter.formatMessage( DOMMessageFormatter.DOM_DOMAIN, "NAMESPACE_ERR", null); throw new DOMException(DOMException.NAMESPACE_ERR, msg); } } }//there is a prefix else { prefix = qname.substring(0, colon1); localName = qname.substring(colon2 + 1); //NAMESPACE_ERR: //1. 
if the qualifiedName has a prefix and the namespaceURI is null, //2. or if the qualifiedName has a prefix that is "xml" and the namespaceURI //is different from " http://www.w3.org/XML/1998/namespace" if (ownerDocument.errorChecking) { if( namespaceURI == null || ( prefix.equals("xml") && !namespaceURI.equals(NamespaceContext.XML_URI) )){ String msg = DOMMessageFormatter.formatMessage( DOMMessageFormatter.DOM_DOMAIN, "NAMESPACE_ERR", null); throw new DOMException(DOMException.NAMESPACE_ERR, msg); } ownerDocument.checkQName(prefix, localName); ownerDocument.checkDOMNSErr(prefix, namespaceURI); } } } // when local name is known protected ElementNSImpl(CoreDocumentImpl ownerDocument, String namespaceURI, String qualifiedName, String localName) throws DOMException { super(ownerDocument, qualifiedName); this.localName = localName; this.namespaceURI = namespaceURI; } // for DeferredElementImpl protected ElementNSImpl(CoreDocumentImpl ownerDocument, String value) { super(ownerDocument, value); } // Support for DOM Level 3 renameNode method. // Note: This only deals with part of the pb. CoreDocumentImpl // does all the work. 
void rename(String namespaceURI, String qualifiedName) { if (needsSyncData()) { synchronizeData(); } this.name = qualifiedName; setName(namespaceURI, qualifiedName); reconcileDefaultAttributes(); } /** * NON-DOM: resets this node and sets specified values for the node * * @param ownerDocument * @param namespaceURI * @param qualifiedName * @param localName */ protected void setValues (CoreDocumentImpl ownerDocument, String namespaceURI, String qualifiedName, String localName){ // remove children first firstChild = null; previousSibling = null; nextSibling = null; fNodeListCache = null; // set owner document attributes = null; super.flags = 0; setOwnerDocument(ownerDocument); // synchronizeData will initialize attributes needsSyncData(true); super.name = qualifiedName; this.localName = localName; this.namespaceURI = namespaceURI; } // // Node methods // // //DOM2: Namespace methods. // /** * Introduced in DOM Level 2. <p> * * The namespace URI of this node, or null if it is unspecified.<p> * * This is not a computed value that is the result of a namespace lookup based on * an examination of the namespace declarations in scope. It is merely the * namespace URI given at creation time.<p> * * For nodes created with a DOM Level 1 method, such as createElement * from the Document interface, this is null. * @since WD-DOM-Level-2-19990923 */ public String getNamespaceURI() { if (needsSyncData()) { synchronizeData(); } return namespaceURI; } /** * Introduced in DOM Level 2. <p> * * The namespace prefix of this node, or null if it is unspecified. <p> * * For nodes created with a DOM Level 1 method, such as createElement * from the Document interface, this is null. <p> * * @since WD-DOM-Level-2-19990923 */ public String getPrefix() { if (needsSyncData()) { synchronizeData(); } int index = name.indexOf(':'); return index < 0 ? null : name.substring(0, index); } /** * Introduced in DOM Level 2. 
<p> * * Note that setting this attribute changes the nodeName attribute, which holds the * qualified name, as well as the tagName and name attributes of the Element * and Attr interfaces, when applicable.<p> * * @param prefix The namespace prefix of this node, or null(empty string) if it is unspecified. * * @exception INVALID_CHARACTER_ERR * Raised if the specified * prefix contains an invalid character. * @exception DOMException * @since WD-DOM-Level-2-19990923 */ public void setPrefix(String prefix) throws DOMException { if (needsSyncData()) { synchronizeData(); } if (ownerDocument.errorChecking) { if (isReadOnly()) { String msg = DOMMessageFormatter.formatMessage(DOMMessageFormatter.DOM_DOMAIN, "NO_MODIFICATION_ALLOWED_ERR", null); throw new DOMException( DOMException.NO_MODIFICATION_ALLOWED_ERR, msg); } if (prefix != null && prefix.length() != 0) { if (!CoreDocumentImpl.isXMLName(prefix,ownerDocument.isXML11Version())) { String msg = DOMMessageFormatter.formatMessage(DOMMessageFormatter.DOM_DOMAIN, "INVALID_CHARACTER_ERR", null); throw new DOMException(DOMException.INVALID_CHARACTER_ERR, msg); } if (namespaceURI == null || prefix.indexOf(':') >=0) { String msg = DOMMessageFormatter.formatMessage(DOMMessageFormatter.DOM_DOMAIN, "NAMESPACE_ERR", null); throw new DOMException(DOMException.NAMESPACE_ERR, msg); } else if (prefix.equals("xml")) { if (!namespaceURI.equals(xmlURI)) { String msg = DOMMessageFormatter.formatMessage(DOMMessageFormatter.DOM_DOMAIN, "NAMESPACE_ERR", null); throw new DOMException(DOMException.NAMESPACE_ERR, msg); } } } } // update node name with new qualifiedName if (prefix !=null && prefix.length() != 0) { name = prefix + ":" + localName; } else { name = localName; } } /** * Introduced in DOM Level 2. <p> * * Returns the local part of the qualified name of this node. * @since WD-DOM-Level-2-19990923 */ public String getLocalName() { if (needsSyncData()) { synchronizeData(); } return localName; } /** * DOM Level 3 WD - Experimental. 
* Retrieve baseURI */ public String getBaseURI() { if (needsSyncData()) { synchronizeData(); } // Absolute base URI is computed according to XML Base (http://www.w3.org/TR/xmlbase/#granularity) // 1. the base URI specified by an xml:base attribute on the element, if one exists if (attributes != null) { Attr attrNode = (Attr)attributes.getNamedItemNS("http://www.w3.org/XML/1998/namespace", "base"); if (attrNode != null) { String uri = attrNode.getNodeValue(); if (uri.length() != 0 ) {// attribute value is always empty string try { uri = new URI(uri).toString(); } catch (com.sun.org.apache.xerces.internal.util.URI.MalformedURIException e) { // This may be a relative URI. // Start from the base URI of the parent, or if this node has no parent, the owner node. NodeImpl parentOrOwner = (parentNode() != null) ? parentNode() : ownerNode; // Make any parentURI into a URI object to use with the URI(URI, String) constructor. String parentBaseURI = (parentOrOwner != null) ? parentOrOwner.getBaseURI() : null; if (parentBaseURI != null) { try { uri = new URI(new URI(parentBaseURI), uri).toString(); } catch (com.sun.org.apache.xerces.internal.util.URI.MalformedURIException ex){ // This should never happen: parent should have checked the URI and returned null if invalid. return null; } return uri; } // REVISIT: what should happen in this case? return null; } return uri; } } } //2.the base URI of the element's parent element within the document or external entity, //if one exists String parentElementBaseURI = (this.parentNode() != null) ? this.parentNode().getBaseURI() : null ; //base URI of parent element is not null if(parentElementBaseURI != null){ try { //return valid absolute base URI return new URI(parentElementBaseURI).toString(); } catch (com.sun.org.apache.xerces.internal.util.URI.MalformedURIException e){ // REVISIT: what should happen in this case? return null; } } //3. 
the base URI of the document entity or external entity containing the element String baseURI = (this.ownerNode != null) ? this.ownerNode.getBaseURI() : null ; if(baseURI != null){ try { //return valid absolute base URI return new URI(baseURI).toString(); } catch (com.sun.org.apache.xerces.internal.util.URI.MalformedURIException e){ // REVISIT: what should happen in this case? return null; } } return null; } /** * @see org.w3c.dom.TypeInfo#getTypeName() */ public String getTypeName() { if (type !=null){ if (type instanceof XSSimpleTypeDecl) { return ((XSSimpleTypeDecl) type).getTypeName(); } else if (type instanceof XSComplexTypeDecl) { return ((XSComplexTypeDecl) type).getTypeName(); } } return null; } /** * @see org.w3c.dom.TypeInfo#getTypeNamespace() */ public String getTypeNamespace() { if (type !=null){ return type.getNamespace(); } return null; } /** * Introduced in DOM Level 2. <p> * Checks if a type is derived from another by restriction. See: * http://www.w3.org/TR/DOM-Level-3-Core/core.html#TypeInfo-isDerivedFrom * * @param ancestorNS * The namspace of the ancestor type declaration * @param ancestorName * The name of the ancestor type declaration * @param type * The reference type definition * * @return boolean True if the type is derived by restriciton for the * reference type */ public boolean isDerivedFrom(String typeNamespaceArg, String typeNameArg, int derivationMethod) { if(needsSyncData()) { synchronizeData(); } if (type != null) { if (type instanceof XSSimpleTypeDecl) { return ((XSSimpleTypeDecl) type).isDOMDerivedFrom( typeNamespaceArg, typeNameArg, derivationMethod); } else if (type instanceof XSComplexTypeDecl) { return ((XSComplexTypeDecl) type).isDOMDerivedFrom( typeNamespaceArg, typeNameArg, derivationMethod); } } return false; } /** * NON-DOM: setting type used by the DOM parser * @see NodeImpl#setReadOnly */ public void setType(XSTypeDefinition type) { this.type = type; } }
apache-2.0
IHTSDO/snow-owl
core/com.b2international.snowowl.datastore/src/com/b2international/snowowl/datastore/ComponentDeletionPlan.java
2912
/* * Copyright 2011-2018 B2i Healthcare Pte Ltd, http://b2i.sg * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.b2international.snowowl.datastore; import java.util.Collection; import java.util.Collections; import java.util.Set; import java.util.TreeSet; import java.util.function.Function; import org.eclipse.emf.cdo.CDOObject; import com.b2international.snowowl.core.ComponentIdentifier; import com.b2international.snowowl.datastore.utils.ComponentUtils2; import com.google.common.collect.Sets; /** * Deletion plan for storing the components that will be deleted. * */ public class ComponentDeletionPlan { //keep terminology component identifier and component identifier identifying the deleted objects private final Set<ComponentIdentifier> deletedComponents = Sets.newHashSet(); // keep deletedItems sorted by type for nicer display to the user private final Set<CDOObject> deletedItems = new TreeSet<CDOObject>(ComponentUtils2.CDO_OBJECT_COMPARATOR); private final Function<CDOObject, String> idProvider; private final Function<CDOObject, Short> terminologyComponentIdProvider; public ComponentDeletionPlan(Function<CDOObject, String> idProvider, Function<CDOObject, Short> terminologyComponentIdProvider) { this.idProvider = idProvider; this.terminologyComponentIdProvider = terminologyComponentIdProvider; } /** * Marks a component for deletion. * @param cdoObject the component to delete. 
*/ public void markForDeletion(final CDOObject cdoObject) { internalMarkForDeletion(Collections.singletonList(cdoObject)); } private void internalMarkForDeletion(final Collection<? extends CDOObject> items) { for (final CDOObject object : items) { deletedComponents.add(ComponentIdentifier.of(terminologyComponentIdProvider.apply(object), idProvider.apply(object))); } deletedItems.addAll(items); } public Set<ComponentIdentifier> getDeletedComponents() { return deletedComponents; } public Set<CDOObject> getDeletedItems() { return deletedItems; } /** * Returns with a copy of the deleted components represented as {@link ComponentIdentifier identifier pair} instances. * @return a set of component identifier pairs. Generally another representation of the components marked for deletion. */ public Set<ComponentIdentifier> getDeletedComponentIdentifiers() { return Collections.unmodifiableSet(deletedComponents); } }
apache-2.0
cestella/streaming_outliers
core/src/main/java/com/caseystella/analytics/converters/MeasurementConverter.java
165
package com.caseystella.analytics.converters;

/**
 * Converts an arbitrary raw measurement object into its numeric
 * ({@code Double}) value.
 * <p>
 * Marker specialization of {@code Converter} that fixes the output type to
 * {@code Double} and accepts any {@code Object} as input.
 */
public interface MeasurementConverter extends Converter<Double, Object> {
}
apache-2.0
bozzzzo/quark
quarkc/test/emit/expected/java/delegate/src/main/java/delegate_md/delegate_Pong_encode_Method.java
660
package delegate_md;

// Generated (Quark compiler output) reflection metadata for the
// delegate.Pong.encode() method: registers it as a zero-argument method
// returning "quark.String" and supplies a dynamic invoker.
public class delegate_Pong_encode_Method extends quark.reflect.Method implements io.datawire.quark.runtime.QObject {
    public delegate_Pong_encode_Method() {
        // Return type name, method name, and an (empty) list of parameter type names.
        super("quark.String", "encode", new java.util.ArrayList(java.util.Arrays.asList(new Object[]{})));
    }

    // Dynamically invokes encode() on the given Pong instance.
    // args is accepted for interface uniformity but unused: encode() takes no parameters.
    public Object invoke(Object object, java.util.ArrayList<Object> args) {
        delegate.Pong obj = (delegate.Pong) (object);
        return (obj).encode();
    }

    // QObject reflection hooks: this metadata object itself exposes no
    // class name and no fields, so these are deliberate no-ops.
    public String _getClass() {
        return (String) (null);
    }

    public Object _getField(String name) {
        return null;
    }

    public void _setField(String name, Object value) {}
}
apache-2.0
BriData/DBus
dbus-stream/dbus-stream-common/src/main/java/com/creditease/dbus/stream/common/appender/bolt/processor/kafkawriter/IncrementTerminationHandler.java
1809
/*- * << * DBus * == * Copyright (C) 2016 - 2019 Bridata * == * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * >> */ package com.creditease.dbus.stream.common.appender.bolt.processor.kafkawriter; import com.creditease.dbus.commons.DbusMessage; import com.creditease.dbus.stream.common.Constants; import com.creditease.dbus.stream.common.appender.bean.EmitData; import com.creditease.dbus.stream.common.appender.bean.MetaVersion; import com.creditease.dbus.stream.common.appender.bolt.processor.BoltCommandHandler; import com.creditease.dbus.stream.common.appender.bolt.processor.listener.KafkaBoltHandlerListener; import org.apache.storm.tuple.Tuple; /** * Created by Shrimp on 16/7/4. */ public class IncrementTerminationHandler implements BoltCommandHandler { private KafkaBoltHandlerListener listener; public IncrementTerminationHandler(KafkaBoltHandlerListener listener) { this.listener = listener; } @Override public void handle(Tuple tuple) { EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA); MetaVersion version = emitData.get(EmitData.VERSION); DbusMessage message = emitData.get(EmitData.MESSAGE); listener.writeData(version.getSchema(), version.getTable(), message, tuple); } }
apache-2.0
rosette-api/java
model/src/main/java/com/basistech/rosette/apimodel/EventsResponse.java
1075
/*
 * Copyright 2021 Basis Technology Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.basistech.rosette.apimodel;

import com.basistech.rosette.annotations.JacksonMixin;
import com.basistech.rosette.dm.Event;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;

import java.util.List;

/**
 * Simple api response data model for events
 *
 * Immutable value class: Lombok generates the getter, builder,
 * equals and hashCode; Jackson (de)serialization is configured via
 * the {@code @JacksonMixin} annotation.
 */
@Getter
@EqualsAndHashCode
@Builder
@JacksonMixin
public final class EventsResponse extends Response {
    /**
     * @return the list of events
     */
    private final List<Event> events;
}
apache-2.0
pantsbuild/jarjar
src/main/java/org/pantsbuild/jarjar/util/GetNameClassWriter.java
1331
/** * Copyright 2007 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.pantsbuild.jarjar.util; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; import org.objectweb.asm.Opcodes; public class GetNameClassWriter extends ClassVisitor { private String className; public GetNameClassWriter(int flags) { super(Opcodes.ASM7,new ClassWriter(flags)); } public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { className = name; super.visit(version, access, name, signature, superName, interfaces); } public String getClassName() { return className; } public byte[] toByteArray() { return ((ClassWriter) cv).toByteArray(); } }
apache-2.0
dawidgdanski/android-compass-api
compass/compass-api-tests/src/test/java/pl/dawidgdanski/compass/compassapi/test/assertions/AbstractTestAssert.java
1126
package pl.dawidgdanski.compass.compassapi.test.assertions;

import android.os.Parcel;
import android.os.Parcelable;

import org.fest.assertions.Assertions;
import org.fest.assertions.GenericAssert;

/**
 * Base FEST assertion class adding a Parcelable round-trip check: the actual
 * value is written to a {@link Parcel}, read back via the subclass-supplied
 * {@link Parcelable.Creator}, and compared for equality with the original.
 *
 * @param <S> the concrete assertion type (self type)
 * @param <A> the type of the value under test
 */
public abstract class AbstractTestAssert<S extends GenericAssert<S, A>, A> extends GenericAssert<S, A> {

    protected AbstractTestAssert(A actual, Class<S> selfType) {
        super(selfType, actual);
        // Fail fast on null so every subsequent assertion can assume a value.
        isNotNull();
    }

    /** @return the CREATOR used to reconstruct the value from a Parcel. */
    protected abstract Parcelable.Creator getCreator();

    /**
     * Asserts that the actual value implements {@link Parcelable} and survives
     * a write/read Parcel round trip unchanged.
     */
    public final S canBeParceled() {
        Assertions.assertThat(actual)
                .overridingErrorMessage(String.format("Class %s does not implement Parcelable interface", actual.getClass().getSimpleName()))
                .isInstanceOf(Parcelable.class);

        // Serialize into a fresh Parcel, then rewind so reading starts at 0.
        final Parcel parcel = Parcel.obtain();
        ((Parcelable) actual).writeToParcel(parcel, 0);
        parcel.setDataPosition(0);

        final A restored = (A) getCreator().createFromParcel(parcel);
        Assertions.assertThat(restored)
                .isNotNull()
                .isEqualTo(actual);

        return myself;
    }
}
apache-2.0
googleapis/java-talent
proto-google-cloud-talent-v4beta1/src/main/java/com/google/cloud/talent/v4beta1/UpdateTenantRequest.java
37041
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/cloud/talent/v4beta1/tenant_service.proto package com.google.cloud.talent.v4beta1; /** * * * <pre> * Request for updating a specified tenant. * </pre> * * Protobuf type {@code google.cloud.talent.v4beta1.UpdateTenantRequest} */ public final class UpdateTenantRequest extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.talent.v4beta1.UpdateTenantRequest) UpdateTenantRequestOrBuilder { private static final long serialVersionUID = 0L; // Use UpdateTenantRequest.newBuilder() to construct. 
private UpdateTenantRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private UpdateTenantRequest() {} @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new UpdateTenantRequest(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpdateTenantRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { com.google.cloud.talent.v4beta1.Tenant.Builder subBuilder = null; if (tenant_ != null) { subBuilder = tenant_.toBuilder(); } tenant_ = input.readMessage( com.google.cloud.talent.v4beta1.Tenant.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(tenant_); tenant_ = subBuilder.buildPartial(); } break; } case 18: { com.google.protobuf.FieldMask.Builder subBuilder = null; if (updateMask_ != null) { subBuilder = updateMask_.toBuilder(); } updateMask_ = input.readMessage(com.google.protobuf.FieldMask.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(updateMask_); updateMask_ = subBuilder.buildPartial(); } break; } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); 
makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.talent.v4beta1.TenantServiceProto .internal_static_google_cloud_talent_v4beta1_UpdateTenantRequest_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.talent.v4beta1.TenantServiceProto .internal_static_google_cloud_talent_v4beta1_UpdateTenantRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.talent.v4beta1.UpdateTenantRequest.class, com.google.cloud.talent.v4beta1.UpdateTenantRequest.Builder.class); } public static final int TENANT_FIELD_NUMBER = 1; private com.google.cloud.talent.v4beta1.Tenant tenant_; /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code>.google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> * * @return Whether the tenant field is set. */ @java.lang.Override public boolean hasTenant() { return tenant_ != null; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code>.google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> * * @return The tenant. */ @java.lang.Override public com.google.cloud.talent.v4beta1.Tenant getTenant() { return tenant_ == null ? com.google.cloud.talent.v4beta1.Tenant.getDefaultInstance() : tenant_; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. 
* </pre> * * <code>.google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ @java.lang.Override public com.google.cloud.talent.v4beta1.TenantOrBuilder getTenantOrBuilder() { return getTenant(); } public static final int UPDATE_MASK_FIELD_NUMBER = 2; private com.google.protobuf.FieldMask updateMask_; /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> * * @return Whether the updateMask field is set. */ @java.lang.Override public boolean hasUpdateMask() { return updateMask_ != null; } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> * * @return The updateMask. */ @java.lang.Override public com.google.protobuf.FieldMask getUpdateMask() { return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; } /** * * * <pre> * Strongly recommended for the best service experience. 
* If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ @java.lang.Override public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { return getUpdateMask(); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (tenant_ != null) { output.writeMessage(1, getTenant()); } if (updateMask_ != null) { output.writeMessage(2, getUpdateMask()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (tenant_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getTenant()); } if (updateMask_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.talent.v4beta1.UpdateTenantRequest)) { return super.equals(obj); } com.google.cloud.talent.v4beta1.UpdateTenantRequest other = (com.google.cloud.talent.v4beta1.UpdateTenantRequest) obj; if (hasTenant() != other.hasTenant()) return false; if (hasTenant()) { if (!getTenant().equals(other.getTenant())) return false; } 
if (hasUpdateMask() != other.hasUpdateMask()) return false; if (hasUpdateMask()) { if (!getUpdateMask().equals(other.getUpdateMask())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasTenant()) { hash = (37 * hash) + TENANT_FIELD_NUMBER; hash = (53 * hash) + getTenant().hashCode(); } if (hasUpdateMask()) { hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; hash = (53 * hash) + getUpdateMask().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.talent.v4beta1.UpdateTenantRequest 
prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * * <pre> * Request for updating a specified tenant. * </pre> * * Protobuf type {@code google.cloud.talent.v4beta1.UpdateTenantRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.cloud.talent.v4beta1.UpdateTenantRequest) com.google.cloud.talent.v4beta1.UpdateTenantRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.talent.v4beta1.TenantServiceProto .internal_static_google_cloud_talent_v4beta1_UpdateTenantRequest_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.talent.v4beta1.TenantServiceProto .internal_static_google_cloud_talent_v4beta1_UpdateTenantRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.talent.v4beta1.UpdateTenantRequest.class, com.google.cloud.talent.v4beta1.UpdateTenantRequest.Builder.class); } // Construct using com.google.cloud.talent.v4beta1.UpdateTenantRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } @java.lang.Override public Builder clear() { super.clear(); if (tenantBuilder_ == null) { tenant_ = null; } else { tenant_ = null; tenantBuilder_ = null; } if 
(updateMaskBuilder_ == null) { updateMask_ = null; } else { updateMask_ = null; updateMaskBuilder_ = null; } return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.talent.v4beta1.TenantServiceProto .internal_static_google_cloud_talent_v4beta1_UpdateTenantRequest_descriptor; } @java.lang.Override public com.google.cloud.talent.v4beta1.UpdateTenantRequest getDefaultInstanceForType() { return com.google.cloud.talent.v4beta1.UpdateTenantRequest.getDefaultInstance(); } @java.lang.Override public com.google.cloud.talent.v4beta1.UpdateTenantRequest build() { com.google.cloud.talent.v4beta1.UpdateTenantRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.talent.v4beta1.UpdateTenantRequest buildPartial() { com.google.cloud.talent.v4beta1.UpdateTenantRequest result = new com.google.cloud.talent.v4beta1.UpdateTenantRequest(this); if (tenantBuilder_ == null) { result.tenant_ = tenant_; } else { result.tenant_ = tenantBuilder_.build(); } if (updateMaskBuilder_ == null) { result.updateMask_ = updateMask_; } else { result.updateMask_ = updateMaskBuilder_.build(); } onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, 
value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.talent.v4beta1.UpdateTenantRequest) { return mergeFrom((com.google.cloud.talent.v4beta1.UpdateTenantRequest) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.talent.v4beta1.UpdateTenantRequest other) { if (other == com.google.cloud.talent.v4beta1.UpdateTenantRequest.getDefaultInstance()) return this; if (other.hasTenant()) { mergeTenant(other.getTenant()); } if (other.hasUpdateMask()) { mergeUpdateMask(other.getUpdateMask()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.talent.v4beta1.UpdateTenantRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.talent.v4beta1.UpdateTenantRequest) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private com.google.cloud.talent.v4beta1.Tenant tenant_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.talent.v4beta1.Tenant, com.google.cloud.talent.v4beta1.Tenant.Builder, com.google.cloud.talent.v4beta1.TenantOrBuilder> tenantBuilder_; /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. 
* </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> * * @return Whether the tenant field is set. */ public boolean hasTenant() { return tenantBuilder_ != null || tenant_ != null; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> * * @return The tenant. */ public com.google.cloud.talent.v4beta1.Tenant getTenant() { if (tenantBuilder_ == null) { return tenant_ == null ? com.google.cloud.talent.v4beta1.Tenant.getDefaultInstance() : tenant_; } else { return tenantBuilder_.getMessage(); } } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public Builder setTenant(com.google.cloud.talent.v4beta1.Tenant value) { if (tenantBuilder_ == null) { if (value == null) { throw new NullPointerException(); } tenant_ = value; onChanged(); } else { tenantBuilder_.setMessage(value); } return this; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public Builder setTenant(com.google.cloud.talent.v4beta1.Tenant.Builder builderForValue) { if (tenantBuilder_ == null) { tenant_ = builderForValue.build(); onChanged(); } else { tenantBuilder_.setMessage(builderForValue.build()); } return this; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. 
* </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public Builder mergeTenant(com.google.cloud.talent.v4beta1.Tenant value) { if (tenantBuilder_ == null) { if (tenant_ != null) { tenant_ = com.google.cloud.talent.v4beta1.Tenant.newBuilder(tenant_) .mergeFrom(value) .buildPartial(); } else { tenant_ = value; } onChanged(); } else { tenantBuilder_.mergeFrom(value); } return this; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public Builder clearTenant() { if (tenantBuilder_ == null) { tenant_ = null; onChanged(); } else { tenant_ = null; tenantBuilder_ = null; } return this; } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public com.google.cloud.talent.v4beta1.Tenant.Builder getTenantBuilder() { onChanged(); return getTenantFieldBuilder().getBuilder(); } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. * </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ public com.google.cloud.talent.v4beta1.TenantOrBuilder getTenantOrBuilder() { if (tenantBuilder_ != null) { return tenantBuilder_.getMessageOrBuilder(); } else { return tenant_ == null ? com.google.cloud.talent.v4beta1.Tenant.getDefaultInstance() : tenant_; } } /** * * * <pre> * Required. The tenant resource to replace the current resource in the system. 
* </pre> * * <code> * .google.cloud.talent.v4beta1.Tenant tenant = 1 [(.google.api.field_behavior) = REQUIRED]; * </code> */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.talent.v4beta1.Tenant, com.google.cloud.talent.v4beta1.Tenant.Builder, com.google.cloud.talent.v4beta1.TenantOrBuilder> getTenantFieldBuilder() { if (tenantBuilder_ == null) { tenantBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.talent.v4beta1.Tenant, com.google.cloud.talent.v4beta1.Tenant.Builder, com.google.cloud.talent.v4beta1.TenantOrBuilder>( getTenant(), getParentForChildren(), isClean()); tenant_ = null; } return tenantBuilder_; } private com.google.protobuf.FieldMask updateMask_; private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> updateMaskBuilder_; /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> * * @return Whether the updateMask field is set. */ public boolean hasUpdateMask() { return updateMaskBuilder_ != null || updateMask_ != null; } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. 
Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> * * @return The updateMask. */ public com.google.protobuf.FieldMask getUpdateMask() { if (updateMaskBuilder_ == null) { return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; } else { return updateMaskBuilder_.getMessage(); } } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public Builder setUpdateMask(com.google.protobuf.FieldMask value) { if (updateMaskBuilder_ == null) { if (value == null) { throw new NullPointerException(); } updateMask_ = value; onChanged(); } else { updateMaskBuilder_.setMessage(value); } return this; } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. 
* </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { if (updateMaskBuilder_ == null) { updateMask_ = builderForValue.build(); onChanged(); } else { updateMaskBuilder_.setMessage(builderForValue.build()); } return this; } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { if (updateMaskBuilder_ == null) { if (updateMask_ != null) { updateMask_ = com.google.protobuf.FieldMask.newBuilder(updateMask_).mergeFrom(value).buildPartial(); } else { updateMask_ = value; } onChanged(); } else { updateMaskBuilder_.mergeFrom(value); } return this; } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public Builder clearUpdateMask() { if (updateMaskBuilder_ == null) { updateMask_ = null; onChanged(); } else { updateMask_ = null; updateMaskBuilder_ = null; } return this; } /** * * * <pre> * Strongly recommended for the best service experience. 
* If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { onChanged(); return getUpdateMaskFieldBuilder().getBuilder(); } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. * </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { if (updateMaskBuilder_ != null) { return updateMaskBuilder_.getMessageOrBuilder(); } else { return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; } } /** * * * <pre> * Strongly recommended for the best service experience. * If [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask] is provided, only the specified fields in * [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant] are updated. Otherwise all the fields are updated. * A field mask to specify the tenant fields to be updated. Only * top level fields of [Tenant][google.cloud.talent.v4beta1.Tenant] are supported. 
* </pre> * * <code>.google.protobuf.FieldMask update_mask = 2;</code> */ private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> getUpdateMaskFieldBuilder() { if (updateMaskBuilder_ == null) { updateMaskBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder>( getUpdateMask(), getParentForChildren(), isClean()); updateMask_ = null; } return updateMaskBuilder_; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.talent.v4beta1.UpdateTenantRequest) } // @@protoc_insertion_point(class_scope:google.cloud.talent.v4beta1.UpdateTenantRequest) private static final com.google.cloud.talent.v4beta1.UpdateTenantRequest DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.talent.v4beta1.UpdateTenantRequest(); } public static com.google.cloud.talent.v4beta1.UpdateTenantRequest getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<UpdateTenantRequest> PARSER = new com.google.protobuf.AbstractParser<UpdateTenantRequest>() { @java.lang.Override public UpdateTenantRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new UpdateTenantRequest(input, extensionRegistry); } }; public static com.google.protobuf.Parser<UpdateTenantRequest> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<UpdateTenantRequest> 
getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.talent.v4beta1.UpdateTenantRequest getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
apache-2.0
OpenVnmrJ/OpenVnmrJ
src/vnmrj/src/vnmr/bo/BorderDeli.java
4177
/*
 * Copyright (C) 2015 University of Oregon
 *
 * You may distribute under the terms of either the GNU General Public
 * License or the Apache License, as specified in the LICENSE file.
 *
 * For more information, see the LICENSE file.
 */
package vnmr.bo;

import java.awt.*;
import javax.swing.border.*;
import javax.swing.BorderFactory;

import vnmr.util.VTitledBorder;

/**
 * Provides a convenient way to get a Border for a JComponent from
 * a string description of its properties.
 */
public class BorderDeli {

    /** Utility class; not instantiable. */
    private BorderDeli() {}

    /**
     * Returns a border, optionally with a title. The border properties
     * are mostly specified by Strings. These are case insensitive and
     * may contain "extra" trailing characters. Thus, a lowered, beveled
     * border is specified by <i>lower</i> but <i><B>Lower</B>edBeveled</i> is
     * also accepted. Required characters are in <B>bold</B> below.
     *
     * @param type
     * The type of border. The default (matching none of the recognized
     * types) is no border. The following types are recognized:
     * <pre>
     * <B>Etch</B>ed
     * <B>Raise</B>dBeveled
     * <B>Lower</B>edBeveled
     * </pre>
     * @param title
     * A title for the group, or an empty string for no title.
     * @param titlePosition
     * The vertical location of the title on the frame. The default
     * is <i>Top</i>. Choices are:
     * <pre>
     * <B>Top</B>
     * <B>AboveT</B>op
     * <B>BelowT</B>op
     * <B>Bot</B>tom
     * <B>AboveB</B>ottom
     * <B>BelowB</B>ottom
     * </pre>
     * @param titleJustification
     * The horizontal position of the title. The default is <i>Left</i>.
     * Choices are:
     * <pre>
     * <B>L</B>eft
     * <B>R</B>ight
     * <B>C</B>enter
     * </pre>
     * @param fontColor Required: The color used to draw the title.
     * @param font Required: The font of the title.
     */
    public static Border createBorder(String type,
                                      String title,
                                      String titlePosition,
                                      String titleJustification,
                                      Color fontColor,
                                      Font font) {

        int titleJust = TitledBorder.DEFAULT_JUSTIFICATION;
        int titlePosn = TitledBorder.DEFAULT_POSITION;
        Border border = null;

        /* Set basic border; a missing/blank type means "no border". */
        if (type == null || type.trim().length() == 0) {
            return border;
        }
        type = type.trim().toLowerCase();
        if (type.startsWith("etch")) {
            border = BorderFactory.createEtchedBorder();
        } else if (type.startsWith("raise") || type.startsWith("bevel")) {
            border = BorderFactory.createRaisedBevelBorder();
        } else if (type.startsWith("low")) {
            border = BorderFactory.createLoweredBevelBorder();
        } else {
            border = BorderFactory.createEmptyBorder();
        }

        /* Set title.
         * BUGFIX: the original condition was "title.trim() != null", which is
         * always true (String.trim() never returns null), so an empty title
         * string still produced a titled border — contradicting the documented
         * contract above ("an empty string for no title"). Now a blank title
         * yields the plain border. (The old "border != null" guard was dead:
         * every branch above assigns a non-null border.) */
        if (title != null && !title.trim().isEmpty()) {
            /* Set position */
            if (titlePosition != null && titlePosition.trim().length() > 0) {
                titlePosition = titlePosition.trim().toLowerCase();
                if (titlePosition.startsWith("top")) {
                    titlePosn = TitledBorder.TOP;
                } else if (titlePosition.startsWith("abovet")) {
                    titlePosn = TitledBorder.ABOVE_TOP;
                } else if (titlePosition.startsWith("belowt")) {
                    titlePosn = TitledBorder.BELOW_TOP;
                } else if (titlePosition.startsWith("bot")) {
                    titlePosn = TitledBorder.BOTTOM;
                } else if (titlePosition.startsWith("aboveb")) {
                    titlePosn = TitledBorder.ABOVE_BOTTOM;
                } else if (titlePosition.startsWith("belowb")) {
                    titlePosn = TitledBorder.BELOW_BOTTOM;
                }
            }

            /* Set justification */
            if (titleJustification != null && titleJustification.trim().length() > 0) {
                titleJustification = titleJustification.trim().toLowerCase();
                if (titleJustification.startsWith("l")) {
                    titleJust = TitledBorder.LEFT;
                } else if (titleJustification.startsWith("c")) {
                    titleJust = TitledBorder.CENTER;
                } else if (titleJustification.startsWith("r")) {
                    titleJust = TitledBorder.RIGHT;
                }
            }

            /* Put title on border */
            border = new VTitledBorder(border, title, titleJust, titlePosn,
                                       font, fontColor);
        }
        return border;
    }
}
apache-2.0