repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
xiangyong/btrace
src/share/classes/com/sun/btrace/annotations/Injected.java
2009
/* * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.sun.btrace.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Annotates a field as an injected service. * @author Jaroslav Bachorik */ @Target(ElementType.FIELD) @Retention(RetentionPolicy.CLASS) public @interface Injected { /** * The injected service type * @return */ ServiceType value() default ServiceType.SIMPLE; /** * The factory method to be used. * <p> * It must be a static method declared by the service class * and returning the service class instance * @return The name of the static method to be used as the factory method or an empty string */ String factoryMethod() default ""; }
gpl-2.0
sbbic/core
qadevOOo/tests/java/ifc/configuration/backend/_XMultiLayerStratum.java
11416
/* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ package ifc.configuration.backend; import com.sun.star.configuration.backend.XLayer; import com.sun.star.configuration.backend.XMultiLayerStratum; import com.sun.star.configuration.backend.XUpdatableLayer; import lib.MultiMethodTest; import util.XLayerHandlerImpl; public class _XMultiLayerStratum extends MultiMethodTest { public XMultiLayerStratum oObj; protected String aLayerID; public void _getLayer() { boolean res = true; try { oObj.getLayer("", ""); log.println("Exception expected -- FAILED"); res = false; } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception " + e + " -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("expected Exception -- OK"); } try { XLayer aLayer = oObj.getLayer(aLayerID, ""); res &= (aLayer != null); if (aLayer == null) { log.println("\treturned Layer is NULL -- FAILED"); } res &= checkLayer(aLayer); } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("getLayer()", 
res); } public void _getLayers() { boolean res = true; try { String[] LayerIds = new String[] { "1 /org/openoffice/Office/Jobs.xcu", "2 /org/openoffice/Office/Linguistic.xcu" }; XLayer[] Layers = oObj.getLayers(LayerIds, ""); res = Layers.length == 2; log.println("Getting two XLayers -- OK"); log.println("Checking first on "+LayerIds[0]); res &= checkLayer(Layers[0]); log.println("Checking second on "+LayerIds[1]); res &= checkLayer(Layers[1]); } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("getLayers()", res); } public void _getMultipleLayers() { boolean res = true; try { String[] LayerIds = new String[] { "1 /org/openoffice/Office/Jobs.xcu", "2 /org/openoffice/Office/Linguistic.xcu" }; String[] Times = new String[] { "", "" }; XLayer[] Layers = oObj.getMultipleLayers(LayerIds, Times); res = Layers.length == 2; log.println("Getting two XLayers -- OK"); log.println("Checking first on "+LayerIds[0]); res &= checkLayer(Layers[0]); log.println("Checking second on "+LayerIds[1]); res &= checkLayer(Layers[1]); } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("getMultipleLayers()", res); } public void _getUpdatableLayer() { boolean res = true; try { oObj.getUpdatableLayer(""); log.println("Exception expected -- FAILED"); res = false; } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception " + e + " -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("expected Exception -- OK"); } catch (com.sun.star.lang.NoSupportException e) { log.println("unexpected 
Exception -- FAILED"); res = false; } try { XUpdatableLayer aLayer = oObj.getUpdatableLayer(aLayerID); res &= (aLayer != null); if (aLayer == null) { log.println("\treturned Layer is NULL -- FAILED"); } res &= checkLayer(aLayer); } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.NoSupportException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("getUpdatableLayer()", res); } public void _getUpdateLayerId() { boolean res = true; try { oObj.getUpdateLayerId( "org.openoffice.Office.TypeDetection", "illegal"); log.println("Exception expected -- FAILED"); res = false; } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("expected Exception -- OK"); } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.NoSupportException e) { log.println("unexpected Exception -- FAILED"); res = false; } try { String ent = util.utils.getOfficeURL( tParam.getMSF()) + "/../share/registry"; String UpdateLayerID = oObj.getUpdateLayerId( "org.openoffice.Office.Linguistic", ent); res &= UpdateLayerID.endsWith("Linguistic.xcu"); if (!UpdateLayerID.endsWith("Linguistic.xcu")) { log.println("\tExpected the id to end with Linguistic.xcu"); log.println("\tBut got " + UpdateLayerID); log.println("\t=> FAILED"); } } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception "+e+" -- FAILED"); res = false; } catch (com.sun.star.lang.NoSupportException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("getUpdateLayerId()", res); } public void 
_listLayerIds() { boolean res = true; try { oObj.listLayerIds( "org.openoffice.Office.TypeDetection", "illegal"); log.println("Exception expected -- FAILED"); res = false; } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("expected Exception -- OK"); } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } try { String ent = util.utils.getOfficeURL( tParam.getMSF()) + "/../share/registry"; String[] LayerIDs = oObj.listLayerIds("org.openoffice.Office.Jobs", ent); res &= LayerIDs[0].endsWith("Jobs.xcu"); aLayerID = LayerIDs[0]; if (!LayerIDs[0].endsWith("Jobs.xcu")) { log.println("\tExpected the id to end with Jobs.xcu"); log.println("\tBut got " + LayerIDs[0]); log.println("\t=> FAILED"); } } catch (com.sun.star.configuration.backend.BackendAccessException e) { log.println("unexpected Exception -- FAILED"); res = false; } catch (com.sun.star.lang.IllegalArgumentException e) { log.println("unexpected Exception -- FAILED"); res = false; } tRes.tested("listLayerIds()", res); } protected boolean checkLayer(XLayer aLayer) { boolean res = false; log.println("Checking for Exception in case of null argument"); try { aLayer.readData(null); } catch (com.sun.star.lang.NullPointerException e) { log.println("Expected Exception -- OK"); res = true; } catch (com.sun.star.lang.WrappedTargetException e) { log.println("Unexpected Exception (" + e + ") -- FAILED"); } catch (com.sun.star.configuration.backend.MalformedDataException e) { log.println("Unexpected Exception (" + e + ") -- FAILED"); } log.println("checking read data with own XLayerHandler implementation"); try { XLayerHandlerImpl xLayerHandler = new XLayerHandlerImpl(); aLayer.readData(xLayerHandler); String implCalled = xLayerHandler.getCalls(); log.println(implCalled); int sl = implCalled.indexOf("startLayer"); if (sl < 0) { log.println("startLayer wasn't called -- FAILED"); res &= false; } else { log.println("startLayer was 
called -- OK"); res &= true; } int el = implCalled.indexOf("endLayer"); if (el < 0) { log.println("endLayer wasn't called -- FAILED"); res &= false; } else { log.println("endLayer was called -- OK"); res &= true; } } catch (com.sun.star.lang.NullPointerException e) { log.println("Unexpected Exception (" + e + ") -- FAILED"); res &= false; } catch (com.sun.star.lang.WrappedTargetException e) { log.println("Unexpected Exception (" + e + ") -- FAILED"); res &= false; } catch (com.sun.star.configuration.backend.MalformedDataException e) { log.println("Unexpected Exception (" + e + ") -- FAILED"); res &= false; } return res; } }
gpl-3.0
sosilent/euca
clc/modules/msgs/src/main/java/com/eucalyptus/auth/login/WsSecCredentials.java
281
package com.eucalyptus.auth.login; import org.apache.axiom.soap.SOAPEnvelope; public class WsSecCredentials extends WrappedCredentials<SOAPEnvelope> { public WsSecCredentials( String correlationId, SOAPEnvelope loginData ) { super( correlationId, loginData ); } }
gpl-3.0
seblund/Dissolvable
build/tmp/recompileMc/sources/net/minecraft/client/renderer/entity/RenderBat.java
1667
package net.minecraft.client.renderer.entity; import net.minecraft.client.model.ModelBat; import net.minecraft.client.renderer.GlStateManager; import net.minecraft.entity.passive.EntityBat; import net.minecraft.util.ResourceLocation; import net.minecraft.util.math.MathHelper; import net.minecraftforge.fml.relauncher.Side; import net.minecraftforge.fml.relauncher.SideOnly; @SideOnly(Side.CLIENT) public class RenderBat extends RenderLiving<EntityBat> { private static final ResourceLocation batTextures = new ResourceLocation("textures/entity/bat.png"); public RenderBat(RenderManager renderManagerIn) { super(renderManagerIn, new ModelBat(), 0.25F); } /** * Returns the location of an entity's texture. Doesn't seem to be called unless you call Render.bindEntityTexture. */ protected ResourceLocation getEntityTexture(EntityBat entity) { return batTextures; } /** * Allows the render to do any OpenGL state modifications necessary before the model is rendered. Args: * entityLiving, partialTickTime */ protected void preRenderCallback(EntityBat entitylivingbaseIn, float partialTickTime) { GlStateManager.scale(0.35F, 0.35F, 0.35F); } protected void rotateCorpse(EntityBat bat, float p_77043_2_, float p_77043_3_, float partialTicks) { if (!bat.getIsBatHanging()) { GlStateManager.translate(0.0F, MathHelper.cos(p_77043_2_ * 0.3F) * 0.1F, 0.0F); } else { GlStateManager.translate(0.0F, -0.1F, 0.0F); } super.rotateCorpse(bat, p_77043_2_, p_77043_3_, partialTicks); } }
gpl-3.0
srnsw/xena
plugins/project/ext/src/poi-3.2-FINAL/src/java/org/apache/poi/poifs/filesystem/POIFSFileSystem.java
20400
/* ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================================================== */ package org.apache.poi.poifs.filesystem; import java.io.ByteArrayInputStream; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PushbackInputStream; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import org.apache.poi.poifs.common.POIFSConstants; import org.apache.poi.poifs.dev.POIFSViewable; import org.apache.poi.poifs.property.DirectoryProperty; import org.apache.poi.poifs.property.Property; import org.apache.poi.poifs.property.PropertyTable; import org.apache.poi.poifs.storage.BATBlock; import org.apache.poi.poifs.storage.BlockAllocationTableReader; import org.apache.poi.poifs.storage.BlockAllocationTableWriter; import org.apache.poi.poifs.storage.BlockList; import org.apache.poi.poifs.storage.BlockWritable; import org.apache.poi.poifs.storage.HeaderBlockConstants; import org.apache.poi.poifs.storage.HeaderBlockReader; import org.apache.poi.poifs.storage.HeaderBlockWriter; import 
org.apache.poi.poifs.storage.RawDataBlockList; import org.apache.poi.poifs.storage.SmallBlockTableReader; import org.apache.poi.poifs.storage.SmallBlockTableWriter; import org.apache.poi.util.IOUtils; import org.apache.poi.util.LongField; import org.apache.poi.util.POILogFactory; import org.apache.poi.util.POILogger; /** * This is the main class of the POIFS system; it manages the entire * life cycle of the filesystem. * * @author Marc Johnson (mjohnson at apache dot org) */ public class POIFSFileSystem implements POIFSViewable { private static final POILogger _logger = POILogFactory.getLogger(POIFSFileSystem.class); private static final class CloseIgnoringInputStream extends InputStream { private final InputStream _is; public CloseIgnoringInputStream(InputStream is) { _is = is; } public int read() throws IOException { return _is.read(); } public int read(byte[] b, int off, int len) throws IOException { return _is.read(b, off, len); } public void close() { // do nothing } } /** * Convenience method for clients that want to avoid the auto-close behaviour of the constructor. */ public static InputStream createNonClosingInputStream(InputStream is) { return new CloseIgnoringInputStream(is); } private PropertyTable _property_table; private List _documents; private DirectoryNode _root; /** * What big block size the file uses. Most files * use 512 bytes, but a few use 4096 */ private int bigBlockSize = POIFSConstants.BIG_BLOCK_SIZE; /** * Constructor, intended for writing */ public POIFSFileSystem() { _property_table = new PropertyTable(); _documents = new ArrayList(); _root = null; } /** * Create a POIFSFileSystem from an <tt>InputStream</tt>. Normally the stream is read until * EOF. The stream is always closed.<p/> * * Some streams are usable after reaching EOF (typically those that return <code>true</code> * for <tt>markSupported()</tt>). 
In the unlikely case that the caller has such a stream * <i>and</i> needs to use it after this constructor completes, a work around is to wrap the * stream in order to trap the <tt>close()</tt> call. A convenience method ( * <tt>createNonClosingInputStream()</tt>) has been provided for this purpose: * <pre> * InputStream wrappedStream = POIFSFileSystem.createNonClosingInputStream(is); * HSSFWorkbook wb = new HSSFWorkbook(wrappedStream); * is.reset(); * doSomethingElse(is); * </pre> * Note also the special case of <tt>ByteArrayInputStream</tt> for which the <tt>close()</tt> * method does nothing. * <pre> * ByteArrayInputStream bais = ... * HSSFWorkbook wb = new HSSFWorkbook(bais); // calls bais.close() ! * bais.reset(); // no problem * doSomethingElse(bais); * </pre> * * @param stream the InputStream from which to read the data * * @exception IOException on errors reading, or on invalid data */ public POIFSFileSystem(InputStream stream) throws IOException { this(); boolean success = false; HeaderBlockReader header_block_reader; RawDataBlockList data_blocks; try { // read the header block from the stream header_block_reader = new HeaderBlockReader(stream); bigBlockSize = header_block_reader.getBigBlockSize(); // read the rest of the stream into blocks data_blocks = new RawDataBlockList(stream, bigBlockSize); success = true; } finally { closeInputStream(stream, success); } // set up the block allocation table (necessary for the // data_blocks to be manageable new BlockAllocationTableReader(header_block_reader.getBATCount(), header_block_reader.getBATArray(), header_block_reader.getXBATCount(), header_block_reader.getXBATIndex(), data_blocks); // get property table from the document PropertyTable properties = new PropertyTable(header_block_reader.getPropertyStart(), data_blocks); // init documents processProperties(SmallBlockTableReader .getSmallDocumentBlocks(data_blocks, properties .getRoot(), header_block_reader .getSBATStart()), data_blocks, properties.getRoot() 
.getChildren(), null); } /** * @param stream the stream to be closed * @param success <code>false</code> if an exception is currently being thrown in the calling method */ private void closeInputStream(InputStream stream, boolean success) { if(stream.markSupported() && !(stream instanceof ByteArrayInputStream)) { String msg = "POIFS is closing the supplied input stream of type (" + stream.getClass().getName() + ") which supports mark/reset. " + "This will be a problem for the caller if the stream will still be used. " + "If that is the case the caller should wrap the input stream to avoid this close logic. " + "This warning is only temporary and will not be present in future versions of POI."; _logger.log(POILogger.WARN, msg); } try { stream.close(); } catch (IOException e) { if(success) { throw new RuntimeException(e); } // else not success? Try block did not complete normally // just print stack trace and leave original ex to be thrown e.printStackTrace(); } } /** * Checks that the supplied InputStream (which MUST * support mark and reset, or be a PushbackInputStream) * has a POIFS (OLE2) header at the start of it. * If your InputStream does not support mark / reset, * then wrap it in a PushBackInputStream, then be * sure to always use that, and not the original! * @param inp An InputStream which supports either mark/reset, or is a PushbackInputStream */ public static boolean hasPOIFSHeader(InputStream inp) throws IOException { // We want to peek at the first 8 bytes inp.mark(8); byte[] header = new byte[8]; IOUtils.readFully(inp, header); LongField signature = new LongField(HeaderBlockConstants._signature_offset, header); // Wind back those 8 bytes if(inp instanceof PushbackInputStream) { PushbackInputStream pin = (PushbackInputStream)inp; pin.unread(header); } else { inp.reset(); } // Did it match the signature? 
return (signature.get() == HeaderBlockConstants._signature); } /** * Create a new document to be added to the root directory * * @param stream the InputStream from which the document's data * will be obtained * @param name the name of the new POIFSDocument * * @return the new DocumentEntry * * @exception IOException on error creating the new POIFSDocument */ public DocumentEntry createDocument(final InputStream stream, final String name) throws IOException { return getRoot().createDocument(name, stream); } /** * create a new DocumentEntry in the root entry; the data will be * provided later * * @param name the name of the new DocumentEntry * @param size the size of the new DocumentEntry * @param writer the writer of the new DocumentEntry * * @return the new DocumentEntry * * @exception IOException */ public DocumentEntry createDocument(final String name, final int size, final POIFSWriterListener writer) throws IOException { return getRoot().createDocument(name, size, writer); } /** * create a new DirectoryEntry in the root directory * * @param name the name of the new DirectoryEntry * * @return the new DirectoryEntry * * @exception IOException on name duplication */ public DirectoryEntry createDirectory(final String name) throws IOException { return getRoot().createDirectory(name); } /** * Write the filesystem out * * @param stream the OutputStream to which the filesystem will be * written * * @exception IOException thrown on errors writing to the stream */ public void writeFilesystem(final OutputStream stream) throws IOException { // get the property table ready _property_table.preWrite(); // create the small block store, and the SBAT SmallBlockTableWriter sbtw = new SmallBlockTableWriter(_documents, _property_table.getRoot()); // create the block allocation table BlockAllocationTableWriter bat = new BlockAllocationTableWriter(); // create a list of BATManaged objects: the documents plus the // property table and the small block table List bm_objects = new 
ArrayList(); bm_objects.addAll(_documents); bm_objects.add(_property_table); bm_objects.add(sbtw); bm_objects.add(sbtw.getSBAT()); // walk the list, allocating space for each and assigning each // a starting block number Iterator iter = bm_objects.iterator(); while (iter.hasNext()) { BATManaged bmo = ( BATManaged ) iter.next(); int block_count = bmo.countBlocks(); if (block_count != 0) { bmo.setStartBlock(bat.allocateSpace(block_count)); } else { // Either the BATManaged object is empty or its data // is composed of SmallBlocks; in either case, // allocating space in the BAT is inappropriate } } // allocate space for the block allocation table and take its // starting block int batStartBlock = bat.createBlocks(); // get the extended block allocation table blocks HeaderBlockWriter header_block_writer = new HeaderBlockWriter(); BATBlock[] xbat_blocks = header_block_writer.setBATBlocks(bat.countBlocks(), batStartBlock); // set the property table start block header_block_writer.setPropertyStart(_property_table.getStartBlock()); // set the small block allocation table start block header_block_writer.setSBATStart(sbtw.getSBAT().getStartBlock()); // set the small block allocation table block count header_block_writer.setSBATBlockCount(sbtw.getSBATBlockCount()); // the header is now properly initialized. 
Make a list of // writers (the header block, followed by the documents, the // property table, the small block store, the small block // allocation table, the block allocation table, and the // extended block allocation table blocks) List writers = new ArrayList(); writers.add(header_block_writer); writers.addAll(_documents); writers.add(_property_table); writers.add(sbtw); writers.add(sbtw.getSBAT()); writers.add(bat); for (int j = 0; j < xbat_blocks.length; j++) { writers.add(xbat_blocks[ j ]); } // now, write everything out iter = writers.iterator(); while (iter.hasNext()) { BlockWritable writer = ( BlockWritable ) iter.next(); writer.writeBlocks(stream); } } /** * read in a file and write it back out again * * @param args names of the files; arg[ 0 ] is the input file, * arg[ 1 ] is the output file * * @exception IOException */ public static void main(String args[]) throws IOException { if (args.length != 2) { System.err.println( "two arguments required: input filename and output filename"); System.exit(1); } FileInputStream istream = new FileInputStream(args[ 0 ]); FileOutputStream ostream = new FileOutputStream(args[ 1 ]); new POIFSFileSystem(istream).writeFilesystem(ostream); istream.close(); ostream.close(); } /** * get the root entry * * @return the root entry */ public DirectoryNode getRoot() { if (_root == null) { _root = new DirectoryNode(_property_table.getRoot(), this, null); } return _root; } /** * open a document in the root entry's list of entries * * @param documentName the name of the document to be opened * * @return a newly opened DocumentInputStream * * @exception IOException if the document does not exist or the * name is that of a DirectoryEntry */ public DocumentInputStream createDocumentInputStream( final String documentName) throws IOException { return getRoot().createDocumentInputStream(documentName); } /** * add a new POIFSDocument * * @param document the POIFSDocument being added */ void addDocument(final POIFSDocument document) { 
_documents.add(document); _property_table.addProperty(document.getDocumentProperty()); } /** * add a new DirectoryProperty * * @param directory the DirectoryProperty being added */ void addDirectory(final DirectoryProperty directory) { _property_table.addProperty(directory); } /** * remove an entry * * @param entry to be removed */ void remove(EntryNode entry) { _property_table.removeProperty(entry.getProperty()); if (entry.isDocumentEntry()) { _documents.remove((( DocumentNode ) entry).getDocument()); } } private void processProperties(final BlockList small_blocks, final BlockList big_blocks, final Iterator properties, final DirectoryNode dir) throws IOException { while (properties.hasNext()) { Property property = ( Property ) properties.next(); String name = property.getName(); DirectoryNode parent = (dir == null) ? (( DirectoryNode ) getRoot()) : dir; if (property.isDirectory()) { DirectoryNode new_dir = ( DirectoryNode ) parent.createDirectory(name); new_dir.setStorageClsid( property.getStorageClsid() ); processProperties( small_blocks, big_blocks, (( DirectoryProperty ) property).getChildren(), new_dir); } else { int startBlock = property.getStartBlock(); int size = property.getSize(); POIFSDocument document = null; if (property.shouldUseSmallBlocks()) { document = new POIFSDocument(name, small_blocks .fetchBlocks(startBlock), size); } else { document = new POIFSDocument(name, big_blocks.fetchBlocks(startBlock), size); } parent.createDocument(document); } } } /* ********** START begin implementation of POIFSViewable ********** */ /** * Get an array of objects, some of which may implement * POIFSViewable * * @return an array of Object; may not be null, but may be empty */ public Object [] getViewableArray() { if (preferArray()) { return (( POIFSViewable ) getRoot()).getViewableArray(); } else { return new Object[ 0 ]; } } /** * Get an Iterator of objects, some of which may implement * POIFSViewable * * @return an Iterator; may not be null, but may have an empty 
* back end store */ public Iterator getViewableIterator() { if (!preferArray()) { return (( POIFSViewable ) getRoot()).getViewableIterator(); } else { return Collections.EMPTY_LIST.iterator(); } } /** * Give viewers a hint as to whether to call getViewableArray or * getViewableIterator * * @return true if a viewer should call getViewableArray, false if * a viewer should call getViewableIterator */ public boolean preferArray() { return (( POIFSViewable ) getRoot()).preferArray(); } /** * Provides a short description of the object, to be used when a * POIFSViewable object has not provided its contents. * * @return short description */ public String getShortDescription() { return "POIFS FileSystem"; } /** * @return The Big Block size, normally 512 bytes, sometimes 4096 bytes */ public int getBigBlockSize() { return bigBlockSize; } /* ********** END begin implementation of POIFSViewable ********** */ } // end public class POIFSFileSystem
gpl-3.0
wormzjl/PneumaticCraft
src/pneumaticCraft/common/thirdparty/nei/NEISpecialCraftingManager.java
1419
package pneumaticCraft.common.thirdparty.nei;

import java.util.List;

import net.minecraft.client.Minecraft;
import net.minecraft.client.gui.Gui;
import net.minecraft.client.resources.I18n;
import net.minecraft.util.ResourceLocation;

import org.lwjgl.opengl.GL11;

import pneumaticCraft.common.util.PneumaticCraftUtils;
import pneumaticCraft.lib.Textures;
import cpw.mods.fml.client.FMLClientHandler;

/**
 * Base class for NEI recipe handlers that render a block of localized
 * explanatory text plus the standard "misc recipes" GUI background with an
 * animated progress arrow.
 */
public abstract class NEISpecialCraftingManager extends PneumaticCraftPlugins{
    // Lazily-created handle for the background texture (see drawBackground).
    private ResourceLocation texture;
    // Localized description, pre-wrapped to 30-character lines; null until
    // a subclass calls setText().
    private List<String> text;

    /**
     * Looks up the given localization key and word-wraps the translated
     * string into 30-character lines for rendering.
     */
    protected void setText(String localizationKey){
        text = PneumaticCraftUtils.convertStringIntoList(I18n.format(localizationKey), 30);
    }

    @Override
    public String getGuiTexture(){
        return Textures.GUI_NEI_MISC_RECIPES;
    }

    @Override
    public void drawBackground(int recipe){
        // Render the wrapped description, one line every 10 pixels.
        if(text != null) {
            int lineY = 20;
            for(String line : text) {
                Minecraft.getMinecraft().fontRenderer.drawString(line, 5, lineY, 0xFF000000);
                lineY += 10;
            }
        }
        // Create the texture handle on first use, then bind it.
        if(texture == null) {
            texture = new ResourceLocation(getGuiTexture());
        }
        FMLClientHandler.instance().getClient().getTextureManager().bindTexture(texture);
        GL11.glColor4f(1, 1, 1, 1);
        // Draw the 82x18 arrow background, then the animated progress
        // overlay (cycles every 48 ticks).
        Gui.func_146110_a(40, 79, 0, 0, 82, 18, 256, 256);
        drawProgressBar(63, 80, 82, 0, 38, 18, cycleticks % 48 / 48F, 0);
    }
}
gpl-3.0
magneticflux-/halfnes-headless
src/main/java/com/grapeshot/halfnes/mappers/VRC6Mapper.java
6699
/*
 * HalfNES by Andrew Hoffman
 * Licensed under the GNU GPL Version 3. See LICENSE file
 */
package com.grapeshot.halfnes.mappers;

import com.grapeshot.halfnes.*;
import com.grapeshot.halfnes.audio.*;

/**
 * Mapper implementation for Konami VRC6 cartridge boards
 * (iNES mapper numbers 24 and 26). Handles PRG/CHR banking, the VRC6
 * expansion sound chip, and the cycle/scanline IRQ counter.
 */
public class VRC6Mapper extends Mapper {

    // The two possible assignments of the register-select address bits:
    // mappers 24 and 26 expose the same registers on swapped address lines.
    int[][] registerselectbits = {{0, 1}, {1, 0}};
    // Bit positions (one row of registerselectbits) used to decode writes.
    int[] registers;
    // Current PRG bank register values (bank 0 = 16k, bank 1 = 8k).
    int prgbank0, prgbank1 = 0;
    // Current 1k CHR bank register values.
    int[] chrbank = {0, 0, 0, 0, 0, 0, 0, 0};
    // IRQ state: counting mode, enable flag, re-enable-on-ack flag, and
    // whether this mapper currently holds the CPU interrupt line asserted.
    boolean irqmode, irqenable, irqack, firedinterrupt = false;
    int irqreload, irqcounter = 22;
    VRC6SoundChip sndchip;
    // The sound chip is registered with the APU lazily (see scanlinecount).
    boolean hasInitSound = false;

    /**
     * @param mappernum iNES mapper number (24 or 26); selects which address
     *                  lines carry the two register-select bits.
     */
    public VRC6Mapper(int mappernum) {
        super();
        sndchip = new VRC6SoundChip();
        switch (mappernum) {
            //vrc6 has 2 different mapper numbers, for 2 different ways to assign the registers
            case 24:
                registers = registerselectbits[0];
                break;
            case 26:
            default:
                registers = registerselectbits[1];
                break;
        }
    }

    /**
     * Loads the ROM and fills the PRG/CHR maps with the power-on layout.
     */
    @Override
    public void loadrom() throws BadMapperException {
        super.loadrom();
        // needs to be in every mapper. Fill with initial cfg
        for (int i = 1; i <= 32; ++i) {
            //map last banks in to start off
            prg_map[32 - i] = prgsize - (1024 * i);
        }
        for (int i = 0; i < 8; ++i) {
            chr_map[i] = (1024 * i) & (chrsize - 1);
        }
    }

    /**
     * Handles CPU writes to the cartridge. Addresses below 0x8000 fall
     * through to the base class; 0x8000-0xffff hit VRC6 registers, decoded
     * from the address high nibble plus the two register-select bits.
     */
    @Override
    public final void cartWrite(final int addr, final int data) {
        if (addr < 0x8000 || addr > 0xffff) {
            super.cartWrite(addr, data);
            return;
        }
        // Register-select bits; which address lines they come from depends
        // on the mapper number (see constructor).
        final boolean bit0 = ((addr & (1 << registers[0])) != 0);
        final boolean bit1 = ((addr & (1 << registers[1])) != 0);
        switch (addr >> 12) {
            case 0x8: //8000-8003: prg bank 0 select
                prgbank0 = data;
                setbanks();
                break;
            case 0x9:
            case 0xa: //sound registers here
                sndchip.write((addr & 0xf000) + (bit1 ? 2 : 0) + (bit0 ? 1 : 0), data);
                break;
            case 0xc: //c000-c003: prg bank 1 select
                prgbank1 = data;
                setbanks();
                break;
            case 0xb:
                if (bit0 && bit1) {
                    //mirroring select
                    switch ((data >> 2) & 3) {
                        case 0:
                            setmirroring(Mapper.MirrorType.V_MIRROR);
                            break;
                        case 1:
                            setmirroring(Mapper.MirrorType.H_MIRROR);
                            break;
                        case 2:
                            setmirroring(Mapper.MirrorType.SS_MIRROR0);
                            break;
                        case 3:
                            setmirroring(Mapper.MirrorType.SS_MIRROR1);
                            break;
                    }
                } else {
                    //expansion sound register here as well
                    sndchip.write((addr & 0xf000) + (bit1 ? 2 : 0) + (bit0 ? 1 : 0), data);
                }
                break;
            case 0xd: //character bank selects
                chrbank[(bit1 ? 2 : 0) + (bit0 ? 1 : 0)] = data;
                setbanks();
                break;
            case 0xe:
                chrbank[(bit1 ? 2 : 0) + (bit0 ? 1 : 0) + 4] = data;
                setbanks();
                break;
            case 0xf: //irq control
                if (!bit1) {
                    if (!bit0) {
                        // select bits 00: IRQ reload value register.
                        irqreload = data;
                    } else {
                        // select bits 01: IRQ control bits (ack/enable/mode).
                        irqack = ((data & (utils.BIT0)) != 0);
                        irqenable = ((data & (utils.BIT1)) != 0);
                        irqmode = ((data & (utils.BIT2)) != 0);
                        if (irqenable) {
                            irqcounter = irqreload;
                            prescaler = 341;
                        }
                        // Release any interrupt this mapper is asserting.
                        if (firedinterrupt) {
                            --cpu.interrupt;
                        }
                        firedinterrupt = false;
                    }
                } else {
                    if (!bit0) {
                        // select bits 10: IRQ acknowledge; counting resumes
                        // only if the ack flag was set by the last control
                        // write (irqenable takes irqack's value here).
                        irqenable = irqack;
                        if (firedinterrupt) {
                            --cpu.interrupt;
                        }
                        firedinterrupt = false;
                    }
                }
        }
    }

    /**
     * Recomputes the PRG and CHR mapping tables from the bank registers.
     */
    private void setbanks() {
        //map prg banks
        //last 8k fixed to end of rom
        for (int i = 1; i <= 8; ++i) {
            prg_map[32 - i] = prgsize - (1024 * i);
        }
        //first bank set to prg0 register
        for (int i = 0; i < 16; ++i) {
            prg_map[i] = (1024 * (i + 16 * prgbank0)) % prgsize;
        }
        //second bank set to prg1 register
        for (int i = 0; i < 8; ++i) {
            prg_map[i + 16] = (1024 * (i + 8 * prgbank1)) % prgsize;
        }
        //map chr banks
        for (int i = 0; i < 8; ++i) {
            setppubank(1, i, chrbank[i]);
        }
    }

    /**
     * Maps {@code banksize} consecutive 1k CHR pages starting at map slot
     * {@code bankpos} to ROM bank {@code banknum} (wrapped to chrsize).
     */
    private void setppubank(final int banksize, final int bankpos, final int banknum) {
        // System.err.println(banksize + ", " + bankpos + ", "+ banknum);
        for (int i = 0; i < banksize; ++i) {
            chr_map[i + bankpos] = (1024 * ((banknum) + i)) % chrsize;
        }
        // utils.printarray(chr_map);
    }
    // Countdown to the next scanline tick; decremented by 3 per CPU cycle
    // and reloaded with 341 (assumes 341 PPU dots per scanline — TODO
    // confirm against the PPU implementation).
    int prescaler = 341;

    /**
     * Clocks the IRQ counter: every call in cycle mode (irqmode set),
     * otherwise once per 341-dot prescaler period.
     */
    @Override
    public void cpucycle(int cycles) {
        if (irqenable) {
            if (irqmode) {
                scanlinecount(); //clock regardless of prescaler state
            } else {
                prescaler -= 3;
                if (prescaler <= 0) {
                    prescaler += 341;
                    scanlinecount();
                }
            }
        }
    }

    /**
     * One IRQ counter tick. When the counter wraps at 255 it is reloaded
     * from the reload register and a CPU interrupt is asserted (once).
     */
    public void scanlinecount() {
        if (!hasInitSound) {
            //tiny hack, because the APU is not initialized until AFTER this happens
            //TODO: this really should not need to be here.
            cpuram.apu.addExpnSound(sndchip);
            hasInitSound = true;
        }
        if (irqenable) {
            if (irqcounter == 255) {
                irqcounter = irqreload;
                //System.err.println("Interrupt @ Scanline " + scanline + " reload " + irqreload);
                if (!firedinterrupt) {
                    ++cpu.interrupt;
                }
                firedinterrupt = true;
            } else {
                ++irqcounter;
            }
        }
    }
}
gpl-3.0
srnsw/xena
plugins/project/ext/src/poi-3.2-FINAL/src/java/org/apache/poi/hssf/record/HeaderRecord.java
6319
/* ====================================================================
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
==================================================================== */

package org.apache.poi.hssf.record;

import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.StringUtil;

/**
 * Title:        Header Record<P>
 * Description:  Specifies a header for a sheet<P>
 * REFERENCE:  PG 321 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)<P>
 * @author Andrew C. Oliver (acoliver at apache dot org)
 * @author Shawn Laubach (slaubach at apache dot org) Modified 3/14/02
 * @author Jason Height (jheight at chariot dot net dot au)
 * @version 2.0-pre
 */
public class HeaderRecord extends Record {
    public final static short sid = 0x14;
    // On-disk layout after the 4-byte record header:
    //   byte 0: header string length, byte 1: reserved, byte 2: unicode flag,
    //   bytes 3..n: the header string (compressed or UTF-16LE).
    private byte              field_1_header_len;
    private byte              field_2_reserved;
    private byte              field_3_unicode_flag;
    private String            field_4_header;

    /** Creates an empty header record. */
    public HeaderRecord()
    {
    }

    /**
     * Reads a header record from the stream. A zero-length record (no
     * remaining bytes) leaves all fields at their defaults.
     */
    public HeaderRecord(RecordInputStream in)
    {
        if (in.remaining() > 0)
        {
            field_1_header_len = in.readByte();
            /** These two fields are a bit odd. They are not documented*/
            field_2_reserved = in.readByte();
            field_3_unicode_flag = in.readByte();						// unicode

            // The length prefix counts characters, not bytes; pick the
            // decoder based on the unicode flag.
            if(isMultibyte())
            {
                field_4_header = in.readUnicodeLEString(
                    LittleEndian.ubyteToInt( field_1_header_len));
            }
            else
            {
                field_4_header = in.readCompressedUnicode(
                    LittleEndian.ubyteToInt( field_1_header_len));
            }
        }
    }

    /**
     * see the unicode flag
     *
     * @return boolean flag
     *  true:footer string has at least one multibyte character
     */
    public boolean isMultibyte() {
        return ((field_3_unicode_flag & 0xFF) == 1);
    }

    /**
     * set the length of the header string
     *
     * @param len  length of the header string
     * @see #setHeader(String)
     */
    public void setHeaderLength(byte len)
    {
        field_1_header_len = len;
    }

    /**
     * set the header string
     *
     * @param header string to display
     * @see #setHeaderLength(byte)
     */
    public void setHeader(String header)
    {
        field_4_header = header;
        // Recompute the unicode flag from the new string contents.
        field_3_unicode_flag =
            (byte) (StringUtil.hasMultibyte(field_4_header) ? 1 : 0);

        // Check it'll fit into the space in the record
        // (limits differ because unicode characters take two bytes each).
        if(field_4_header == null) return;
        if(field_3_unicode_flag == 1) {
            if(field_4_header.length() > 127) {
                throw new IllegalArgumentException("Header string too long (limit is 127 for unicode strings)");
            }
        } else {
            if(field_4_header.length() > 255) {
                throw new IllegalArgumentException("Header string too long (limit is 255 for non-unicode strings)");
            }
        }
    }

    /**
     * get the length of the header string
     *
     * @return length of the header string
     * @see #getHeader()
     */
    public short getHeaderLength()
    {
        return (short)(0xFF & field_1_header_len); // [Shawn] Fixed needing unsigned byte
    }

    /**
     * get the header string
     *
     * @return header string to display
     * @see #getHeaderLength()
     */
    public String getHeader()
    {
        return field_4_header;
    }

    /** Debug representation listing the length and string. */
    public String toString()
    {
        StringBuffer buffer = new StringBuffer();

        buffer.append("[HEADER]\n");
        buffer.append("    .length         = ").append(getHeaderLength())
            .append("\n");
        buffer.append("    .header         = ").append(getHeader())
            .append("\n");
        buffer.append("[/HEADER]\n");
        return buffer.toString();
    }

    /**
     * Writes this record into the given buffer at {@code offset} and
     * returns the number of bytes written (same as getRecordSize()).
     */
    public int serialize(int offset, byte [] data)
    {
        int len = 4;

        if (getHeaderLength() != 0)
        {
            len+=3; // [Shawn] Fixed for two null bytes in the length
        }
        short bytelen = (short)(isMultibyte() ?
            getHeaderLength()*2 : getHeaderLength() );
        LittleEndian.putShort(data, 0 + offset, sid);
        LittleEndian.putShort(data, 2 + offset, ( short ) ((len - 4) + bytelen));

        if (getHeaderLength() > 0)
        {
            data[ 4 + offset ] = (byte)getHeaderLength();
            // NOTE(review): data[5 + offset] (the reserved byte read in the
            // constructor as field_2_reserved) is never written here; the
            // buffer's existing contents are kept — confirm this round-trip
            // loss is intended.
            data[ 6 + offset ] = field_3_unicode_flag;
            if(isMultibyte()) {
                StringUtil.putUnicodeLE(getHeader(), data, 7 + offset);
            } else {
                StringUtil.putCompressedUnicode(getHeader(), data, 7 + offset); // [Shawn] Place the string in the correct offset
            }
        }
        return getRecordSize();
    }

    /** Total serialized size: 4-byte header plus 3 prefix bytes and the
     *  string bytes when a header string is present. */
    public int getRecordSize()
    {
        int retval = 4;

        if (getHeaderLength() != 0)
        {
            retval+=3; // [Shawn] Fixed for two null bytes in the length
        }
        return (isMultibyte() ?
            (retval + getHeaderLength()*2) : (retval + getHeaderLength()));
    }

    public short getSid()
    {
        return sid;
    }

    /** Field-by-field copy (String is immutable, so sharing it is safe). */
    public Object clone() {
      HeaderRecord rec = new HeaderRecord();
      rec.field_1_header_len = field_1_header_len;
      rec.field_2_reserved = field_2_reserved;
      rec.field_3_unicode_flag = field_3_unicode_flag;
      rec.field_4_header = field_4_header;
      return rec;
    }
}
gpl-3.0
AdUMinecraft/EventCore
src/test/java/org/bukkit/DyeColorsTest.java
1402
package org.bukkit;

import static org.junit.Assert.*;
import static org.hamcrest.Matchers.*;

import java.util.ArrayList;
import java.util.List;

import net.minecraft.server.EntitySheep;
import net.minecraft.server.ItemDye;

import org.bukkit.support.AbstractTestingBase;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;

/**
 * Verifies that every Bukkit {@link DyeColor} exposes the same wool and
 * firework colours as the vanilla server internals it mirrors.
 */
@RunWith(Parameterized.class)
public class DyeColorsTest extends AbstractTestingBase {

    /** Builds one parameter set per dye colour. */
    @Parameters(name= "{index}: {0}")
    public static List<Object[]> data() {
        List<Object[]> parameterSets = new ArrayList<Object[]>();
        for (DyeColor color : DyeColor.values()) {
            parameterSets.add(new Object[] {color});
        }
        return parameterSets;
    }

    @Parameter public DyeColor dye;

    /** Wool colour must match the NMS sheep colour table. */
    @Test
    public void checkColor() {
        float[] components = EntitySheep.d[dye.getWoolData()];
        Color expected = Color.fromRGB((int) (components[0] * 255), (int) (components[1] * 255), (int) (components[2] * 255));
        assertThat(dye.getColor(), is(expected));
    }

    /** Firework colour must match the NMS dye item colour table. */
    @Test
    public void checkFireworkColor() {
        int packedRgb = ItemDye.c[dye.getDyeData()];
        assertThat(dye.getFireworkColor(), is(Color.fromRGB(packedRgb)));
    }
}
gpl-3.0
huicpc0215/tellbo
tellbo/examples/weibo4j/examples/search/SearchSuggestionsCompanies.java
504
package weibo4j.examples.search;

import weibo4j.Search;
import weibo4j.model.WeiboException;
import weibo4j.org.json.JSONArray;

/**
 * Command-line example: queries the Weibo company-suggestion search API and
 * prints the raw JSON result.
 *
 * Usage: args[0] = OAuth access token, args[1] = search keyword.
 */
public class SearchSuggestionsCompanies {

	public static void main(String[] args) {
		String accessToken = args[0];
		String keyword = args[1];
		Search search = new Search(accessToken);
		try {
			JSONArray result = search.searchSuggestionsCompanies(keyword);
			System.out.println(result.toString());
		} catch (WeiboException e) {
			// Example program: just dump the API failure to stderr.
			e.printStackTrace();
		}
	}
}
gpl-3.0
evanchowsz/algorithm
zuoyun_algorithm/BookCode/src/chapter_3_binarytreeproblem/Problem_06_LongestPathSum.java
2339
import java.util.HashMap;

/**
 * Given a binary tree and a target sum, finds the length (node count) of the
 * longest downward path (a chain from some node to one of its descendants)
 * whose values add up exactly to the target.
 *
 * Technique: pre-order DFS carrying the prefix sum from the root. A HashMap
 * records, for each prefix sum, the shallowest level where it first occurred;
 * if (currentSum - target) was seen at level L, the path from level L+1 down
 * to the current node sums to target. Runs in O(n) time, O(h) extra space
 * beyond the map.
 */
public class Problem_06_LongestPathSum {

	/** Simple binary-tree node with a public value and child links. */
	public static class Node {
		public int value;
		public Node left;
		public Node right;

		public Node(int data) {
			this.value = data;
		}
	}

	/**
	 * Returns the length of the longest downward path summing to
	 * {@code sum}, or 0 if no such path exists (or the tree is empty).
	 */
	public static int getMaxLength(Node head, int sum) {
		HashMap<Integer, Integer> sumMap = new HashMap<Integer, Integer>();
		// Sentinel: the empty prefix (sum 0) occurs at level 0, so paths
		// that start at the root are also detected.
		sumMap.put(0, 0); // important
		return preOrder(head, sum, 0, 1, 0, sumMap);
	}

	/**
	 * DFS worker.
	 *
	 * @param head   current node (null ends the recursion)
	 * @param sum    target path sum
	 * @param preSum sum of values on the path strictly above {@code head}
	 * @param level  1-based depth of {@code head}
	 * @param maxLen best answer found so far
	 * @param sumMap prefix sum -> shallowest level at which it appeared
	 * @return best answer including this subtree
	 */
	public static int preOrder(Node head, int sum, int preSum, int level,
			int maxLen, HashMap<Integer, Integer> sumMap) {
		if (head == null) {
			return maxLen;
		}
		int curSum = preSum + head.value;
		// Record only the FIRST (shallowest) occurrence of a prefix sum so
		// that matched paths are as long as possible.
		if (!sumMap.containsKey(curSum)) {
			sumMap.put(curSum, level);
		}
		if (sumMap.containsKey(curSum - sum)) {
			maxLen = Math.max(level - sumMap.get(curSum - sum), maxLen);
		}
		maxLen = preOrder(head.left, sum, curSum, level + 1, maxLen, sumMap);
		maxLen = preOrder(head.right, sum, curSum, level + 1, maxLen, sumMap);
		// Backtrack: remove the entry only if THIS node created it (an
		// ancestor's identical prefix sum must survive).
		if (level == sumMap.get(curSum)) {
			sumMap.remove(curSum);
		}
		return maxLen;
	}

	// ----- test helpers: print the tree rotated 90 degrees -----

	/** Prints the tree sideways (root at the left). */
	public static void printTree(Node head) {
		System.out.println("Binary Tree:");
		printInOrder(head, 0, "H", 17);
		System.out.println();
	}

	/**
	 * Reverse in-order walk so the right subtree prints above the root;
	 * {@code to} marks the node's relation to its parent (H/v/^).
	 */
	public static void printInOrder(Node head, int height, String to, int len) {
		if (head == null) {
			return;
		}
		printInOrder(head.right, height + 1, "v", len);
		String val = to + head.value + to;
		int lenM = val.length();
		int lenL = (len - lenM) / 2;
		int lenR = len - lenM - lenL;
		val = getSpace(lenL) + val + getSpace(lenR);
		System.out.println(getSpace(height * len) + val);
		printInOrder(head.left, height + 1, "^", len);
	}

	/** Returns a string of {@code num} spaces. */
	public static String getSpace(int num) {
		// Idiom fix: StringBuilder instead of StringBuffer — this is purely
		// local, so the synchronized StringBuffer added cost for no benefit.
		StringBuilder buf = new StringBuilder();
		for (int i = 0; i < num; i++) {
			buf.append(' ');
		}
		return buf.toString();
	}

	public static void main(String[] args) {
		Node head = new Node(-3);
		head.left = new Node(3);
		head.right = new Node(-9);
		head.left.left = new Node(1);
		head.left.right = new Node(0);
		head.left.right.left = new Node(1);
		head.left.right.right = new Node(6);
		head.right.left = new Node(2);
		head.right.right = new Node(1);
		printTree(head);
		System.out.println(getMaxLength(head, 6));
		System.out.println(getMaxLength(head, -9));
	}
}
gpl-3.0
mark47/OESandbox
app/src/us/mn/state/health/lims/analyte/action/AnalyteAction.java
4533
/**
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations under
* the License.
*
* The Original Code is OpenELIS code.
*
* Copyright (C) The Minnesota Department of Health.  All Rights Reserved.
*/
package us.mn.state.health.lims.analyte.action;

import java.util.List;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.beanutils.PropertyUtils;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.apache.struts.action.DynaActionForm;

import us.mn.state.health.lims.analyte.dao.AnalyteDAO;
import us.mn.state.health.lims.analyte.daoimpl.AnalyteDAOImpl;
import us.mn.state.health.lims.analyte.valueholder.Analyte;
import us.mn.state.health.lims.common.action.BaseAction;

/**
 * Struts action that prepares the analyte add/edit page: loads an existing
 * analyte when an ID parameter is present (or starts a blank one), decides
 * whether next/previous navigation is possible, and populates the form.
 *
 * @author diane benz
 *
 * To change this generated comment edit the template variable "typecomment":
 * Window>Preferences>Java>Templates. To enable and disable the creation of type
 * comments go to Window>Preferences>Java>Code Generation.
 */
public class AnalyteAction extends BaseAction {

	// Tracks whether the current request is creating a new analyte; used by
	// the page title/subtitle key methods below.
	// NOTE(review): Struts 1 actions are typically shared singletons, so
	// per-request state in an instance field looks race-prone — confirm.
	private boolean isNew = false;

	protected ActionForward performAction(ActionMapping mapping,
			ActionForm form, HttpServletRequest request,
			HttpServletResponse response) throws Exception {
		// The first job is to determine if we are coming to this action with an
		// ID parameter in the request. If there is no parameter, we are
		// creating a new Analyte.
		// If there is a parameter present, we should bring up an existing
		// Analyte to edit.

		String id = request.getParameter(ID);

		String forward = FWD_SUCCESS;
		// Defaults: editing allowed, navigation buttons disabled until the
		// DAO lookups below prove there are neighbouring records.
		request.setAttribute(ALLOW_EDITS_KEY, "true");
		request.setAttribute(PREVIOUS_DISABLED, "true");
		request.setAttribute(NEXT_DISABLED, "true");

		DynaActionForm dynaForm = (DynaActionForm) form;

		// initialize the form
		dynaForm.initialize(mapping);

		Analyte analyte = new Analyte();

		if ((id != null) && (!"0".equals(id))) { // this is an existing
			// analyte

			analyte.setId(id);
			AnalyteDAO analyteDAO = new AnalyteDAOImpl();
			analyteDAO.getData(analyte);

			// initialize selectedAnalyteId (the parent analyte, if any)
			if (analyte.getAnalyte() != null) {
				analyte.setSelectedAnalyteId(analyte.getAnalyte().getId());
			}

			isNew = false; // this is to set correct page title

			// do we need to enable next or previous?
			//bugzilla 1427 pass in name not id
			List analytes = analyteDAO.getNextAnalyteRecord(analyte.getAnalyteName());
			if (analytes.size() > 0) {
				// enable next button
				request.setAttribute(NEXT_DISABLED, "false");
			}
			//bugzilla 1427 pass in name not id
			analytes = analyteDAO.getPreviousAnalyteRecord(analyte.getAnalyteName());
			if (analytes.size() > 0) {
				// enable next button
				request.setAttribute(PREVIOUS_DISABLED, "false");
			}
			// end of logic to enable next or previous button
		} else { // this is a new analyte
			// default isActive to 'Y'
			analyte.setIsActive(YES);
			isNew = true; // this is to set correct page title
		}

		if (analyte.getId() != null && !analyte.getId().equals("0")) {
			request.setAttribute(ID, analyte.getId());
		}

		// populate form from valueholder
		PropertyUtils.copyProperties(form, analyte);

		// Load all analytes so the JSP can offer a parent-analyte dropdown.
		AnalyteDAO analDAO = new AnalyteDAOImpl();

		List parentAnalytes = analDAO.getAllAnalytes();

		// set parentAnalyteName to the display name matching the selected id
		String parentAnalyteName = null;
		for (int i = 0; i < parentAnalytes.size(); i++) {
			Analyte parentAnalyte = (Analyte) parentAnalytes.get(i);
			if (parentAnalyte.getId().equals(analyte.getSelectedAnalyteId())) {
				parentAnalyteName = parentAnalyte.getAnalyteName();
			}
		}

		PropertyUtils.setProperty(form, "parentAnalytes", parentAnalytes);
		PropertyUtils.setProperty(form, "parentAnalyteName", parentAnalyteName);

		return mapping.findForward(forward);
	}

	/** Title key depends on whether we are adding or editing (see isNew). */
	protected String getPageTitleKey() {
		if (isNew) {
			return "analyte.add.title";
		} else {
			return "analyte.edit.title";
		}
	}

	/** Subtitle key; same key pair as the title. */
	protected String getPageSubtitleKey() {
		if (isNew) {
			return "analyte.add.title";
		} else {
			return "analyte.edit.title";
		}
	}
}
mpl-2.0
aihua/opennms
core/jstl-support/src/main/java/org/apache/taglibs/standard/lang/jstl/PropertySuffix.java
2645
/*
 * The contents of this file are subject to the terms
 * of the Common Development and Distribution License
 * (the "License").  You may not use this file except
 * in compliance with the License.
 *
 * You can obtain a copy of the license at
 * glassfish/bootstrap/legal/CDDLv1.0.txt or
 * https://glassfish.dev.java.net/public/CDDLv1.0.html.
 * See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL
 * HEADER in each file and include the License file at
 * glassfish/bootstrap/legal/CDDLv1.0.txt.  If applicable,
 * add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your
 * own identifying information: Portions Copyright [yyyy]
 * [name of copyright owner]
 *
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * Portions Copyright Apache Software Foundation.
 */

package org.apache.taglibs.standard.lang.jstl;

import java.util.Map;

/**
 * A value suffix that reads a named property from the preceding value.
 * Specialization of ArraySuffix: the expression {@code a.b} is evaluated
 * exactly like {@code a["b"]}.
 *
 * @author Nathan Abramson - Art Technology Group
 * @author Shawn Bayern
 * @version $Change: 181177 $$DateTime: 2001/06/26 08:45:09 $$Author: kchung $
 **/

public class PropertySuffix
  extends ArraySuffix
{
  //-------------------------------------
  // Properties
  //-------------------------------------
  // property name

  /** Name of the property this suffix accesses. */
  String mName;

  public String getName ()
  { return mName; }

  public void setName (String pName)
  { mName = pName; }

  //-------------------------------------
  /**
   *
   * Creates a suffix for the given property name. The index expression of
   * the parent ArraySuffix is unused (null) because the "index" here is
   * the fixed property name.
   **/
  public PropertySuffix (String pName)
  {
    super (null);
    mName = pName;
  }

  //-------------------------------------
  /**
   *
   * Gets the value of the index: for a property access this is simply the
   * property name itself.
   **/
  Object evaluateIndex (Object pContext,
			VariableResolver pResolver,
			Map functions,
			String defaultPrefix,
			Logger pLogger)
    throws ELException
  {
    return mName;
  }

  //-------------------------------------
  /**
   *
   * Returns the operator symbol used when reporting errors.
   **/
  String getOperatorSymbol ()
  {
    return ".";
  }

  //-------------------------------------
  // ValueSuffix methods
  //-------------------------------------
  /**
   *
   * Renders this suffix back into EL source syntax.
   **/
  public String getExpressionString ()
  {
    return "." + StringLiteral.toIdentifierToken (mName);
  }

  //-------------------------------------
}
agpl-3.0
fluks/mupdf-x11-bookmarks
platform/java/src/com/artifex/mupdf/fitz/android/AndroidImage.java
442
package com.artifex.mupdf.fitz.android;

import android.graphics.Bitmap;

import com.artifex.mupdf.fitz.Context;
import com.artifex.mupdf.fitz.Image;

/**
 * A MuPDF {@link Image} backed by an Android {@link Bitmap}.
 */
public final class AndroidImage extends Image
{
	static {
		// Ensure the native library / MuPDF context is initialized before
		// any native call below is made.
		Context.init();
	}

	private native long newAndroidImageFromBitmap(Bitmap bitmap, long mask);

	/**
	 * Wraps a bitmap as a MuPDF image.
	 *
	 * @param bitmap the pixel data.
	 * @param mask   an optional mask image, or null for no mask.
	 */
	public AndroidImage(Bitmap bitmap, AndroidImage mask) {
		super(0);
		// Bug fix: the original dereferenced mask unconditionally, so the
		// natural "no mask" call site (mask == null) threw a
		// NullPointerException. Pass a 0 handle instead — the conventional
		// "no object" value for these JNI pointer fields; TODO confirm the
		// native newAndroidImageFromBitmap treats 0 as "no mask".
		pointer = newAndroidImageFromBitmap(bitmap, mask != null ? mask.pointer : 0);
	}
}
agpl-3.0
sabarish14/agreementmaker
AgreementMaker-CollaborationServer/app/models/ServerCandidateMapping.java
1404
package models;

import java.util.Date;

import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;

import play.data.validation.Constraints;
import play.db.ebean.Model;

/**
 * Persistent record of one candidate mapping sent to a user for feedback:
 * the source/target concept URIs, who received it, when it was sent and
 * answered, and the verdict given.
 */
@Entity
public class ServerCandidateMapping extends Model {

	private static final long serialVersionUID = 184274387042579061L;

	@Id
	@GeneratedValue(strategy=GenerationType.IDENTITY)
	// the id is unique only per type hierarchy
	public Long id;

	@Constraints.Required
	public String sourceURI;

	@Constraints.Required
	public String targetURI;

	@Constraints.Required
	public String userId;

	// When the candidate was sent to the user.
	@Temporal(TemporalType.TIMESTAMP)
	public Date timeSent;

	// When the user's answer came back.
	@Temporal(TemporalType.TIMESTAMP)
	public Date timeReceived;

	/** Possible user verdicts on a candidate mapping. */
	public enum FeedbackType {
		CORRECT, INCORRECT, SKIP, END_EXPERIMENT
	}

	@Enumerated(EnumType.STRING)
	public FeedbackType feedback;

	/**
	 * Formats the elapsed time between sending and receiving the candidate;
	 * returns an empty string when either timestamp is missing.
	 */
	public String getTimeInterval() {
		if (timeSent != null && timeReceived != null) {
			long elapsedMillis = timeReceived.getTime() - timeSent.getTime();
			return am.Utility.getFormattedTime(elapsedMillis);
		}
		return "";
	}

	public static Model.Finder<Long,ServerCandidateMapping> find =
			new Model.Finder<Long,ServerCandidateMapping>(Long.class, ServerCandidateMapping.class);
}
agpl-3.0
aihua/opennms
features/flows/rest/impl/src/main/java/org/opennms/netmgt/flows/rest/internal/classification/ErrorResponseUtils.java
2788
/******************************************************************************* * This file is part of OpenNMS(R). * * Copyright (C) 2018-2018 The OpenNMS Group, Inc. * OpenNMS(R) is Copyright (C) 1999-2018 The OpenNMS Group, Inc. * * OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc. * * OpenNMS(R) is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, * or (at your option) any later version. * * OpenNMS(R) is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with OpenNMS(R). If not, see: * http://www.gnu.org/licenses/ * * For more information contact: * OpenNMS(R) Licensing <license@opennms.org> * http://www.opennms.org/ * http://www.opennms.com/ *******************************************************************************/ package org.opennms.netmgt.flows.rest.internal.classification; import java.util.Map; import javax.ws.rs.core.Response; import org.opennms.netmgt.flows.classification.csv.CsvImportResult; import org.opennms.netmgt.flows.classification.error.Error; import org.opennms.netmgt.flows.rest.classification.CsvImportErrorDTO; import org.opennms.netmgt.flows.rest.classification.ErrorDTO; public class ErrorResponseUtils { public static Response createResponse(Error error) { return Response.status(Response.Status.BAD_REQUEST).entity(convert(error)).build(); } public static Response createResponse(CsvImportResult csvImportResult) { return Response.status(Response.Status.BAD_REQUEST).entity(convert(csvImportResult)).build(); } private static CsvImportErrorDTO convert(CsvImportResult importResult) { final CsvImportErrorDTO errorDTO = new 
CsvImportErrorDTO(); errorDTO.setSuccess(importResult.isSuccess()); if (importResult.getError() != null) { errorDTO.setError(convert(importResult.getError())); } for (Map.Entry<Long, Error> entry : importResult.getErrorMap().entrySet()) { errorDTO.addError(entry.getKey(), convert(entry.getValue())); } return errorDTO; } private static ErrorDTO convert(Error error) { final ErrorDTO errorDTO = new ErrorDTO(); errorDTO.setKey(error.getTemplate().getKey()); errorDTO.setContext(error.getContext()); errorDTO.setMessage(error.getFormattedMessage()); return errorDTO; } }
agpl-3.0
fluidware/Eastwood-Charts
source/org/jfree/chart/renderer/category/LevelRenderer.java
17481
/* =========================================================== * JFreeChart : a free chart library for the Java(tm) platform * =========================================================== * * (C) Copyright 2000-2011, by Object Refinery Limited and Contributors. * * Project Info: http://www.jfree.org/jfreechart/index.html * * This library is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public * License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. * * [Oracle and Java are registered trademarks of Oracle and/or its affiliates. * Other names may be trademarks of their respective owners.] * * ------------------ * LevelRenderer.java * ------------------ * (C) Copyright 2004-2009, by Object Refinery Limited. 
* * Original Author: David Gilbert (for Object Refinery Limited); * Contributor(s): Peter Kolb (patch 2511330); * * Changes * ------- * 09-Jan-2004 : Version 1 (DG); * 05-Nov-2004 : Modified drawItem() signature (DG); * 20-Apr-2005 : Renamed CategoryLabelGenerator * --> CategoryItemLabelGenerator (DG); * ------------- JFREECHART 1.0.x --------------------------------------------- * 23-Jan-2006 : Renamed getMaxItemWidth() --> getMaximumItemWidth() (DG); * 13-May-2008 : Code clean-up (DG); * 26-Jun-2008 : Added crosshair support (DG); * 23-Jan-2009 : Set more appropriate default shape in legend (DG); * 23-Jan-2009 : Added support for seriesVisible flags - see patch * 2511330 (PK) * */ package org.jfree.chart.renderer.category; import java.awt.Color; import java.awt.Graphics2D; import java.awt.Paint; import java.awt.Stroke; import java.awt.geom.Line2D; import java.awt.geom.Rectangle2D; import java.io.Serializable; import org.jfree.chart.HashUtilities; import org.jfree.chart.axis.CategoryAxis; import org.jfree.chart.axis.ValueAxis; import org.jfree.chart.entity.EntityCollection; import org.jfree.chart.event.RendererChangeEvent; import org.jfree.chart.labels.CategoryItemLabelGenerator; import org.jfree.chart.plot.CategoryPlot; import org.jfree.chart.plot.PlotOrientation; import org.jfree.chart.plot.PlotRenderingInfo; import org.jfree.data.category.CategoryDataset; import org.jfree.ui.RectangleEdge; import org.jfree.util.PublicCloneable; /** * A {@link CategoryItemRenderer} that draws individual data items as * horizontal lines, spaced in the same way as bars in a bar chart. The * example shown here is generated by the * <code>OverlaidBarChartDemo2.java</code> program included in the JFreeChart * Demo Collection: * <br><br> * <img src="../../../../../images/LevelRendererSample.png" * alt="LevelRendererSample.png" /> */ public class LevelRenderer extends AbstractCategoryItemRenderer implements Cloneable, PublicCloneable, Serializable { /** For serialization. 
*/ private static final long serialVersionUID = -8204856624355025117L; /** The default item margin percentage. */ public static final double DEFAULT_ITEM_MARGIN = 0.20; /** The margin between items within a category. */ private double itemMargin; /** The maximum item width as a percentage of the available space. */ private double maxItemWidth; /** * Creates a new renderer with default settings. */ public LevelRenderer() { super(); this.itemMargin = DEFAULT_ITEM_MARGIN; this.maxItemWidth = 1.0; // 100 percent, so it will not apply unless // changed setBaseLegendShape(new Rectangle2D.Float(-5.0f, -1.0f, 10.0f, 2.0f)); // set the outline paint to fully transparent, then the legend shape // will just have the same colour as the lines drawn by the renderer setBaseOutlinePaint(new Color(0, 0, 0, 0)); } /** * Returns the item margin. * * @return The margin. * * @see #setItemMargin(double) */ public double getItemMargin() { return this.itemMargin; } /** * Sets the item margin and sends a {@link RendererChangeEvent} to all * registered listeners. The value is expressed as a percentage of the * available width for plotting all the bars, with the resulting amount to * be distributed between all the bars evenly. * * @param percent the new margin. * * @see #getItemMargin() */ public void setItemMargin(double percent) { this.itemMargin = percent; fireChangeEvent(); } /** * Returns the maximum width, as a percentage of the available drawing * space. * * @return The maximum width. * * @see #setMaximumItemWidth(double) */ public double getMaximumItemWidth() { return getMaxItemWidth(); } /** * Sets the maximum item width, which is specified as a percentage of the * available space for all items, and sends a {@link RendererChangeEvent} * to all registered listeners. * * @param percent the percent. 
*
     * @see #getMaximumItemWidth()
     */
    public void setMaximumItemWidth(double percent) {
        // Delegates to the deprecated setter, which stores the value and
        // fires a RendererChangeEvent to registered listeners.
        setMaxItemWidth(percent);
    }

    /**
     * Initialises the renderer and returns a state object that will be passed
     * to subsequent calls to the drawItem method.
     * <p>
     * This method gets called once at the start of the process of drawing a
     * chart.
     *
     * @param g2  the graphics device.
     * @param dataArea  the area in which the data is to be plotted.
     * @param plot  the plot.
     * @param rendererIndex  the renderer index.
     * @param info  collects chart rendering information for return to caller.
     *
     * @return The renderer state.
     */
    public CategoryItemRendererState initialise(Graphics2D g2,
            Rectangle2D dataArea, CategoryPlot plot, int rendererIndex,
            PlotRenderingInfo info) {
        CategoryItemRendererState state = super.initialise(g2, dataArea, plot,
                rendererIndex, info);
        // Pre-compute the item (bar) width once per chart draw and cache it
        // in the state so drawItem() does not recompute it for every item.
        calculateItemWidth(plot, dataArea, rendererIndex, state);
        return state;
    }

    /**
     * Calculates the bar width and stores it in the renderer state.
     *
     * @param plot  the plot.
     * @param dataArea  the data area.
     * @param rendererIndex  the renderer index.
     * @param state  the renderer state (receives the computed bar width).
     */
    protected void calculateItemWidth(CategoryPlot plot,
            Rectangle2D dataArea, int rendererIndex,
            CategoryItemRendererState state) {
        CategoryAxis domainAxis = getDomainAxis(plot, rendererIndex);
        CategoryDataset dataset = plot.getDataset(rendererIndex);
        if (dataset != null) {
            int columns = dataset.getColumnCount();
            // If some series are hidden, size the bars using only the
            // visible series count; a negative value means "all visible".
            int rows = state.getVisibleSeriesCount() >= 0
                    ? state.getVisibleSeriesCount() : dataset.getRowCount();
            double space = 0.0;
            PlotOrientation orientation = plot.getOrientation();
            // The "space" available for items runs along the domain axis,
            // which is vertical for a horizontal plot orientation.
            if (orientation == PlotOrientation.HORIZONTAL) {
                space = dataArea.getHeight();
            }
            else if (orientation == PlotOrientation.VERTICAL) {
                space = dataArea.getWidth();
            }
            double maxWidth = space * getMaximumItemWidth();
            double categoryMargin = 0.0;
            double currentItemMargin = 0.0;
            // Margins only apply when there is more than one category/series.
            if (columns > 1) {
                categoryMargin = domainAxis.getCategoryMargin();
            }
            if (rows > 1) {
                currentItemMargin = getItemMargin();
            }
            // Space left over after axis margins, category margin and item
            // margin (all expressed as fractions of the total space).
            double used = space * (1 - domainAxis.getLowerMargin()
                    - domainAxis.getUpperMargin() - categoryMargin
                    - currentItemMargin);
            if ((rows * columns) > 0) {
                state.setBarWidth(Math.min(used / (rows * columns), maxWidth));
            }
            else {
                // Empty dataset: fall back to the full usable space, capped.
                state.setBarWidth(Math.min(used, maxWidth));
            }
        }
    }

    /**
     * Calculates the coordinate of the first "side" of a bar.  This will be
     * the minimum x-coordinate for a vertical bar, and the minimum
     * y-coordinate for a horizontal bar.
     *
     * @param plot  the plot.
     * @param orientation  the plot orientation.
     * @param dataArea  the data area.
     * @param domainAxis  the domain axis.
     * @param state  the renderer state (has the bar width precalculated).
     * @param row  the row index.
     * @param column  the column index.
     *
     * @return The coordinate.
     */
    protected double calculateBarW0(CategoryPlot plot,
            PlotOrientation orientation, Rectangle2D dataArea,
            CategoryAxis domainAxis, CategoryItemRendererState state,
            int row, int column) {
        // calculate bar width...
        double space = 0.0;
        if (orientation == PlotOrientation.HORIZONTAL) {
            space = dataArea.getHeight();
        }
        else {
            space = dataArea.getWidth();
        }
        double barW0 = domainAxis.getCategoryStart(column, getColumnCount(),
                dataArea, plot.getDomainAxisEdge());
        int seriesCount = state.getVisibleSeriesCount();
        if (seriesCount < 0) {
            seriesCount = getRowCount();
        }
        int categoryCount = getColumnCount();
        if (seriesCount > 1) {
            // Multiple series per category: offset within the category by the
            // (visible) row index, leaving a gap between series.
            double seriesGap = space * getItemMargin()
                    / (categoryCount * (seriesCount - 1));
            double seriesW = calculateSeriesWidth(space, domainAxis,
                    categoryCount, seriesCount);
            barW0 = barW0 + row * (seriesW + seriesGap)
                    + (seriesW / 2.0) - (state.getBarWidth() / 2.0);
        }
        else {
            // Single series: centre the bar on the category middle.
            barW0 = domainAxis.getCategoryMiddle(column, getColumnCount(),
                    dataArea, plot.getDomainAxisEdge())
                    - state.getBarWidth() / 2.0;
        }
        return barW0;
    }

    /**
     * Draws the bar for a single (series, category) data item.
     *
     * @param g2  the graphics device.
     * @param state  the renderer state.
     * @param dataArea  the data area.
     * @param plot  the plot.
     * @param domainAxis  the domain axis.
     * @param rangeAxis  the range axis.
     * @param dataset  the dataset.
     * @param row  the row index (zero-based).
     * @param column  the column index (zero-based).
     * @param pass  the pass index.
     */
    public void drawItem(Graphics2D g2, CategoryItemRendererState state,
            Rectangle2D dataArea, CategoryPlot plot, CategoryAxis domainAxis,
            ValueAxis rangeAxis, CategoryDataset dataset, int row, int column,
            int pass) {

        // nothing is drawn if the row index is not included in the list with
        // the indices of the visible rows...
        int visibleRow = state.getVisibleSeriesIndex(row);
        if (visibleRow < 0) {
            return;
        }

        // nothing is drawn for null values...
        Number dataValue = dataset.getValue(row, column);
        if (dataValue == null) {
            return;
        }

        double value = dataValue.doubleValue();

        PlotOrientation orientation = plot.getOrientation();
        // Note: the VISIBLE row index is used for positioning so hidden
        // series do not leave gaps between bars.
        double barW0 = calculateBarW0(plot, orientation, dataArea, domainAxis,
                state, visibleRow, column);
        RectangleEdge edge = plot.getRangeAxisEdge();
        double barL = rangeAxis.valueToJava2D(value, dataArea, edge);

        // draw the bar (a "level" is rendered as a single line, not a
        // filled rectangle)...
        Line2D line = null;
        double x = 0.0;
        double y = 0.0;
        if (orientation == PlotOrientation.HORIZONTAL) {
            x = barL;
            y = barW0 + state.getBarWidth() / 2.0;
            line = new Line2D.Double(barL, barW0, barL,
                    barW0 + state.getBarWidth());
        }
        else {
            x = barW0 + state.getBarWidth() / 2.0;
            y = barL;
            line = new Line2D.Double(barW0, barL,
                    barW0 + state.getBarWidth(), barL);
        }
        Stroke itemStroke = getItemStroke(row, column);
        Paint itemPaint = getItemPaint(row, column);
        g2.setStroke(itemStroke);
        g2.setPaint(itemPaint);
        g2.draw(line);

        CategoryItemLabelGenerator generator = getItemLabelGenerator(row,
                column);
        if (generator != null && isItemLabelVisible(row, column)) {
            drawItemLabel(g2, orientation, dataset, row, column, x, y,
                    (value < 0.0));
        }

        // submit the current data point as a crosshair candidate
        int datasetIndex = plot.indexOf(dataset);
        updateCrosshairValues(state.getCrosshairState(),
                dataset.getRowKey(row), dataset.getColumnKey(column), value,
                datasetIndex, barW0, barL, orientation);

        // collect entity and tool tip information...
        EntityCollection entities = state.getEntityCollection();
        if (entities != null) {
            addItemEntity(entities, dataset, row, column, line.getBounds());
        }
    }

    /**
     * Calculates the available space for each series.
     *
     * @param space  the space along the entire axis (in Java2D units).
     * @param axis  the category axis.
     * @param categories  the number of categories.
     * @param series  the number of series.
     *
     * @return The width of one series.
     */
    protected double calculateSeriesWidth(double space, CategoryAxis axis,
            int categories, int series) {
        // Fraction of the space left after the item margin and both axis
        // margins (plus the category margin when there are >= 2 categories).
        double factor = 1.0 - getItemMargin() - axis.getLowerMargin()
                - axis.getUpperMargin();
        if (categories > 1) {
            factor = factor - axis.getCategoryMargin();
        }
        return (space * factor) / (categories * series);
    }

    /**
     * Returns the Java2D coordinate for the middle of the specified data item.
     *
     * @param rowKey  the row key.
     * @param columnKey  the column key.
     * @param dataset  the dataset.
     * @param axis  the axis.
     * @param area  the drawing area.
     * @param edge  the edge along which the axis lies.
     *
     * @return The Java2D coordinate.
     *
     * @since 1.0.11
     */
    public double getItemMiddle(Comparable rowKey, Comparable columnKey,
            CategoryDataset dataset, CategoryAxis axis, Rectangle2D area,
            RectangleEdge edge) {
        return axis.getCategorySeriesMiddle(columnKey, rowKey, dataset,
                this.itemMargin, area, edge);
    }

    /**
     * Tests an object for equality with this instance.
     *
     * @param obj  the object (<code>null</code> permitted).
     *
     * @return A boolean.
     */
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof LevelRenderer)) {
            return false;
        }
        LevelRenderer that = (LevelRenderer) obj;
        if (this.itemMargin != that.itemMargin) {
            return false;
        }
        if (this.maxItemWidth != that.maxItemWidth) {
            return false;
        }
        return super.equals(obj);
    }

    /**
     * Returns a hash code for this instance.
     *
     * @return A hash code.
     */
    public int hashCode() {
        // Combines the superclass hash with both fields compared in equals(),
        // keeping the equals/hashCode contract intact.
        int hash = super.hashCode();
        hash = HashUtilities.hashCode(hash, this.itemMargin);
        hash = HashUtilities.hashCode(hash, this.maxItemWidth);
        return hash;
    }

    /**
     * Returns the maximum width, as a percentage of the available drawing
     * space.
     *
     * @return The maximum width.
     *
     * @deprecated Use {@link #getMaximumItemWidth()} instead.
     */
    public double getMaxItemWidth() {
        return this.maxItemWidth;
    }

    /**
     * Sets the maximum item width, which is specified as a percentage of the
     * available space for all items, and sends a {@link RendererChangeEvent}
     * to all registered listeners.
     *
     * @param percent  the percent.
     *
     * @deprecated Use {@link #setMaximumItemWidth(double)} instead.
     */
    public void setMaxItemWidth(double percent) {
        this.maxItemWidth = percent;
        fireChangeEvent();
    }

}
lgpl-2.1
bawn92/kurento-java
kurento-jsonrpc/kurento-jsonrpc-server/src/main/java/org/kurento/jsonrpc/internal/server/config/DefaultJsonRpcHandlerRegistry.java
2473
/* * Copyright 2002-2013 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kurento.jsonrpc.internal.server.config; import java.util.ArrayList; import java.util.List; import org.springframework.web.socket.WebSocketHandler; import org.springframework.web.socket.config.annotation.WebSocketHandlerRegistry; import org.kurento.jsonrpc.JsonRpcHandler; import org.kurento.jsonrpc.server.JsonRpcHandlerRegistration; import org.kurento.jsonrpc.server.JsonRpcHandlerRegistry; /** * A {@link WebSocketHandlerRegistry} that maps {@link WebSocketHandler}s to * URLs for use in a Servlet container. * * @author Rossen Stoyanchev * @since 4.0 */ public class DefaultJsonRpcHandlerRegistry implements JsonRpcHandlerRegistry { private final List<DefaultJsonRpcHandlerRegistration> registrations = new ArrayList<>(); @Override public JsonRpcHandlerRegistration addHandler( JsonRpcHandler<?> webSocketHandler, String... paths) { DefaultJsonRpcHandlerRegistration registration = new DefaultJsonRpcHandlerRegistration(); registration.addHandler(webSocketHandler, paths); this.registrations.add(registration); return registration; } @Override public JsonRpcHandlerRegistration addPerSessionHandler( Class<? extends JsonRpcHandler<?>> handlerClass, String... 
paths) { DefaultJsonRpcHandlerRegistration registration = new DefaultJsonRpcHandlerRegistration(); registration.addPerSessionHandler(handlerClass, paths); this.registrations.add(registration); return registration; } @Override public JsonRpcHandlerRegistration addPerSessionHandler(String beanName, String... paths) { DefaultJsonRpcHandlerRegistration registration = new DefaultJsonRpcHandlerRegistration(); registration.addPerSessionHandler(beanName, paths); this.registrations.add(registration); return registration; } public List<DefaultJsonRpcHandlerRegistration> getRegistrations() { return registrations; } }
lgpl-2.1
jochenvdv/checkstyle
src/it/java/com/google/checkstyle/test/chapter7javadoc/rule713atclauses/JavadocTagContinuationIndentationTest.java
2476
//////////////////////////////////////////////////////////////////////////////// // checkstyle: Checks Java source code for adherence to a set of rules. // Copyright (C) 2001-2017 the original author or authors. // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA //////////////////////////////////////////////////////////////////////////////// package com.google.checkstyle.test.chapter7javadoc.rule713atclauses; import org.junit.Test; import com.google.checkstyle.test.base.AbstractModuleTestSupport; import com.puppycrawl.tools.checkstyle.api.Configuration; import com.puppycrawl.tools.checkstyle.checks.javadoc.JavadocTagContinuationIndentationCheck; public class JavadocTagContinuationIndentationTest extends AbstractModuleTestSupport { @Override protected String getPackageLocation() { return "com/google/checkstyle/test/chapter7javadoc/rule713atclauses"; } @Test public void testWithDefaultConfiguration() throws Exception { final String msg = getCheckMessage(JavadocTagContinuationIndentationCheck.class, "tag.continuation.indent", 4); final String[] expected = { "47: " + msg, "109: " + msg, "112: " + msg, "203: " + msg, "206: " + msg, "221: " + msg, "223: " + msg, "285: " + msg, "288: " + msg, "290: " + msg, "310: " + msg, "322: " + msg, }; final Configuration checkConfig = 
getModuleConfig("JavadocTagContinuationIndentation"); final String filePath = getPath("InputJavaDocTagContinuationIndentation.java"); final Integer[] warnList = getLinesWithWarn(filePath); verify(checkConfig, filePath, expected, warnList); } }
lgpl-2.1
austiine04/RapidFTR---Android
RapidFTR-Android/src/main/java/com/rapidftr/view/fields/NumericField.java
326
package com.rapidftr.view.fields;

import android.content.Context;
import android.util.AttributeSet;

/**
 * Form field widget for numeric values.
 * <p>
 * NOTE(review): this subclass currently adds no numeric-specific behavior of
 * its own (e.g. it does not restrict the keyboard or input type to digits) —
 * presumably that is configured in {@code TextField} or the layout XML;
 * confirm before relying on numeric-only input here.
 */
public class NumericField extends TextField {

    /**
     * Creates the field programmatically.
     *
     * @param context the Android context
     */
    public NumericField(Context context) {
        super(context);
    }

    /**
     * Creates the field when inflated from layout XML.
     *
     * @param context the Android context
     * @param attrs   the XML attribute set
     */
    public NumericField(Context context, AttributeSet attrs) {
        super(context, attrs);
    }
}
lgpl-3.0
altsoft/PlatypusJS
platypus-js-sql-parser/src/main/java/net/sf/jsqlparser/util/deparser/ReplaceDeParser.java
6901
package net.sf.jsqlparser.util.deparser;

import java.util.Iterator;

import net.sf.jsqlparser.expression.Expression;
import net.sf.jsqlparser.expression.ExpressionVisitor;
import net.sf.jsqlparser.expression.operators.relational.ExpressionList;
import net.sf.jsqlparser.expression.operators.relational.ItemsListVisitor;
import net.sf.jsqlparser.schema.Column;
import net.sf.jsqlparser.statement.replace.Replace;
import net.sf.jsqlparser.statement.select.SelectVisitor;
import net.sf.jsqlparser.statement.select.SubSelect;

/**
 * A class to de-parse (that is, transform from JSqlParser hierarchy into a
 * string) a {@link net.sf.jsqlparser.statement.replace.Replace}.
 * <p>
 * The de-parser also re-emits any comments that the parser attached to the
 * individual parts of the statement.
 */
public class ReplaceDeParser implements ItemsListVisitor {

    /** Output buffer; shared with the expression and select visitors. */
    protected StringBuilder buffer;
    /** Visitor used to de-parse column value expressions. */
    protected ExpressionVisitor expressionVisitor;
    /** Visitor used to de-parse sub-selects. */
    protected SelectVisitor selectVisitor;

    public ReplaceDeParser() {
    }

    /**
     * @param expressionVisitor a {@link ExpressionVisitor} to de-parse expressions. It has to share the same<br>
     * StringBuilder (buffer parameter) as this object in order to work
     * @param selectVisitor a {@link SelectVisitor} to de-parse {@link net.sf.jsqlparser.statement.select.Select}s.
     * It has to share the same<br>
     * StringBuilder (buffer parameter) as this object in order to work
     * @param buffer the buffer that will be filled with the select
     */
    public ReplaceDeParser(ExpressionVisitor expressionVisitor, SelectVisitor selectVisitor,
            StringBuilder buffer) {
        this.buffer = buffer;
        this.expressionVisitor = expressionVisitor;
        this.selectVisitor = selectVisitor;
    }

    public StringBuilder getBuffer() {
        return buffer;
    }

    public void setBuffer(StringBuilder buffer) {
        this.buffer = buffer;
    }

    /**
     * Appends the textual form of the given REPLACE statement (including any
     * attached comments) to the shared buffer.
     *
     * @param replace the statement to de-parse
     */
    public void deParse(Replace replace) {
        buffer.append(replace.getComment() != null
                ? replace.getComment() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                .append("Replace ");
        if (replace.isUseInto()) {
            // BUGFIX: the guard previously tested getComment() but appended
            // getCommentInto(), which dropped the Into-comment when only it
            // was set and emitted the literal "null " when only getComment()
            // was set.  Guard on the value actually appended.
            buffer.append(replace.getCommentInto() != null
                    ? replace.getCommentInto() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                    .append("Into ");
        }
        // NOTE(review): getComment() is appended here a second time (it was
        // already emitted before "Replace ").  Possibly a distinct
        // table-comment getter was intended — confirm against the grammar
        // before changing; behavior preserved as-is.
        buffer.append(replace.getComment() != null
                ? replace.getComment() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                .append(replace.getTable().getWholeTableName());
        if (replace.getExpressions() != null && replace.getColumns() != null) {
            buffer.append(replace.getCommentSet() != null
                    ? " " + replace.getCommentSet() + ExpressionDeParser.LINE_SEPARATOR : "")
                    .append(" SET ");
            // each element from expressions match up with a column from columns.
            int columnsCounter = 0;
            for (int i = 0, s = replace.getColumns().size(); i < s; i++) {
                Column column = (Column) replace.getColumns().get(i);
                buffer.append(column.getComment() != null
                        ? column.getComment() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                        .append(column.getWholeColumnName())
                        .append(!replace.getCommentEqlasColums().get(i).toString().isEmpty()
                                ? " " + replace.getCommentEqlasColums().get(i)
                                        + ExpressionDeParser.LINE_SEPARATOR : "")
                        .append(" = ");
                Expression expression = (Expression) replace.getExpressions().get(i);
                expression.accept(expressionVisitor);
                if (i < replace.getColumns().size() - 1) {
                    buffer.append(!replace.getCommentCommaExpr().get(i).toString().isEmpty()
                            ? " " + replace.getCommentCommaExpr().get(i) + " " : "");
                    // Wrap the line after every third assignment for readability.
                    if (columnsCounter++ == 2) {
                        columnsCounter = 0;
                        buffer.append(ExpressionDeParser.LINE_SEPARATOR).append(", ");
                    } else {
                        buffer.append(", ");
                    }
                }
            }
        } else {
            if (replace.getColumns() != null) {
                buffer.append(replace.getCommentBeforeColums() != null
                        ? " " + replace.getCommentBeforeColums() + ExpressionDeParser.LINE_SEPARATOR : "")
                        .append(" (");
                for (int i = 0; i < replace.getColumns().size(); i++) {
                    Column column = (Column) replace.getColumns().get(i);
                    buffer.append(column.getComment() != null
                            ? column.getComment() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                            .append(column.getWholeColumnName());
                    if (i < replace.getColumns().size() - 1) {
                        buffer.append(!"".equals(replace.getCommentCommaColums().get(i))
                                ? " " + replace.getCommentCommaColums().get(i)
                                        + ExpressionDeParser.LINE_SEPARATOR : "")
                                .append(", ");
                    }
                }
                buffer.append(replace.getCommentAfterColums() != null
                        ? replace.getCommentAfterColums() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                        .append(") ");
            }
        }
        if (replace.isUseValues()) {
            buffer.append(replace.getCommentValues() != null
                    ? " " + replace.getCommentValues() : "")
                    .append(ExpressionDeParser.LINE_SEPARATOR).append(" Values ")
                    .append(replace.getCommentBeforeItems() != null
                            ? replace.getCommentBeforeItems() + " "
                                    + ExpressionDeParser.LINE_SEPARATOR : "");
        }
        // Emits either the VALUES expression list or a sub-select.
        replace.getItemsList().accept(this);
        if (replace.isUseValues()) {
            buffer.append(replace.getCommentAfterItems() != null
                    ? replace.getCommentAfterItems() + " " + ExpressionDeParser.LINE_SEPARATOR : "")
                    .append(")");
        }
        buffer.append(!"".equals(replace.getEndComment())
                ? " " + replace.getEndComment() : "");
    }

    /**
     * De-parses a VALUES expression list; the closing ")" is written by
     * {@link #deParse(Replace)} when {@code isUseValues()} is set.
     */
    @Override
    public void visit(ExpressionList expressionList) {
        buffer.append(ExpressionDeParser.LINE_SEPARATOR).append("(");
        int valuesCounter = 0;
        for (Iterator iter = expressionList.getExpressions().iterator(); iter.hasNext();) {
            Expression expression = (Expression) iter.next();
            expression.accept(expressionVisitor);
            if (iter.hasNext()) {
                // Wrap the line after every third value for readability.
                if (valuesCounter++ == 2) {
                    valuesCounter = 0;
                    buffer.append(ExpressionDeParser.LINE_SEPARATOR).append(", ");
                } else {
                    buffer.append(", ");
                }
            }
        }
    }

    /** De-parses a sub-select used as the REPLACE source. */
    @Override
    public void visit(SubSelect subSelect) {
        subSelect.getSelectBody().accept(selectVisitor);
    }

    public ExpressionVisitor getExpressionVisitor() {
        return expressionVisitor;
    }

    public SelectVisitor getSelectVisitor() {
        return selectVisitor;
    }

    public void setExpressionVisitor(ExpressionVisitor visitor) {
        expressionVisitor = visitor;
    }

    public void setSelectVisitor(SelectVisitor visitor) {
        selectVisitor = visitor;
    }

}
apache-2.0
ingokegel/intellij-community
platform/testFramework/src/com/intellij/facet/mock/MockFacetConfiguration.java
2380
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.facet.mock;

import com.intellij.facet.FacetConfiguration;
import com.intellij.facet.ui.FacetEditorContext;
import com.intellij.facet.ui.FacetEditorTab;
import com.intellij.facet.ui.FacetValidatorsManager;
import com.intellij.openapi.util.InvalidDataException;
import com.intellij.openapi.util.WriteExternalException;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileManager;
import org.jdom.Element;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/**
 * Test-only facet configuration holding a string payload ("data") and a list
 * of root files, both persisted via JDOM serialization.
 */
public class MockFacetConfiguration implements FacetConfiguration {
  private final List<VirtualFile> roots = new ArrayList<>();
  private String data = "";
  private MockFacetEditorTab editorTab;

  public MockFacetConfiguration(String data) {
    this.data = data;
  }

  public MockFacetConfiguration() {
  }

  @Override
  public FacetEditorTab[] createEditorTabs(final FacetEditorContext editorContext,
                                           final FacetValidatorsManager validatorsManager) {
    // A single mock tab is created lazily here and remembered for tests.
    editorTab = new MockFacetEditorTab(this);
    return new FacetEditorTab[]{editorTab};
  }

  public MockFacetEditorTab getEditor() {
    return editorTab;
  }

  public void addRoot(VirtualFile root) {
    roots.add(root);
  }

  public void removeRoot(VirtualFile root) {
    roots.remove(root);
  }

  public void setData(final String data) {
    this.data = data;
  }

  public String getData() {
    return data;
  }

  @Override
  public void readExternal(Element element) throws InvalidDataException {
    // Missing "data" attribute deserializes to the empty string.
    data = StringUtil.notNullize(element.getAttributeValue("data"));
    roots.clear();
    for (Element rootElement : element.getChildren("root")) {
      roots.add(VirtualFileManager.getInstance().findFileByUrl(rootElement.getAttributeValue("url")));
    }
  }

  @Override
  public void writeExternal(Element element) throws WriteExternalException {
    // The empty string is the default, so it is not written out.
    if (!data.isEmpty()) {
      element.setAttribute("data", data);
    }
    for (VirtualFile root : roots) {
      Element rootElement = new Element("root");
      rootElement.setAttribute("url", root.getUrl());
      element.addContent(rootElement);
    }
  }

  public Collection<VirtualFile> getRoots() {
    return roots;
  }
}
apache-2.0
robin13/elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java
35458
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.recovery; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.procedures.IntProcedure; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.English; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.MockIndexEventListener; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase.forEachFileRecursively; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class RelocationIT extends ESIntegTestCase { private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockTransportService.TestPlugin.class, MockIndexEventListener.TestPlugin.class); } @Override protected void beforeIndexDeletion() throws Exception { super.beforeIndexDeletion(); assertActiveCopiesEstablishedPeerRecoveryRetentionLeases(); internalCluster().assertSeqNos(); internalCluster().assertSameDocIdsOnShards(); } @Override public Settings indexSettings() { return Settings.builder().put(super.indexSettings()) // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. 
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); } public void testSimpleRelocationNoIndexing() { logger.info("--> starting [node1] ..."); final String node_1 = internalCluster().startNode(); logger.info("--> creating test index ..."); prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) ).get(); logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } logger.info("--> flush so we have an actual index"); client().admin().indices().prepareFlush().execute().actionGet(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } logger.info("--> verifying count"); client().admin().indices().prepareRefresh().execute().actionGet(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); final String node_2 = internalCluster().startNode(); ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); client().admin().cluster().prepareReroute() .add(new MoveAllocationCommand("test", 0, node_1, node_2)) .execute().actionGet(); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count again..."); 
client().admin().indices().prepareRefresh().execute().actionGet(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } public void testRelocationWhileIndexingRandom() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 0 : 1; int numberOfNodes = numberOfReplicas == 0 ? 2 : 3; logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes); String[] nodes = new String[numberOfNodes]; logger.info("--> starting [node1] ..."); nodes[0] = internalCluster().startNode(); logger.info("--> creating test index ..."); prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) ).get(); for (int i = 2; i <= numberOfNodes; i++) { logger.info("--> starting [node{}] ...", i); nodes[i - 1] = internalCluster().startNode(); if (i != numberOfNodes) { ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i)) .setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } int numDocs = scaledRandomIntBetween(200, 2500); try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type1", client(), numDocs)) { logger.info("--> waiting for {} docs to be indexed ...", numDocs); waitForDocs(numDocs, indexer); logger.info("--> {} docs indexed", numDocs); logger.info("--> starting relocations..."); int nodeShiftBased = numberOfReplicas; // if we have replicas shift those for (int i = 0; i < numberOfRelocations; i++) { int fromNode = (i % 2); int toNode = fromNode == 0 ? 
1 : 0; fromNode += nodeShiftBased; toNode += nodeShiftBased; numDocs = scaledRandomIntBetween(200, 1000); logger.debug("--> Allow indexer to index [{}] documents", numDocs); indexer.continueIndexing(numDocs); logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]); client().admin().cluster().prepareReroute() .add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])) .get(); if (rarely()) { logger.debug("--> flushing"); client().admin().indices().prepareFlush().get(); } ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); indexer.pauseIndexing(); logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); } logger.info("--> done relocations"); logger.info("--> waiting for indexing threads to stop ..."); indexer.stopAndAwaitStopped(); logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); client().admin().indices().prepareRefresh("test").execute().actionGet(); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { logger.info("--> START search test round {}", i + 1); SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()) .setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits(); ranOnce = true; if (hits.getTotalHits().value != indexer.totalIndexedDocs()) { int[] hitIds = new int[(int) indexer.totalIndexedDocs()]; for (int hit = 0; hit < indexer.totalIndexedDocs(); hit++) { hitIds[hit] = hit + 1; } IntHashSet set = IntHashSet.from(hitIds); for (SearchHit hit : hits.getHits()) { int id = Integer.parseInt(hit.getId()); if (set.remove(id) == false) { logger.error("Extra id [{}]", id); } } set.forEach((IntProcedure) value -> { logger.error("Missing id 
[{}]", value); }); } assertThat(hits.getTotalHits().value, equalTo(indexer.totalIndexedDocs())); logger.info("--> DONE search test round {}", i + 1); } if (ranOnce == false) { fail(); } } } public void testRelocationWhileRefreshing() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 0 : 1; int numberOfNodes = numberOfReplicas == 0 ? 2 : 3; logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes); String[] nodes = new String[numberOfNodes]; logger.info("--> starting [node_0] ..."); nodes[0] = internalCluster().startNode(); logger.info("--> creating test index ..."); prepareCreate( "test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) // we want to control refreshes .put("index.refresh_interval", -1) ).get(); for (int i = 1; i < numberOfNodes; i++) { logger.info("--> starting [node_{}] ...", i); nodes[i] = internalCluster().startNode(); if (i != numberOfNodes - 1) { ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i + 1)).setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } final Semaphore postRecoveryShards = new Semaphore(0); final IndexEventListener listener = new IndexEventListener() { @Override public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) { if (currentState == IndexShardState.POST_RECOVERY) { postRecoveryShards.release(); } } }; for (MockIndexEventListener.TestEventListener eventListener : internalCluster() .getInstances(MockIndexEventListener.TestEventListener.class)) { eventListener.setNewDelegate(listener); } logger.info("--> starting relocations..."); int 
nodeShiftBased = numberOfReplicas; // if we have replicas shift those for (int i = 0; i < numberOfRelocations; i++) { int fromNode = (i % 2); int toNode = fromNode == 0 ? 1 : 0; fromNode += nodeShiftBased; toNode += nodeShiftBased; List<IndexRequestBuilder> builders1 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { builders1.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); } List<IndexRequestBuilder> builders2 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { builders2.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); } logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]); client().admin().cluster().prepareReroute() .add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])) .get(); logger.debug("--> index [{}] documents", builders1.size()); indexRandom(false, true, builders1); // wait for shard to reach post recovery postRecoveryShards.acquire(1); logger.debug("--> index [{}] documents", builders2.size()); indexRandom(true, true, builders2); // verify cluster was finished. 
assertFalse(client().admin().cluster().prepareHealth() .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .setTimeout("30s").get().isTimedOut()); logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); logger.debug("--> verifying all searches return the same number of docs"); long expectedCount = -1; for (Client client : clients()) { SearchResponse response = client.prepareSearch("test").setPreference("_local").setSize(0).get(); assertNoFailures(response); if (expectedCount < 0) { expectedCount = response.getHits().getTotalHits().value; } else { assertEquals(expectedCount, response.getHits().getTotalHits().value); } } } } public void testCancellationCleansTempFiles() throws Exception { final String indexName = "test"; final String p_node = internalCluster().startNode(); prepareCreate(indexName, Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) ).get(); internalCluster().startNode(); internalCluster().startNode(); List<IndexRequestBuilder> requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); flush(); int allowedFailures = randomIntBetween(3, 5); // the default of the `index.allocation.max_retries` is 5. 
logger.info("--> blocking recoveries from primary (allowed failures: [{}])", allowedFailures); CountDownLatch corruptionCount = new CountDownLatch(allowedFailures); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, p_node); MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, p_node); for (DiscoveryNode node : clusterService.state().nodes()) { if (node.equals(clusterService.localNode()) == false) { mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, node.getName()), new RecoveryCorruption(corruptionCount)); } } client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); corruptionCount.await(); logger.info("--> stopping replica assignment"); assertAcked(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.info("--> wait for all replica shards to be removed, on all nodes"); assertBusy(() -> { for (String node : internalCluster().getNodeNames()) { if (node.equals(p_node)) { continue; } ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); assertThat(node + " indicates assigned replicas", state.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); } }); logger.info("--> verifying no temporary recoveries are left"); for (String node : internalCluster().getNodeNames()) { NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, node); final Path shardLoc = nodeEnvironment.availableShardPath(new ShardId(indexName, "_na_", 0)); if (Files.exists(shardLoc)) { assertBusy(() -> { try { forEachFileRecursively(shardLoc, (file, attrs) -> assertThat("found a temporary recovery file: " + file, 
file.getFileName().toString(), not(startsWith("recovery.")))); } catch (IOException e) { throw new AssertionError("failed to walk file tree starting at [" + shardLoc + "]", e); } }); } } } public void testIndexSearchAndRelocateConcurrently() throws Exception { int halfNodes = randomIntBetween(1, 3); Settings[] nodeSettings = Stream.concat( Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes), Stream.generate(() -> Settings.builder().put("node.attr.color", "red").build()).limit(halfNodes) ).toArray(Settings[]::new); List<String> nodes = internalCluster().startNodes(nodeSettings); String[] blueNodes = nodes.subList(0, halfNodes).stream().toArray(String[]::new); String[] redNodes = nodes.subList(halfNodes, nodes.size()).stream().toArray(String[]::new); logger.info("blue nodes: {}", (Object)blueNodes); logger.info("red nodes: {}", (Object)redNodes); ensureStableCluster(halfNodes * 2); final Settings.Builder settings = Settings.builder() .put("index.routing.allocation.exclude.color", "blue") .put(indexSettings()) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)); if (randomBoolean()) { settings.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomIntBetween(1, 10) + "s"); } assertAcked(prepareCreate("test", settings)); assertAllShardsOnNodes("test", redNodes); AtomicBoolean stopped = new AtomicBoolean(false); Thread[] searchThreads = randomBoolean() ? 
new Thread[0] : new Thread[randomIntBetween(1, 4)]; for (int i = 0; i < searchThreads.length; i++) { searchThreads[i] = new Thread(() -> { while (stopped.get() == false) { assertNoFailures(client().prepareSearch("test").setRequestCache(false).get()); } }); searchThreads[i].start(); } int numDocs = randomIntBetween(100, 150); ArrayList<String> ids = new ArrayList<>(); logger.info(" --> indexing [{}] docs", numDocs); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i); ids.add(id); docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i)); } indexRandom(true, docs); SearchResponse countResponse = client().prepareSearch("test").get(); assertHitCount(countResponse, numDocs); logger.info(" --> moving index to new nodes"); Settings build = Settings.builder().put("index.routing.allocation.exclude.color", "red") .put("index.routing.allocation.include.color", "blue").build(); client().admin().indices().prepareUpdateSettings("test").setSettings(build).execute().actionGet(); // index while relocating logger.info(" --> indexing [{}] more docs", numDocs); for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i); ids.add(id); docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(numDocs + i)); } indexRandom(true, docs); logger.info(" --> waiting for relocation to complete"); ensureGreen(TimeValue.timeValueSeconds(60), "test"); // move all shards to the new nodes (it waits on relocation) final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { logger.info(" --> checking iteration {}", i); SearchResponse afterRelocation = client().prepareSearch().setSize(ids.size()).get(); assertNoFailures(afterRelocation); assertSearchHits(afterRelocation, ids.toArray(new String[ids.size()])); } stopped.set(true); for (Thread 
searchThread : searchThreads) { searchThread.join(); } } public void testRelocateWhileWaitingForRefresh() { logger.info("--> starting [node1] ..."); final String node1 = internalCluster().startNode(); logger.info("--> creating test index ..."); prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) // we want to control refreshes .put("index.refresh_interval", -1)).get(); logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } logger.info("--> flush so we have an actual index"); client().admin().indices().prepareFlush().execute().actionGet(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i).execute(); } logger.info("--> start another node"); final String node2 = internalCluster().startNode(); ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); client().admin().cluster().prepareReroute() .add(new MoveAllocationCommand("test", 0, node1, node2)) .execute().actionGet(); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); client().admin().indices().prepareRefresh().execute().actionGet(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } public 
void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { logger.info("--> starting [node1] ..."); final String node1 = internalCluster().startNode(); logger.info("--> creating test index ..."); prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) .put("index.refresh_interval", -1) // we want to control refreshes ).get(); logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } logger.info("--> flush so we have an actual index"); client().admin().indices().prepareFlush().execute().actionGet(); logger.info("--> index more docs so we have something in the translog"); final List<ActionFuture<IndexResponse>> pendingIndexResponses = new ArrayList<>(); for (int i = 10; i < 20; i++) { pendingIndexResponses.add(client().prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i).execute()); } logger.info("--> start another node"); final String node2 = internalCluster().startNode(); ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); ActionFuture<ClusterRerouteResponse> relocationListener = client().admin().cluster().prepareReroute() .add(new MoveAllocationCommand("test", 0, node1, node2)) .execute(); logger.info("--> index 100 docs while relocating"); for (int i = 20; i < 120; i++) { pendingIndexResponses.add(client().prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i).execute()); } relocationListener.actionGet(); clusterHealthResponse = 
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); assertBusy(() -> { client().admin().indices().prepareRefresh().execute().actionGet(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(120L)); } public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Exception { int halfNodes = randomIntBetween(1, 3); String indexName = "test"; Settings[] nodeSettings = Stream.concat( Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes), Stream.generate(() -> Settings.builder().put("node.attr.color", "red").build()).limit(halfNodes)).toArray(Settings[]::new); List<String> nodes = internalCluster().startNodes(nodeSettings); String[] blueNodes = nodes.subList(0, halfNodes).toArray(String[]::new); String[] redNodes = nodes.subList(halfNodes, nodes.size()).toArray(String[]::new); logger.debug("--> blue nodes: [{}], red nodes: [{}]", blueNodes, redNodes); ensureStableCluster(halfNodes * 2); assertAcked( client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, halfNodes - 1)) .put("index.routing.allocation.include.color", "blue"))); ensureGreen("test"); assertBusy(() -> assertAllShardsOnNodes(indexName, blueNodes)); assertActiveCopiesEstablishedPeerRecoveryRetentionLeases(); client().admin().indices().prepareUpdateSettings(indexName) .setSettings(Settings.builder().put("index.routing.allocation.include.color", "red")).get(); assertBusy(() -> assertAllShardsOnNodes(indexName, redNodes)); ensureGreen("test"); 
assertActiveCopiesEstablishedPeerRecoveryRetentionLeases(); } private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception { assertBusy(() -> { for (ObjectCursor<String> it : client().admin().cluster().prepareState().get().getState().metadata().indices().keys()) { Map<ShardId, List<ShardStats>> byShardId = Stream.of(client().admin().indices().prepareStats(it.value).get().getShards()) .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId())); for (List<ShardStats> shardStats : byShardId.values()) { Set<String> expectedLeaseIds = shardStats.stream() .map(s -> ReplicationTracker.getPeerRecoveryRetentionLeaseId(s.getShardRouting())).collect(Collectors.toSet()); for (ShardStats shardStat : shardStats) { Set<String> actualLeaseIds = shardStat.getRetentionLeaseStats().retentionLeases().leases().stream() .map(RetentionLease::id).collect(Collectors.toSet()); assertThat(expectedLeaseIds, everyItem(in(actualLeaseIds))); } } } }); } class RecoveryCorruption implements StubbableTransport.SendRequestBehavior { private final CountDownLatch corruptionCount; RecoveryCorruption(CountDownLatch corruptionCount) { this.corruptionCount = corruptionCount; } @Override public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request; if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { // corrupting the segments_N files in order to make sure future recovery re-send files logger.debug("corrupting [{}] to {}. 
file name: [{}]", action, connection.getNode(), chunkRequest.name()); assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content().toBytesRef().bytes : "no internal reference!!"; byte[] array = chunkRequest.content().toBytesRef().bytes; array[0] = (byte) ~array[0]; // flip one byte in the content corruptionCount.countDown(); } connection.sendRequest(requestId, action, request, options); } else { connection.sendRequest(requestId, action, request, options); } } } }
apache-2.0
asedunov/intellij-community
java/java-tests/testSrc/com/intellij/java/codeInsight/daemon/quickFix/BringVariableIntoScopeTest.java
1040
/*
 * Copyright 2000-2017 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.java.codeInsight.daemon.quickFix;

import com.intellij.codeInsight.daemon.quickFix.LightQuickFixParameterizedTestCase;

/**
 * Parameterized quick-fix test for the "bring variable into scope" fix.
 * Test data pairs are discovered under the directory returned by {@link #getBasePath()}.
 *
 * @author ven
 */
public class BringVariableIntoScopeTest extends LightQuickFixParameterizedTestCase {
  @Override
  protected String getBasePath() {
    return "/codeInsight/daemonCodeAnalyzer/quickFix/bringVariableIntoScope";
  }

  /** Runs every test-data case found under the base path. */
  public void test() throws Exception {
    doAllTests();
  }
}
apache-2.0
fogbeam/cas_mirror
support/cas-server-support-pac4j-webflow/src/main/java/org/apereo/cas/web/flow/DelegatedAuthenticationErrorViewResolver.java
1707
package org.apereo.cas.web.flow; import org.apereo.cas.services.UnauthorizedServiceException; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import lombok.val; import org.springframework.boot.autoconfigure.web.servlet.error.ErrorViewResolver; import org.springframework.http.HttpStatus; import org.springframework.web.servlet.ModelAndView; import javax.servlet.http.HttpServletRequest; import java.util.Map; /** * This is {@link DelegatedAuthenticationErrorViewResolver}. * * @author Misagh Moayyed * @since 5.0.0 */ @RequiredArgsConstructor @Slf4j public class DelegatedAuthenticationErrorViewResolver implements ErrorViewResolver { private final ErrorViewResolver conventionErrorViewResolver; @Override public ModelAndView resolveErrorView(final HttpServletRequest request, final HttpStatus status, final Map<String, Object> map) { val mv = DelegatedClientAuthenticationAction.hasDelegationRequestFailed(request, status.value()); val exception = request.getAttribute("javax.servlet.error.exception"); if (exception != null) { val cause = ((Throwable) exception).getCause(); if (cause instanceof UnauthorizedServiceException) { val mvError = new ModelAndView(CasWebflowConstants.VIEW_ID_DELEGATED_AUTHN_ERROR_VIEW, HttpStatus.FORBIDDEN); LOGGER.warn("Delegated authentication failed with the following details [{}]; Routing over to [{}]", map, mvError.getViewName()); return mvError; } } return mv.orElseGet(() -> conventionErrorViewResolver.resolveErrorView(request, status, map)); } }
apache-2.0
ricepanda/rice
rice-framework/krad-data/src/main/java/org/kuali/rice/krad/data/jpa/converters/BooleanYNConverter.java
2066
/**
 * Copyright 2005-2014 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.rice.krad.data.jpa.converters;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import javax.persistence.AttributeConverter;
import javax.persistence.Converter;

/**
 * Converts between boolean attribute values and their "Y"/"N" database representation.
 *
 * <p>The conversion treats the values as follows: "Y", "y", "true", and "TRUE" are all read as
 * {@code true}; any other database value (including "N") is read as {@code false}.</p>
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 */
@Converter(autoApply = true)
public class BooleanYNConverter implements AttributeConverter<Boolean, String> {

    /**
     * Defines the set of database values that all correspond to {@code true}.
     */
    protected static final Set<String> YES_VALUES = new HashSet<String>(Arrays.asList("Y", "y", "true", "TRUE"));

    /**
     * {@inheritDoc}
     *
     * This implementation converts {@code true} to "Y"; both {@code false} and {@code null}
     * convert to "N".
     */
    @Override
    public String convertToDatabaseColumn(Boolean objectValue) {
        if (objectValue == null) {
            return "N";
        }
        return objectValue ? "Y" : "N";
    }

    /**
     * {@inheritDoc}
     *
     * This implementation converts any of the recognized yes values to {@code true}; every
     * other value, including "N" and {@code null}, converts to {@code false}.
     */
    @Override
    public Boolean convertToEntityAttribute(String dataValue) {
        if (dataValue == null) {
            return false;
        }
        return YES_VALUES.contains(dataValue);
    }
}
apache-2.0
djechelon/spring-security
web/src/main/java/org/springframework/security/web/header/writers/frameoptions/RegExpAllowFromStrategy.java
1911
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.security.web.header.writers.frameoptions;

import java.util.regex.Pattern;

import org.springframework.util.Assert;

/**
 * Implementation which uses a regular expression to validate the supplied origin. If the
 * value of the HTTP parameter matches the pattern, then the result will be ALLOW-FROM
 * &lt;parameter-value&gt;.
 *
 * @author Marten Deinum
 * @since 3.2
 * @deprecated ALLOW-FROM is an obsolete directive that no longer works in modern
 * browsers. Instead use Content-Security-Policy with the <a href=
 * "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors">frame-ancestors</a>
 * directive.
 */
@Deprecated
public final class RegExpAllowFromStrategy extends AbstractRequestParameterAllowFromStrategy {

	// Compiled once at construction; Pattern is immutable and thread-safe.
	private final Pattern pattern;

	/**
	 * Creates a new instance
	 * @param pattern the Pattern to compare against the HTTP parameter value. If the
	 * pattern matches, the domain will be allowed, else denied.
	 * @throws IllegalArgumentException if the pattern is null or blank, or not a valid
	 * regular expression
	 */
	public RegExpAllowFromStrategy(String pattern) {
		Assert.hasText(pattern, "Pattern cannot be empty.");
		this.pattern = Pattern.compile(pattern);
	}

	@Override
	protected boolean allowed(String allowFromOrigin) {
		return this.pattern.matcher(allowFromOrigin).matches();
	}

}
apache-2.0
wildfly/activemq-artemis
artemis-protocols/artemis-proton-plug/src/main/java/org/proton/plug/context/server/ProtonServerSenderContext.java
8703
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.proton.plug.context.server;

import java.util.Map;

import org.apache.qpid.proton.amqp.DescribedType;
import org.apache.qpid.proton.amqp.Symbol;
import org.apache.qpid.proton.amqp.messaging.Accepted;
import org.apache.qpid.proton.amqp.messaging.Modified;
import org.apache.qpid.proton.amqp.messaging.Rejected;
import org.apache.qpid.proton.amqp.messaging.Released;
import org.apache.qpid.proton.amqp.messaging.Source;
import org.apache.qpid.proton.amqp.transport.DeliveryState;
import org.apache.qpid.proton.amqp.transport.SenderSettleMode;
import org.apache.qpid.proton.engine.Delivery;
import org.apache.qpid.proton.engine.Sender;
import org.apache.qpid.proton.message.ProtonJMessage;
import org.proton.plug.AMQPSessionCallback;
import org.proton.plug.context.AbstractConnectionContext;
import org.proton.plug.context.AbstractProtonContextSender;
import org.proton.plug.context.AbstractProtonSessionContext;
import org.proton.plug.context.ProtonPlugSender;
import org.proton.plug.exceptions.ActiveMQAMQPException;
import org.proton.plug.exceptions.ActiveMQAMQPInternalErrorException;
import org.proton.plug.logger.ActiveMQAMQPProtocolMessageBundle;

/**
 * Server-side AMQP sender context. Bridges an ActiveMQ Artemis broker consumer to a Proton
 * {@link Sender} link: broker messages are encoded and sent out via the link, and remote
 * disposition updates (Accepted/Released/Rejected/Modified) are translated back into broker
 * acknowledgements or cancellations.
 */
public class ProtonServerSenderContext extends AbstractProtonContextSender implements ProtonPlugSender {

   // Filter-map key under which a JMS selector is carried on the remote Source.
   private static final Symbol SELECTOR = Symbol.getSymbol("jms-selector");

   // Distribution mode value indicating a non-destructive (browse-only) consumer.
   private static final Symbol COPY = Symbol.valueOf("copy");

   // Opaque handle to the broker-side consumer; created in initialise(), used by the
   // sessionSPI callbacks thereafter.
   private Object brokerConsumer;

   public ProtonServerSenderContext(AbstractConnectionContext connection,
                                    Sender sender,
                                    AbstractProtonSessionContext protonSession,
                                    AMQPSessionCallback server) {
      super(connection, sender, protonSession, server);
   }

   public Object getBrokerConsumer() {
      return brokerConsumer;
   }

   @Override
   public void onFlow(int currentCredits) {
      super.onFlow(currentCredits);
      // Propagate the link credit to the broker consumer so delivery can be throttled.
      sessionSPI.onFlowConsumer(brokerConsumer, currentCredits);
   }

   /*
    * start the session
    */
   @Override
   public void start() throws ActiveMQAMQPException {
      super.start();

      // protonSession.getServerSession().start();

      // todo add flow control
      try {
         // to do whatever you need to make the broker start sending messages to the consumer
         sessionSPI.startSender(brokerConsumer);
         // protonSession.getServerSession().receiveConsumerCredits(consumerID, -1);
      } catch (Exception e) {
         throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorStartingConsumer(e.getMessage());
      }
   }

   /**
    * Create the actual underlying ActiveMQ Artemis server consumer: resolve the source
    * address (creating a temporary queue for dynamic sources), extract any JMS selector from
    * the source's filter map, and register the consumer with the session SPI.
    */
   @Override
   public void initialise() throws Exception {
      super.initialise();

      Source source = (Source) sender.getRemoteSource();

      String queue;

      // Extract an optional JMS selector from the source's filter map.
      String selector = null;
      Map filter = source == null ? null : source.getFilter();
      if (filter != null) {
         DescribedType value = (DescribedType) filter.get(SELECTOR);
         if (value != null) {
            selector = value.getDescribed().toString();
         }
      }

      if (source != null) {
         if (source.getDynamic()) {
            // if dynamic we have to create the node (queue) and set the address on the target;
            // the node is temporary and will be deleted on closing of the session
            queue = java.util.UUID.randomUUID().toString();
            try {
               sessionSPI.createTemporaryQueue(queue);
               // protonSession.getServerSession().createQueue(queue, queue, null, true, false);
            } catch (Exception e) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorCreatingTemporaryQueue(e.getMessage());
            }
            source.setAddress(queue);
         } else {
            // if not dynamic then we use the target's address as the address to forward the
            // messages to; however there has to be a queue bound to it, so we need to check this.
            queue = source.getAddress();
            if (queue == null) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.sourceAddressNotSet();
            }

            // Query inside the try, but throw the "doesn't exist" error outside it: previously
            // that exception was raised within the try block and re-wrapped by the catch below
            // as an internal error, masking the real (address-not-found) condition.
            boolean queueExists;
            try {
               queueExists = sessionSPI.queueQuery(queue);
            } catch (Exception e) {
               throw new ActiveMQAMQPInternalErrorException(e.getMessage(), e);
            }
            if (!queueExists) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.sourceAddressDoesntExist();
            }
         }

         boolean browseOnly = source.getDistributionMode() != null && source.getDistributionMode().equals(COPY);
         try {
            brokerConsumer = sessionSPI.createSender(this, queue, selector, browseOnly);
         } catch (Exception e) {
            throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorCreatingConsumer(e.getMessage());
         }
      }
   }

   /*
    * close the session
    */
   @Override
   public void close() throws ActiveMQAMQPException {
      super.close();
      try {
         sessionSPI.closeSender(brokerConsumer);
      } catch (Exception e) {
         // Preserve the original cause instead of printing it and throwing a cause-less wrapper.
         throw new ActiveMQAMQPInternalErrorException(e.getMessage(), e);
      }
   }

   /**
    * Handle a remote disposition update for a previously sent message: acknowledge on
    * Accepted, cancel without failure on Released, cancel with failure on Rejected/Modified,
    * then settle the delivery and replenish one unit of credit.
    */
   public void onMessage(Delivery delivery) throws ActiveMQAMQPException {
      Object message = delivery.getContext();

      boolean preSettle = sender.getRemoteSenderSettleMode() == SenderSettleMode.SETTLED;

      DeliveryState remoteState = delivery.getRemoteState();

      if (remoteState != null) {
         if (remoteState instanceof Accepted) {
            // we have to individual ack as we can't guarantee we will get the delivery updates
            // (including acks) in order from dealer, a perf hit but a must
            try {
               sessionSPI.ack(brokerConsumer, message);
            } catch (Exception e) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorAcknowledgingMessage(message.toString(), e.getMessage());
            }
         } else if (remoteState instanceof Released) {
            try {
               sessionSPI.cancel(brokerConsumer, message, false);
            } catch (Exception e) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorCancellingMessage(message.toString(), e.getMessage());
            }
         } else if (remoteState instanceof Rejected || remoteState instanceof Modified) {
            try {
               sessionSPI.cancel(brokerConsumer, message, true);
            } catch (Exception e) {
               throw ActiveMQAMQPProtocolMessageBundle.BUNDLE.errorCancellingMessage(message.toString(), e.getMessage());
            }
         }

         // todo add tag caching
         if (!preSettle) {
            protonSession.replaceTag(delivery.getTag());
         }

         // Settle and top up credit under the connection lock to serialize with other writers.
         synchronized (connection.getLock()) {
            delivery.settle();
            sender.offer(1);
         }
      } else {
         // todo not sure if we need to do anything here
      }
   }

   @Override
   public synchronized void checkState() {
      super.checkState();
      sessionSPI.resumeDelivery(brokerConsumer);
   }

   /**
    * Handle an outgoing message from ActiveMQ Artemis: encode it and send it via the Proton
    * sender.
    *
    * @return 0 when this sender is already closed, otherwise the result of
    *         {@code performSend} (inherited from AbstractProtonContextSender — confirm its
    *         meaning there)
    */
   public int deliverMessage(Object message, int deliveryCount) throws Exception {
      if (closed) {
         System.err.println("Message can't be delivered as it's closed");
         return 0;
      }

      // encode the message
      ProtonJMessage serverMessage;
      try {
         // This can be done a lot better here
         serverMessage = sessionSPI.encodeMessage(message, deliveryCount);
      } catch (Throwable e) {
         // The wrapped exception already carries the cause; no need to also print it.
         throw new ActiveMQAMQPInternalErrorException(e.getMessage(), e);
      }

      return performSend(serverMessage, message);
   }
}
apache-2.0
juju790/FTNTLauncher_1.7Fix
src/main/java/net/ftb/tracking/google/JGoogleAnalyticsTracker.java
19717
/** * Copyright (c) 2010 Daniel Murphy, Stefan Brozinski * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * Created at Jul 20, 2010, 4:04:22 AM */ package net.ftb.tracking.google; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.Proxy; import java.net.Proxy.Type; import java.net.SocketAddress; import java.net.URL; import java.util.LinkedList; import java.util.Scanner; import java.util.regex.MatchResult; import net.ftb.log.Logger; /** * Common tracking calls are implemented as methods, but if you want to control * what data to send, then use {@link #makeCustomRequest(AnalyticsRequestData)}. 
* If you are making custom calls, the only requirements are: * <ul> * <li>If you are tracking an event, * {@link AnalyticsRequestData#setEventCategory(String)} and * {@link AnalyticsRequestData#setEventAction(String)} must both be populated.</li> * <li>If you are not tracking an event, * {@link AnalyticsRequestData#setPageURL(String)} must be populated</li> * </ul> * See the <a href=http://code.google.com/intl/en-US/apis/analytics/docs/tracking/gaTrackingTroubleshooting.html#gifParameters> * Google Troubleshooting Guide</a> for more info on the tracking parameters (although it doesn't seem to be fully updated). * <p> * The tracker can operate in three modes: * <ul> * <li>synchronous mode: The HTTP request is sent to GA immediately, before the track * method returns. * This may slow your application down if GA doesn't respond fast. * <li>multi-thread mode: Each track method call creates a new short-lived thread that sends * the HTTP request to GA in the background and terminates. * <li>single-thread mode (the default): The track method stores the request in a FIFO and returns * immediately. A single long-lived background thread consumes the FIFO content and sends the HTTP * requests to GA. * </ul> * </p> * <p> * To halt the background thread safely, use the call {@link #stopBackgroundThread(long)}, where the parameter is the * timeout to wait for any remaining queued tracking calls to be made. Keep in mind that if new tracking requests are made * after the thread is stopped, they will just be stored in the queue, and will not be sent to GA until the thread is started again with * {@link #startBackgroundThread()} (This is assuming you are in single-threaded mode to begin with). 
* </p> * @author Daniel Murphy, Stefan Brozinski */ public class JGoogleAnalyticsTracker { public static enum DispatchMode { /** * Each tracking call will wait until the http request * completes before returning */ SYNCHRONOUS, /** * Each tracking call spawns a new thread to make the http request */ MULTI_THREAD, /** * Each tracking request is added to a queue, and a single dispatch thread makes the requests. */ SINGLE_THREAD } private static final ThreadGroup asyncThreadGroup = new ThreadGroup("Async Google Analytics Threads"); private static long asyncThreadsRunning = 0; private static Proxy proxy = Proxy.NO_PROXY; private static LinkedList<String> fifo = new LinkedList<String>(); private static Thread backgroundThread = null; // the thread used in 'queued' mode. private static boolean backgroundThreadMayRun = false; static { asyncThreadGroup.setMaxPriority(Thread.MIN_PRIORITY); asyncThreadGroup.setDaemon(true); } public static enum GoogleAnalyticsVersion { V_4_7_2 } private GoogleAnalyticsVersion gaVersion; private AnalyticsConfigData configData; private GoogleAnalytics builder; private DispatchMode mode; private boolean enabled; public JGoogleAnalyticsTracker(AnalyticsConfigData argConfigData, GoogleAnalyticsVersion argVersion) { this(argConfigData, argVersion, DispatchMode.SINGLE_THREAD); } public JGoogleAnalyticsTracker(AnalyticsConfigData argConfigData, GoogleAnalyticsVersion argVersion, DispatchMode argMode) { gaVersion = argVersion; configData = argConfigData; createBuilder(); enabled = true; setDispatchMode(argMode); } /** * Sets the dispatch mode * @see DispatchMode * @param argMode the mode to to put the tracker in. If this is null, the tracker * defaults to {@link DispatchMode#SINGLE_THREAD} */ public void setDispatchMode (DispatchMode argMode) { if (argMode == null) { argMode = DispatchMode.SINGLE_THREAD; } if (argMode == DispatchMode.SINGLE_THREAD) { startBackgroundThread(); } mode = argMode; } /** * Gets the current dispatch mode. 
Default is {@link DispatchMode#SINGLE_THREAD}. * @see DispatchMode * @return */ public DispatchMode getDispatchMode () { return mode; } /** * Convenience method to check if the tracker is in synchronous mode. * @return */ public boolean isSynchronous () { return mode == DispatchMode.SYNCHRONOUS; } /** * Convenience method to check if the tracker is in single-thread mode * @return */ public boolean isSingleThreaded () { return mode == DispatchMode.SINGLE_THREAD; } /** * Convenience method to check if the tracker is in multi-thread mode * @return */ public boolean isMultiThreaded () { return mode == DispatchMode.MULTI_THREAD; } /** * Resets the session cookie. */ public void resetSession () { builder.resetSession(); } /** * Sets if the api dispatches tracking requests. * * @param argEnabled */ public void setEnabled (boolean argEnabled) { enabled = argEnabled; } /** * If the api is dispatching tracking requests (default of true). * * @return */ public boolean isEnabled () { return enabled; } /** * Define the proxy to use for all GA tracking requests. * <p> * Call this static method early (before creating any tracking requests). * * @param argProxy The proxy to use */ public static void setProxy (Proxy argProxy) { proxy = (argProxy != null) ? argProxy : Proxy.NO_PROXY; } /** * Define the proxy to use for all GA tracking requests. * <p> * Call this static method early (before creating any tracking requests). * * @param proxyAddr "addr:port" of the proxy to use; may also be given as URL ("http://addr:port/"). */ public static void setProxy (String proxyAddr) { if (proxyAddr != null) { Scanner s = new Scanner(proxyAddr); // Split into "proxyAddr:proxyPort". 
proxyAddr = null; int proxyPort = 8080; try { s.findInLine("(http://|)([^:/]+)(:|)([0-9]*)(/|)"); MatchResult m = s.match(); if (m.groupCount() >= 2) { proxyAddr = m.group(2); } if ((m.groupCount() >= 4) && (!m.group(4).isEmpty())) { proxyPort = Integer.parseInt(m.group(4)); } } finally { s.close(); } if (proxyAddr != null) { SocketAddress sa = new InetSocketAddress(proxyAddr, proxyPort); setProxy(new Proxy(Type.HTTP, sa)); } } } /** * Wait for background tasks to complete. * <p> * This works in queued and asynchronous mode. * * @param timeoutMillis The maximum number of milliseconds to wait. */ public static void completeBackgroundTasks (long timeoutMillis) { boolean fifoEmpty; boolean asyncThreadsCompleted; long absTimeout = System.currentTimeMillis() + timeoutMillis; while (System.currentTimeMillis() < absTimeout) { synchronized (fifo) { fifoEmpty = (fifo.size() == 0); } synchronized (JGoogleAnalyticsTracker.class) { asyncThreadsCompleted = (asyncThreadsRunning == 0); } if (fifoEmpty && asyncThreadsCompleted) { break; } try { Thread.sleep(100); } catch (InterruptedException e) { break; } } } /** * Tracks a page view. * * @param argPageURL * required, Google won't track without it. Ex: * <code>"org/me/javaclass.java"</code>, or anything you want as * the page url. * @param argPageTitle * content title * @param argHostName * the host name for the url */ public void trackPageView (String argPageURL, String argPageTitle, String argHostName) { trackPageViewFromReferrer(argPageURL, argPageTitle, argHostName, "http://www.dmurph.com", "/"); } /** * Tracks a page view. * * @param argPageURL * required, Google won't track without it. Ex: * <code>"org/me/javaclass.java"</code>, or anything you want as * the page url. * @param argPageTitle * content title * @param argHostName * the host name for the url * @param argReferrerSite * site of the referrer. ex, www.dmurph.com * @param argReferrerPage * page of the referrer. 
ex, /mypage.php */ public void trackPageViewFromReferrer (String argPageURL, String argPageTitle, String argHostName, String argReferrerSite, String argReferrerPage) { if (argPageURL == null) { throw new IllegalArgumentException("Page URL cannot be null, Google will not track the data."); } AnalyticsRequestData data = new AnalyticsRequestData(); data.setHostName(argHostName); data.setPageTitle(argPageTitle); data.setPageURL(argPageURL); data.setReferrer(argReferrerSite, argReferrerPage); makeCustomRequest(data); } /** * Tracks a page view. * * @param argPageURL * required, Google won't track without it. Ex: * <code>"org/me/javaclass.java"</code>, or anything you want as * the page url. * @param argPageTitle * content title * @param argHostName * the host name for the url * @param argSearchSource * source of the search engine. ex: google * @param argSearchKeywords * the keywords of the search. ex: java google analytics tracking * utility */ public void trackPageViewFromSearch (String argPageURL, String argPageTitle, String argHostName, String argSearchSource, String argSearchKeywords) { if (argPageURL == null) { throw new IllegalArgumentException("Page URL cannot be null, Google will not track the data."); } AnalyticsRequestData data = new AnalyticsRequestData(); data.setHostName(argHostName); data.setPageTitle(argPageTitle); data.setPageURL(argPageURL); data.setSearchReferrer(argSearchSource, argSearchKeywords); makeCustomRequest(data); } /** * Tracks an event. To provide more info about the page, use * {@link #makeCustomRequest(AnalyticsRequestData)}. * * @param argCategory * @param argAction */ public void trackEvent (String argCategory, String argAction) { trackEvent(argCategory, argAction, null, null); } /** * Tracks an event. To provide more info about the page, use * {@link #makeCustomRequest(AnalyticsRequestData)}. 
* * @param argCategory * @param argAction * @param argLabel */ public void trackEvent (String argCategory, String argAction, String argLabel) { trackEvent(argCategory, argAction, argLabel, null); } /** * Tracks an event. To provide more info about the page, use * {@link #makeCustomRequest(AnalyticsRequestData)}. * * @param argCategory * required * @param argAction * required * @param argLabel * optional * @param argValue * optional */ public void trackEvent (String argCategory, String argAction, String argLabel, Integer argValue) { AnalyticsRequestData data = new AnalyticsRequestData(); data.setEventCategory(argCategory); data.setEventAction(argAction); data.setEventLabel(argLabel); data.setEventValue(argValue); makeCustomRequest(data); } /** * Makes a custom tracking request based from the given data. * * @param argData * @throws NullPointerException * if argData is null or if the URL builder is null */ public synchronized void makeCustomRequest (AnalyticsRequestData argData) { if (!enabled) { Logger.logInfo("Ignoring tracking request, enabled is false"); return; } if (argData == null) { throw new NullPointerException("Data cannot be null"); } if (builder == null) { throw new NullPointerException("Class was not initialized"); } final String url = builder.buildURL(argData); switch (mode) { case MULTI_THREAD: Thread t = new Thread(asyncThreadGroup, "AnalyticsThread-" + asyncThreadGroup.activeCount()) { @Override public void run () { synchronized (JGoogleAnalyticsTracker.class) { asyncThreadsRunning++; } try { dispatchRequest(url); } finally { synchronized (JGoogleAnalyticsTracker.class) { asyncThreadsRunning--; } } } }; t.setDaemon(true); t.start(); break; case SYNCHRONOUS: dispatchRequest(url); break; default: // in case it's null, we default to the single-thread synchronized (fifo) { fifo.addLast(url); fifo.notify(); } if (!backgroundThreadMayRun) { Logger.logError("A tracker request has been added to the queue but the background thread isn't running." 
+ url); } break; } } private static void dispatchRequest (String argURL) { try { URL url = new URL(argURL); HttpURLConnection connection = (HttpURLConnection) url.openConnection(proxy); connection.setRequestProperty("Cache-Control", "no-transform"); connection.setRequestMethod("GET"); connection.setInstanceFollowRedirects(true); connection.connect(); int responseCode = connection.getResponseCode(); if (responseCode != HttpURLConnection.HTTP_OK) { Logger.logError("JGoogleAnalyticsTracker: Error requesting url '{}', received response code {}" + argURL + responseCode); } // else { // Logger.logInfo("JGoogleAnalyticsTracker: Tracking success"); // } } catch (Exception e) { Logger.logError("Error making tracking request", e); } } private void createBuilder () { switch (gaVersion) { case V_4_7_2: builder = new GoogleAnalytics(configData); break; default: builder = new GoogleAnalytics(configData); break; } } /** * If the background thread for 'queued' mode is not running, start it now. */ private synchronized static void startBackgroundThread () { if (backgroundThread == null) { backgroundThreadMayRun = true; backgroundThread = new Thread(asyncThreadGroup, "AnalyticsBackgroundThread") { @Override public void run () { Logger.logInfo("AnalyticsBackgroundThread started"); while (backgroundThreadMayRun) { try { String url = null; synchronized (fifo) { if (fifo.isEmpty()) { fifo.wait(); } if (!fifo.isEmpty()) { // Get a reference to the oldest element in the FIFO, but leave it in the FIFO until it is processed. url = fifo.getFirst(); } } if (url != null) { try { dispatchRequest(url); } finally { // Now that we have completed the HTTP request to GA, remove the element from the FIFO. synchronized (fifo) { fifo.removeFirst(); } } } } catch (Exception e) { Logger.logError("Got exception from dispatch thread", e); } } } }; backgroundThread.setDaemon(true); backgroundThread.start(); } } /** * Stop the long-lived background thread. 
* <p> * This method is needed for debugging purposes only. Calling it in an application is not really * required: The background thread will terminate automatically when the application exits. * * @param timeoutMillis If nonzero, wait for thread completion before returning. */ public static void stopBackgroundThread (long timeoutMillis) { backgroundThreadMayRun = false; synchronized (fifo) { fifo.notify(); } if ((backgroundThread != null) && (timeoutMillis > 0)) { try { backgroundThread.join(timeoutMillis); } catch (InterruptedException e) { } backgroundThread = null; } } }
apache-2.0
ixa-ehu/kaflib
src/main/java/ixa/kaflib/KAFNotValidException.java
274
package ixa.kaflib;

import java.io.IOException;

/**
 * Thrown when an input KAF document fails validation.
 * <p>
 * The supplied detail message is appended to a common prefix identifying
 * the document as invalid.
 */
public class KAFNotValidException extends IOException {

    // Exceptions are Serializable; declare an explicit serialVersionUID so the
    // serialized form does not silently change when the class is recompiled.
    private static final long serialVersionUID = 1L;

    private static final String commonMsg = "Input KAF document is not valid.";

    /**
     * @param msg detail about why the document is not valid; appended to the
     *            common prefix in the exception message
     */
    public KAFNotValidException(String msg) {
        super(commonMsg + " " + msg);
    }
}
apache-2.0
ilovesoup/hyracks
hyracks/hyracks-storage-am-common/src/main/java/edu/uci/ics/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
6948
/*
 * Copyright 2009-2013 by The Regents of the University of California
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * you may obtain a copy of the License from
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.hyracks.storage.am.common.dataflow;

import java.nio.ByteBuffer;

import edu.uci.ics.hyracks.api.context.IHyracksTaskContext;
import edu.uci.ics.hyracks.api.dataflow.value.IRecordDescriptorProvider;
import edu.uci.ics.hyracks.api.dataflow.value.RecordDescriptor;
import edu.uci.ics.hyracks.api.exceptions.HyracksDataException;
import edu.uci.ics.hyracks.dataflow.common.comm.io.FrameTupleAccessor;
import edu.uci.ics.hyracks.dataflow.common.comm.util.FrameUtils;
import edu.uci.ics.hyracks.dataflow.common.data.accessors.FrameTupleReference;
import edu.uci.ics.hyracks.dataflow.std.base.AbstractUnaryInputUnaryOutputOperatorNodePushable;
import edu.uci.ics.hyracks.storage.am.common.api.IIndex;
import edu.uci.ics.hyracks.storage.am.common.api.IIndexAccessor;
import edu.uci.ics.hyracks.storage.am.common.api.IIndexDataflowHelper;
import edu.uci.ics.hyracks.storage.am.common.api.IModificationOperationCallback;
import edu.uci.ics.hyracks.storage.am.common.api.ITupleFilter;
import edu.uci.ics.hyracks.storage.am.common.api.ITupleFilterFactory;
import edu.uci.ics.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import edu.uci.ics.hyracks.storage.am.common.exceptions.TreeIndexNonExistentKeyException;
import edu.uci.ics.hyracks.storage.am.common.impls.NoOpOperationCallback;
import edu.uci.ics.hyracks.storage.am.common.ophelpers.IndexOperation;
import edu.uci.ics.hyracks.storage.am.common.tuples.PermutingFrameTupleReference;

/**
 * Operator pushable that applies a single index operation (insert, update,
 * upsert, or delete) to every tuple of each incoming frame, then forwards the
 * frame unchanged to the downstream writer.
 */
public class IndexInsertUpdateDeleteOperatorNodePushable extends AbstractUnaryInputUnaryOutputOperatorNodePushable {
    protected final IIndexOperatorDescriptor opDesc;
    protected final IHyracksTaskContext ctx;
    protected final IIndexDataflowHelper indexHelper;
    protected final IRecordDescriptorProvider recordDescProvider;
    protected final IndexOperation op;
    // Projects each incoming tuple onto the index fields via the permutation.
    protected final PermutingFrameTupleReference tuple = new PermutingFrameTupleReference();
    protected FrameTupleAccessor accessor;
    protected FrameTupleReference frameTuple;
    protected ByteBuffer writeBuffer;
    protected IIndexAccessor indexAccessor;
    protected ITupleFilter tupleFilter;
    protected IModificationOperationCallback modCallback;

    public IndexInsertUpdateDeleteOperatorNodePushable(IIndexOperatorDescriptor opDesc, IHyracksTaskContext ctx,
            int partition, int[] fieldPermutation, IRecordDescriptorProvider recordDescProvider, IndexOperation op) {
        this.opDesc = opDesc;
        this.ctx = ctx;
        this.indexHelper = opDesc.getIndexDataflowHelperFactory().createIndexDataflowHelper(opDesc, ctx, partition);
        this.recordDescProvider = recordDescProvider;
        this.op = op;
        tuple.setFieldPermutation(fieldPermutation);
    }

    @Override
    public void open() throws HyracksDataException {
        final RecordDescriptor recDesc = recordDescProvider.getInputRecordDescriptor(opDesc.getActivityId(), 0);
        accessor = new FrameTupleAccessor(ctx.getFrameSize(), recDesc);
        writeBuffer = ctx.allocateFrame();
        writer.open();
        indexHelper.open();
        final IIndex index = indexHelper.getIndexInstance();
        try {
            modCallback = opDesc.getModificationOpCallbackFactory().createModificationOperationCallback(
                    indexHelper.getResourceID(), index, ctx);
            indexAccessor = index.createAccessor(modCallback, NoOpOperationCallback.INSTANCE);
            final ITupleFilterFactory filterFactory = opDesc.getTupleFilterFactory();
            if (filterFactory != null) {
                // An optional filter decides per-tuple whether the operation is applied.
                tupleFilter = filterFactory.createTupleFilter(indexHelper.getTaskContext());
                frameTuple = new FrameTupleReference();
            }
        } catch (Exception e) {
            // The index was opened above; release it before surfacing the failure.
            indexHelper.close();
            throw new HyracksDataException(e);
        }
    }

    @Override
    public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
        accessor.reset(buffer);
        final int frameTupleCount = accessor.getTupleCount();
        for (int t = 0; t < frameTupleCount; t++) {
            try {
                if (tupleFilter != null) {
                    frameTuple.reset(accessor, t);
                    if (!tupleFilter.accept(frameTuple)) {
                        continue;
                    }
                }
                tuple.reset(accessor, t);
                switch (op) {
                    case INSERT:
                        try {
                            indexAccessor.insert(tuple);
                        } catch (TreeIndexDuplicateKeyException e) {
                            // Ignore this exception: inserting an already-present key
                            // is treated as a no-op.
                        }
                        break;
                    case UPDATE:
                        indexAccessor.update(tuple);
                        break;
                    case UPSERT:
                        indexAccessor.upsert(tuple);
                        break;
                    case DELETE:
                        try {
                            indexAccessor.delete(tuple);
                        } catch (TreeIndexNonExistentKeyException e) {
                            // Ignore this exception: deleting a missing key is
                            // treated as a no-op.
                        }
                        break;
                    default:
                        throw new HyracksDataException("Unsupported operation " + op
                                + " in tree index InsertUpdateDelete operator");
                }
            } catch (HyracksDataException e) {
                throw e;
            } catch (Exception e) {
                throw new HyracksDataException(e);
            }
        }
        // Pass a copy of the frame to the next operator.
        System.arraycopy(buffer.array(), 0, writeBuffer.array(), 0, buffer.capacity());
        FrameUtils.flushFrame(writeBuffer, writer);
    }

    @Override
    public void close() throws HyracksDataException {
        try {
            writer.close();
        } finally {
            // Always release the index, even if closing the writer failed.
            indexHelper.close();
        }
    }

    @Override
    public void fail() throws HyracksDataException {
        writer.fail();
    }
}
apache-2.0
ind9/gocd
plugin-infra/go-plugin-access/src/main/java/com/thoughtworks/go/plugin/access/analytics/V2/AnalyticsMessageConverterV2.java
2678
/*
 * Copyright 2018 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thoughtworks.go.plugin.access.analytics.V2;

import com.google.gson.Gson;
import com.thoughtworks.go.plugin.access.analytics.AnalyticsMessageConverter;
import com.thoughtworks.go.plugin.access.analytics.V2.models.Capabilities;
import com.thoughtworks.go.plugin.access.common.models.ImageDeserializer;
import com.thoughtworks.go.plugin.domain.analytics.AnalyticsData;
import com.thoughtworks.go.plugin.domain.common.Image;
import org.apache.commons.lang3.StringUtils;

import java.util.HashMap;
import java.util.Map;

/**
 * Serializes requests to — and deserializes responses from — version 2.0 of
 * the analytics plugin extension API.
 */
public class AnalyticsMessageConverterV2 implements AnalyticsMessageConverter {
    public static final String VERSION = "2.0";
    // Gson is thread-safe; one shared instance serves all conversions.
    private static final Gson GSON = new Gson();

    /**
     * Builds the JSON body for an analytics request.
     *
     * @param type     the analytics type (e.g. a dashboard or pipeline metric family)
     * @param metricId identifier of the requested metric
     * @param params   additional parameters forwarded to the plugin
     *                 (raw Map kept for interface compatibility)
     * @return the request serialized as a JSON object with keys "type", "id" and "params"
     */
    public String getAnalyticsRequestBody(String type, String metricId, Map params) {
        Map<String, Object> requestMap = new HashMap<>();
        requestMap.put("type", type);
        requestMap.put("id", metricId);
        requestMap.put("params", params);
        return GSON.toJson(requestMap);
    }

    /**
     * Parses the plugin's capabilities from a JSON response body.
     */
    @Override
    public com.thoughtworks.go.plugin.domain.analytics.Capabilities getCapabilitiesFromResponseBody(String responseBody) {
        return Capabilities.fromJSON(responseBody).toCapabilities();
    }

    /**
     * Parses and validates the analytics payload from a JSON response body.
     */
    @Override
    public AnalyticsData getAnalyticsFromResponseBody(String responseBody) {
        com.thoughtworks.go.plugin.access.analytics.V2.models.AnalyticsData analyticsData =
                com.thoughtworks.go.plugin.access.analytics.V2.models.AnalyticsData.fromJSON(responseBody);
        analyticsData.validate();
        return analyticsData.toAnalyticsData();
    }

    /**
     * Extracts the "assets" entry from a JSON response body.
     *
     * @throws RuntimeException if the assets entry is missing or blank
     */
    @Override
    public String getStaticAssetsFromResponseBody(String responseBody) {
        // Fixed: reuse the shared GSON constant instead of allocating a new
        // Gson per call — consistent with the rest of this class.
        String assets = (String) GSON.fromJson(responseBody, Map.class).get("assets");
        if (StringUtils.isBlank(assets)) {
            throw new RuntimeException("No assets defined!");
        }
        return assets;
    }

    /**
     * Parses the plugin icon from a JSON response body.
     */
    @Override
    public Image getImageFromResponseBody(String responseBody) {
        return new ImageDeserializer().fromJSON(responseBody);
    }
}
apache-2.0
mirkosertic/Bytecoder
classlib/java.base/src/main/resources/META-INF/modules/java.base/classes/jdk/internal/perf/Perf.java
23708
/* * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package jdk.internal.perf; import java.nio.ByteBuffer; import java.security.Permission; import java.security.PrivilegedAction; import java.io.IOException; import sun.nio.cs.UTF_8; import jdk.internal.ref.CleanerFactory; /** * The Perf class provides the ability to attach to an instrumentation * buffer maintained by a Java virtual machine. The instrumentation * buffer may be for the Java virtual machine running the methods of * this class or it may be for another Java virtual machine on the * same system. * <p> * In addition, this class provides methods to create instrumentation * objects in the instrumentation buffer for the Java virtual machine * that is running these methods. 
It also contains methods for acquiring * the value of a platform specific high resolution clock for time * stamp and interval measurement purposes. * * @author Brian Doherty * @since 1.4.2 * @see #getPerf * @see jdk.internal.perf.Perf.GetPerfAction * @see java.nio.ByteBuffer */ public final class Perf { private static Perf instance; private static final int PERF_MODE_RO = 0; private static final int PERF_MODE_RW = 1; private Perf() { } // prevent instantiation /** * The GetPerfAction class is a convenience class for acquiring access * to the singleton Perf instance using the * <code>AccessController.doPrivileged()</code> method. * <p> * An instance of this class can be used as the argument to * <code>AccessController.doPrivileged(PrivilegedAction)</code>. * <p> Here is a suggested idiom for use of this class: * * <blockquote><pre>{@code * class MyTrustedClass { * private static final Perf perf = * AccessController.doPrivileged(new Perf.GetPerfAction<Perf>()); * ... * } * }</pre></blockquote> * <p> * In the presence of a security manager, the <code>MyTrustedClass</code> * class in the above example will need to be granted the * <em>"sun.misc.Perf.getPerf"</em> <code>RuntimePermission</code> * permission in order to successfully acquire the singleton Perf instance. * <p> * Please note that the <em>"sun.misc.Perf.getPerf"</em> permission * is not a JDK specified permission. * * @see java.security.AccessController#doPrivileged(PrivilegedAction) * @see java.lang.RuntimePermission */ public static class GetPerfAction implements PrivilegedAction<Perf> { /** * Run the <code>Perf.getPerf()</code> method in a privileged context. * * @see #getPerf */ public Perf run() { return getPerf(); } } /** * Return a reference to the singleton Perf instance. * <p> * The getPerf() method returns the singleton instance of the Perf * class. 
The returned object provides the caller with the capability * for accessing the instrumentation buffer for this or another local * Java virtual machine. * <p> * If a security manager is installed, its <code>checkPermission</code> * method is called with a <code>RuntimePermission</code> with a target * of <em>"sun.misc.Perf.getPerf"</em>. A security exception will result * if the caller has not been granted this permission. * <p> * Access to the returned <code>Perf</code> object should be protected * by its caller and not passed on to untrusted code. This object can * be used to attach to the instrumentation buffer provided by this Java * virtual machine or for those of other Java virtual machines running * on the same system. The instrumentation buffer may contain senstitive * information. API's built on top of this interface may want to provide * finer grained access control to the contents of individual * instrumentation objects contained within the buffer. * <p> * Please note that the <em>"sun.misc.Perf.getPerf"</em> permission * is not a JDK specified permission. * * @return A reference to the singleton Perf instance. * @throws SecurityException if a security manager exists and its * <code>checkPermission</code> method doesn't allow access * to the <em>"jdk.internal.perf.Perf.getPerf""</em> target. * @see java.lang.RuntimePermission * @see #attach */ public static Perf getPerf() { @SuppressWarnings("removal") SecurityManager security = System.getSecurityManager(); if (security != null) { Permission perm = new RuntimePermission("jdk.internal.perf.Perf.getPerf"); security.checkPermission(perm); } return instance; } /** * Attach to the instrumentation buffer for the specified Java virtual * machine. * <p> * This method will attach to the instrumentation buffer for the * specified virtual machine. It returns a <code>ByteBuffer</code> object * that is initialized to access the instrumentation buffer for the * indicated Java virtual machine. 
The <code>lvmid</code> parameter is * a integer value that uniquely identifies the target local Java virtual * machine. It is typically, but not necessarily, the process id of * the target Java virtual machine. * <p> * If the <code>lvmid</code> identifies a Java virtual machine different * from the one running this method, then the coherency characteristics * of the buffer are implementation dependent. Implementations that do * not support named, coherent, shared memory may return a * <code>ByteBuffer</code> object that contains only a snap shot of the * data in the instrumentation buffer. Implementations that support named, * coherent, shared memory, may return a <code>ByteBuffer</code> object * that will be changing dynamically over time as the target Java virtual * machine updates its mapping of this buffer. * <p> * If the <code>lvmid</code> is 0 or equal to the actual <code>lvmid</code> * for the Java virtual machine running this method, then the returned * <code>ByteBuffer</code> object will always be coherent and dynamically * changing. * <p> * The attach mode specifies the access permissions requested for the * instrumentation buffer of the target virtual machine. The permitted * access permissions are: * <ul> * <li>"r" - Read only access. This Java virtual machine has only * read access to the instrumentation buffer for the target Java * virtual machine. * <li>"rw" - Read/Write access. This Java virtual machine has read and * write access to the instrumentation buffer for the target Java virtual * machine. This mode is currently not supported and is reserved for * future enhancements. * </ul> * * @param lvmid an integer that uniquely identifies the * target local Java virtual machine. * @param mode a string indicating the attach mode. * @return ByteBuffer a direct allocated byte buffer * @throws IllegalArgumentException The lvmid or mode was invalid. * @throws IOException An I/O error occurred while trying to acquire * the instrumentation buffer. 
* @throws OutOfMemoryError The instrumentation buffer could not be mapped * into the virtual machine's address space. * @see java.nio.ByteBuffer */ public ByteBuffer attach(int lvmid, String mode) throws IllegalArgumentException, IOException { if (mode.compareTo("r") == 0) { return attachImpl(null, lvmid, PERF_MODE_RO); } else if (mode.compareTo("rw") == 0) { return attachImpl(null, lvmid, PERF_MODE_RW); } else { throw new IllegalArgumentException("unknown mode"); } } /** * Attach to the instrumentation buffer for the specified Java virtual * machine owned by the given user. * <p> * This method behaves just as the <code>attach(int lvmid, String mode) * </code> method, except that it only searches for Java virtual machines * owned by the specified user. * * @param user A <code>String</code> object containing the * name of the user that owns the target Java * virtual machine. * @param lvmid an integer that uniquely identifies the * target local Java virtual machine. * @param mode a string indicating the attach mode. * @return ByteBuffer a direct allocated byte buffer * @throws IllegalArgumentException The lvmid or mode was invalid. * @throws IOException An I/O error occurred while trying to acquire * the instrumentation buffer. * @throws OutOfMemoryError The instrumentation buffer could not be mapped * into the virtual machine's address space. * @see java.nio.ByteBuffer */ public ByteBuffer attach(String user, int lvmid, String mode) throws IllegalArgumentException, IOException { if (mode.compareTo("r") == 0) { return attachImpl(user, lvmid, PERF_MODE_RO); } else if (mode.compareTo("rw") == 0) { return attachImpl(user, lvmid, PERF_MODE_RW); } else { throw new IllegalArgumentException("unknown mode"); } } /** * Call the implementation specific attach method. * <p> * This method calls into the Java virtual machine to perform the platform * specific attach method. 
Buffers returned from this method are * internally managed as <code>PhantomRefereces</code> to provide for * guaranteed, secure release of the native resources. * * @param user A <code>String</code> object containing the * name of the user that owns the target Java * virtual machine. * @param lvmid an integer that uniquely identifies the * target local Java virtual machine. * @param mode a string indicating the attach mode. * @return ByteBuffer a direct allocated byte buffer * @throws IllegalArgumentException The lvmid or mode was invalid. * @throws IOException An I/O error occurred while trying to acquire * the instrumentation buffer. * @throws OutOfMemoryError The instrumentation buffer could not be mapped * into the virtual machine's address space. */ private ByteBuffer attachImpl(String user, int lvmid, int mode) throws IllegalArgumentException, IOException { final ByteBuffer b = attach(user, lvmid, mode); if (lvmid == 0) { // The native instrumentation buffer for this Java virtual // machine is never unmapped. return b; } else { // This is an instrumentation buffer for another Java virtual // machine with native resources that need to be managed. We // create a duplicate of the native ByteBuffer and manage it // with a Cleaner. When the duplicate becomes phantom reachable, // the native resources will be released. final ByteBuffer dup = b.duplicate(); CleanerFactory.cleaner() .register(dup, new CleanerAction(instance, b)); return dup; } } private static class CleanerAction implements Runnable { private final ByteBuffer bb; private final Perf perf; CleanerAction(Perf perf, ByteBuffer bb) { this.perf = perf; this.bb = bb; } public void run() { try { perf.detach(bb); } catch (Throwable th) { // avoid crashing the reference handler thread, // but provide for some diagnosability assert false : th.toString(); } } } /** * Native method to perform the implementation specific attach mechanism. 
* <p> * The implementation of this method may return distinct or identical * <code>ByteBuffer</code> objects for two distinct calls requesting * attachment to the same Java virtual machine. * <p> * For the Sun HotSpot JVM, two distinct calls to attach to the same * target Java virtual machine will result in two distinct ByteBuffer * objects returned by this method. This may change in a future release. * * @param user A <code>String</code> object containing the * name of the user that owns the target Java * virtual machine. * @param lvmid an integer that uniquely identifies the * target local Java virtual machine. * @param mode a string indicating the attach mode. * @return ByteBuffer a direct allocated byte buffer * @throws IllegalArgumentException The lvmid or mode was invalid. * @throws IOException An I/O error occurred while trying to acquire * the instrumentation buffer. * @throws OutOfMemoryError The instrumentation buffer could not be mapped * into the virtual machine's address space. */ private native ByteBuffer attach(String user, int lvmid, int mode) throws IllegalArgumentException, IOException; /** * Native method to perform the implementation specific detach mechanism. * <p> * If this method is passed a <code>ByteBuffer</code> object that is * not created by the <code>attach</code> method, then the results of * this method are undefined, with unpredictable and potentially damaging * effects to the Java virtual machine. To prevent accidental or malicious * use of this method, all native ByteBuffer created by the <code> * attach</code> method are managed internally as PhantomReferences * and resources are freed by the system. * <p> * If this method is passed a <code>ByteBuffer</code> object created * by the <code>attach</code> method with a lvmid for the Java virtual * machine running this method (lvmid=0, for example), then the detach * request is silently ignored. * * @param bb A direct allocated byte buffer created by the * <code>attach</code> method. 
* @see java.nio.ByteBuffer * @see #attach */ private native void detach(ByteBuffer bb); /** * Create a <code>long</code> scalar entry in the instrumentation buffer * with the given variability characteristic, units, and initial value. * <p> * Access to the instrument is provided through the returned <code> * ByteBuffer</code> object. Typically, this object should be wrapped * with <code>LongBuffer</code> view object. * * @param variability the variability characteristic for this entry. * @param units the units for this entry. * @param name the name of this entry. * @param value the initial value for this entry. * @return ByteBuffer a direct allocated ByteBuffer object that * allows write access to a native memory location * containing a <code>long</code> value. * * see sun.misc.perf.Variability * see sun.misc.perf.Units * @see java.nio.ByteBuffer */ public native ByteBuffer createLong(String name, int variability, int units, long value); /** * Create a <code>String</code> entry in the instrumentation buffer with * the given variability characteristic, units, and initial value. * <p> * The maximum length of the <code>String</code> stored in this string * instrument is given in by <code>maxLength</code> parameter. Updates * to this instrument with <code>String</code> values with lengths greater * than <code>maxLength</code> will be truncated to <code>maxLength</code>. * The truncated value will be terminated by a null character. * <p> * The underlying implementation may further limit the length of the * value, but will continue to preserve the null terminator. * <p> * Access to the instrument is provided through the returned <code> * ByteBuffer</code> object. * * @param variability the variability characteristic for this entry. * @param units the units for this entry. * @param name the name of this entry. * @param value the initial value for this entry. * @param maxLength the maximum string length for this string * instrument. 
* @return ByteBuffer a direct allocated ByteBuffer that allows * write access to a native memory location * containing a <code>long</code> value. * * see sun.misc.perf.Variability * see sun.misc.perf.Units * @see java.nio.ByteBuffer */ public ByteBuffer createString(String name, int variability, int units, String value, int maxLength) { byte[] v = value.getBytes(UTF_8.INSTANCE); byte[] v1 = new byte[v.length+1]; System.arraycopy(v, 0, v1, 0, v.length); v1[v.length] = '\0'; return createByteArray(name, variability, units, v1, Math.max(v1.length, maxLength)); } /** * Create a <code>String</code> entry in the instrumentation buffer with * the given variability characteristic, units, and initial value. * <p> * The maximum length of the <code>String</code> stored in this string * instrument is implied by the length of the <code>value</code> parameter. * Subsequent updates to the value of this instrument will be truncated * to this implied maximum length. The truncated value will be terminated * by a null character. * <p> * The underlying implementation may further limit the length of the * initial or subsequent value, but will continue to preserve the null * terminator. * <p> * Access to the instrument is provided through the returned <code> * ByteBuffer</code> object. * * @param variability the variability characteristic for this entry. * @param units the units for this entry. * @param name the name of this entry. * @param value the initial value for this entry. * @return ByteBuffer a direct allocated ByteBuffer that allows * write access to a native memory location * containing a <code>long</code> value. 
* * see sun.misc.perf.Variability * see sun.misc.perf.Units * @see java.nio.ByteBuffer */ public ByteBuffer createString(String name, int variability, int units, String value) { byte[] v = value.getBytes(UTF_8.INSTANCE); byte[] v1 = new byte[v.length+1]; System.arraycopy(v, 0, v1, 0, v.length); v1[v.length] = '\0'; return createByteArray(name, variability, units, v1, v1.length); } /** * Create a <code>byte</code> vector entry in the instrumentation buffer * with the given variability characteristic, units, and initial value. * <p> * The <code>maxLength</code> parameter limits the size of the byte * array instrument such that the initial or subsequent updates beyond * this length are silently ignored. No special handling of truncated * updates is provided. * <p> * The underlying implementation may further limit the length of the * length of the initial or subsequent value. * <p> * Access to the instrument is provided through the returned <code> * ByteBuffer</code> object. * * @param variability the variability characteristic for this entry. * @param units the units for this entry. * @param name the name of this entry. * @param value the initial value for this entry. * @param maxLength the maximum length of this byte array. * @return ByteBuffer a direct allocated byte buffer that allows * write access to a native memory location * containing a <code>long</code> value. * * see sun.misc.perf.Variability * see sun.misc.perf.Units * @see java.nio.ByteBuffer */ public native ByteBuffer createByteArray(String name, int variability, int units, byte[] value, int maxLength); /** * Return the value of the High Resolution Counter. * * The High Resolution Counter returns the number of ticks since * since the start of the Java virtual machine. The resolution of * the counter is machine dependent and can be determined from the * value return by the {@link #highResFrequency} method. 
* * @return the number of ticks of machine dependent resolution since * the start of the Java virtual machine. * * @see #highResFrequency * @see java.lang.System#currentTimeMillis() */ public native long highResCounter(); /** * Returns the frequency of the High Resolution Counter, in ticks per * second. * * This value can be used to convert the value of the High Resolution * Counter, as returned from a call to the {@link #highResCounter} method, * into the number of seconds since the start of the Java virtual machine. * * @return the frequency of the High Resolution Counter. * @see #highResCounter */ public native long highResFrequency(); private static native void registerNatives(); static { registerNatives(); instance = new Perf(); } }
apache-2.0
lukhnos/j2objc
jre_emul/android/platform/external/icu/android_icu4j/src/main/java/android/icu/util/CopticCalendar.java
9464
/* GENERATED SOURCE. DO NOT MODIFY. */ // © 2016 and later: Unicode, Inc. and others. // License & terms of use: http://www.unicode.org/copyright.html#License /* ******************************************************************************* * Copyright (C) 2005-2016, International Business Machines Corporation and * * others. All Rights Reserved. * ******************************************************************************* */ package android.icu.util; import java.util.Date; import java.util.Locale; /** * Implement the Coptic calendar system. * <p> * CopticCalendar usually should be instantiated using * {@link android.icu.util.Calendar#getInstance(ULocale)} passing in a <code>ULocale</code> * with the tag <code>"@calendar=coptic"</code>.</p> * * @see android.icu.util.Calendar */ public final class CopticCalendar extends CECalendar { // jdk1.4.2 serialver private static final long serialVersionUID = 5903818751846742911L; /** * Constant for ωογτ / تﻮﺗ, * the 1st month of the Coptic year. */ public static final int TOUT = 0; /** * Constant for Παοπι / ﻪﺑﺎﺑ, * the 2nd month of the Coptic year. */ public static final int BABA = 1; /** * Constant for Αθορ / رﻮﺗﺎﻫ, * the 3rd month of the Coptic year. */ public static final int HATOR = 2; /** * Constant for Χοιακ / ﻚﻬﻴﻛ;, * the 4th month of the Coptic year. */ public static final int KIAHK = 3; /** * Constant for Τωβι / طﻮﺒﻫ, * the 5th month of the Coptic year. */ public static final int TOBA = 4; /** * Constant for Μεϣιρ / ﺮﻴﺸﻣأ, * the 6th month of the Coptic year. */ public static final int AMSHIR = 5; /** * Constant for Παρεμϩατ / تﺎﻬﻣﺮﺑ, * the 7th month of the Coptic year. */ public static final int BARAMHAT = 6; /** * Constant for Φαρμοθι / هدﻮﻣﺮﺑ, * the 8th month of the Coptic year. */ public static final int BARAMOUDA = 7; /** * Constant for Παϣαν / ﺲﻨﺸﺑ;, * the 9th month of the Coptic year. */ public static final int BASHANS = 8; /** * Constant for Παωνι / ﻪﻧؤﻮﺑ, * the 10th month of the Coptic year. 
*/ public static final int PAONA = 9; /** * Constant for Επηπ / ﺐﻴﺑأ, * the 11th month of the Coptic year. */ public static final int EPEP = 10; /** * Constant for Μεϲωρη / ىﺮﺴﻣ, * the 12th month of the Coptic year. */ public static final int MESRA = 11; /** * Constant for Πικογϫι μαβοτ / ﺮﻴﻐﺼﻟاﺮﻬﺸﻟا, * the 13th month of the Coptic year. */ public static final int NASIE = 12; private static final int JD_EPOCH_OFFSET = 1824665; // Eras private static final int BCE = 0; private static final int CE = 1; /** * Constructs a default <code>CopticCalendar</code> using the current time * in the default time zone with the default locale. */ public CopticCalendar() { super(); } /** * Constructs a <code>CopticCalendar</code> based on the current time * in the given time zone with the default locale. * * @param zone The time zone for the new calendar. */ public CopticCalendar(TimeZone zone) { super(zone); } /** * Constructs a <code>CopticCalendar</code> based on the current time * in the default time zone with the given locale. * * @param aLocale The locale for the new calendar. */ public CopticCalendar(Locale aLocale) { super(aLocale); } /** * Constructs a <code>CopticCalendar</code> based on the current time * in the default time zone with the given locale. * * @param locale The icu locale for the new calendar. */ public CopticCalendar(ULocale locale) { super(locale); } /** * Constructs a <code>CopticCalendar</code> based on the current time * in the given time zone with the given locale. * * @param zone The time zone for the new calendar. * @param aLocale The locale for the new calendar. */ public CopticCalendar(TimeZone zone, Locale aLocale) { super(zone, aLocale); } /** * Constructs a <code>CopticCalendar</code> based on the current time * in the given time zone with the given locale. * * @param zone The time zone for the new calendar. * @param locale The icu locale for the new calendar. 
*/ public CopticCalendar(TimeZone zone, ULocale locale) { super(zone, locale); } /** * Constructs a <code>CopticCalendar</code> with the given date set * in the default time zone with the default locale. * * @param year The value used to set the calendar's {@link #YEAR YEAR} time field. * @param month The value used to set the calendar's {@link #MONTH MONTH} time field. * The value is 0-based. e.g., 0 for Tout. * @param date The value used to set the calendar's {@link #DATE DATE} time field. */ public CopticCalendar(int year, int month, int date) { super(year, month, date); } /** * Constructs a <code>CopticCalendar</code> with the given date set * in the default time zone with the default locale. * * @param date The date to which the new calendar is set. */ public CopticCalendar(Date date) { super(date); } /** * Constructs a <code>CopticCalendar</code> with the given date * and time set for the default time zone with the default locale. * * @param year The value used to set the calendar's {@link #YEAR YEAR} time field. * @param month The value used to set the calendar's {@link #MONTH MONTH} time field. * The value is 0-based. e.g., 0 for Tout. * @param date The value used to set the calendar's {@link #DATE DATE} time field. * @param hour The value used to set the calendar's {@link #HOUR_OF_DAY HOUR_OF_DAY} time field. * @param minute The value used to set the calendar's {@link #MINUTE MINUTE} time field. * @param second The value used to set the calendar's {@link #SECOND SECOND} time field. */ public CopticCalendar(int year, int month, int date, int hour, int minute, int second) { super(year, month, date, hour, minute, second); } /** * {@inheritDoc} */ public String getType() { return "coptic"; } /** * {@inheritDoc} * @deprecated This API is ICU internal only. 
* @hide original deprecated declaration * @hide draft / provisional / internal are hidden on Android */ @Deprecated protected int handleGetExtendedYear() { int eyear; if (newerField(EXTENDED_YEAR, YEAR) == EXTENDED_YEAR) { eyear = internalGet(EXTENDED_YEAR, 1); // Default to year 1 } else { // The year defaults to the epoch start, the era to AD int era = internalGet(ERA, CE); if (era == BCE) { eyear = 1 - internalGet(YEAR, 1); // Convert to extended year } else { eyear = internalGet(YEAR, 1); // Default to year 1 } } return eyear; } /** * {@inheritDoc} * @deprecated This API is ICU internal only. * @hide original deprecated declaration * @hide draft / provisional / internal are hidden on Android */ @Deprecated protected void handleComputeFields(int julianDay) { int era, year; int[] fields = new int[3]; jdToCE(julianDay, getJDEpochOffset(), fields); // fields[0] eyear // fields[1] month // fields[2] day if (fields[0] <= 0) { era = BCE; year = 1 - fields[0]; } else { era = CE; year = fields[0]; } internalSet(EXTENDED_YEAR, fields[0]); internalSet(ERA, era); internalSet(YEAR, year); internalSet(MONTH, fields[1]); internalSet(DAY_OF_MONTH, fields[2]); internalSet(DAY_OF_YEAR, (30 * fields[1]) + fields[2]); } /** * {@inheritDoc} * @deprecated This API is ICU internal only. * @hide original deprecated declaration * @hide draft / provisional / internal are hidden on Android */ @Deprecated protected int getJDEpochOffset() { return JD_EPOCH_OFFSET; } /** * Convert an Coptic year, month, and day to a Julian day. * * @param year the year * @param month the month * @param date the day * @hide draft / provisional / internal are hidden on Android */ // The equivalent operation can be done by public Calendar API. // This API was accidentally marked as @draft, but we have no good // reason to keep this. For now, we leave it as is, but may be // removed in future. 
2008-03-21 yoshito public static int copticToJD(long year, int month, int date) { return ceToJD(year, month, date, JD_EPOCH_OFFSET); } }
apache-2.0
WilliamDo/ignite
modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbMemoryLeakAbstractTest.java
7609
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal.processors.database;

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
import org.apache.ignite.internal.processors.cache.persistence.DataStructure;

import static org.apache.ignite.IgniteSystemProperties.getInteger;

/**
 * Base class for memory leaks tests.
 * <p>
 * Runs a warm-up phase followed by a measurement phase: {@link #CONCURRENCY_LEVEL}
 * threads continuously perform random cache operations while the main thread probes
 * the loaded pages count every 5 seconds and asserts that its growth stays within
 * the bounds defined by {@link #pagesMax()} and {@link #pagesDelta()}.
 */
public abstract class IgniteDbMemoryLeakAbstractTest extends IgniteDbAbstractTest {
    /** Number of concurrent load threads; also used as the page-memory concurrency level. */
    private static final int CONCURRENCY_LEVEL = 16;

    /** Lower bound for the page cache size: 1 MiB per load thread. */
    private static final int MIN_PAGE_CACHE_SIZE = 1048576 * CONCURRENCY_LEVEL;

    /** First exception thrown by a load thread; a non-null value aborts the test. */
    private volatile Exception ex;

    /** {@link System#nanoTime()} deadline of the warm-up phase. */
    private long warmUpEndTime;

    /** {@link System#nanoTime()} deadline of the whole test. */
    private long endTime;

    /** Loaded pages count measured by the previous probe (0 before the first probe). */
    private long loadedPages;

    /** Accumulated growth of the loaded pages count across probes. */
    private long delta;

    /** Number of probes performed so far. */
    private long probeCnt;

    /** {@inheritDoc} */
    @Override protected void beforeTest() throws Exception {
        super.beforeTest();

        DataStructure.rnd = null;

        long startTime = System.nanoTime();

        warmUpEndTime = startTime + TimeUnit.SECONDS.toNanos(warmUp());

        endTime = warmUpEndTime + TimeUnit.SECONDS.toNanos(duration());
    }

    /** {@inheritDoc} */
    @Override protected void configure(IgniteConfiguration cfg) {
        cfg.setMetricsLogFrequency(5000);
    }

    /** {@inheritDoc} */
    @Override protected void configure(DataStorageConfiguration mCfg) {
        mCfg.setConcurrencyLevel(CONCURRENCY_LEVEL);

        // 1 KiB (16 KiB for large pages) plus per-page overhead, times the allowed page count.
        long size = (1024 * (isLargePage() ? 16 : 1) + 24) * pagesMax();

        mCfg.setDefaultDataRegionConfiguration(
            new DataRegionConfiguration().setMaxSize(Math.max(size, MIN_PAGE_CACHE_SIZE)).setName("default"));
    }

    /**
     * @return Test duration in seconds.
     */
    protected int duration() {
        return getInteger("IGNITE_MEMORY_LEAKS_TEST_DURATION", 300);
    }

    /**
     * @return Warm up duration in seconds.
     */
    @SuppressWarnings("WeakerAccess")
    protected int warmUp() {
        return getInteger("IGNITE_MEMORY_LEAKS_TEST_WARM_UP", 450);
    }

    /** {@inheritDoc} */
    @Override protected int gridCount() {
        return 1;
    }

    /** {@inheritDoc} */
    @Override protected boolean indexingEnabled() {
        return false;
    }

    /** {@inheritDoc} */
    @Override protected long getTestTimeout() {
        // Multiply as long: the durations come from system properties, so plain int
        // arithmetic could overflow before the implicit widening to long.
        return (warmUp() + duration() + 10) * 1000L; // Extra seconds to stop all threads.
    }

    /**
     * @param ig Ignite instance.
     * @return IgniteCache.
     */
    protected abstract IgniteCache<Object, Object> cache(IgniteEx ig);

    /**
     * @return Cache key to perform an operation.
     */
    protected abstract Object key();

    /**
     * @param key Cache key to perform an operation.
     * @return Cache value to perform an operation.
     */
    protected abstract Object value(Object key);

    /**
     * Performs one random cache operation (put, get or remove) with a fresh key/value pair.
     *
     * @param cache IgniteCache.
     */
    protected void operation(IgniteCache<Object, Object> cache) {
        Object key = key();
        Object val = value(key);

        switch (nextInt(3)) {
            case 0:
                cache.getAndPut(key, val);

                break;

            case 1:
                cache.get(key);

                break;

            case 2:
                cache.getAndRemove(key);
        }
    }

    /**
     * @param bound Upper bound (exclusive). Must be positive.
     * @return Random int value.
     */
    protected static int nextInt(int bound) {
        return ThreadLocalRandom.current().nextInt(bound);
    }

    /**
     * @return Random int value.
     */
    protected static int nextInt() {
        return ThreadLocalRandom.current().nextInt();
    }

    /**
     * Runs the load threads through warm-up and measurement phases, probing memory
     * every 5 seconds during the latter. Rethrows the first load-thread exception.
     *
     * @throws Exception If failed.
     */
    public void testMemoryLeak() throws Exception {
        final IgniteEx ignite = grid(0);
        final IgniteCache<Object, Object> cache = cache(ignite);

        Runnable target = new Runnable() {
            @Override public void run() {
                while (ex == null && System.nanoTime() < endTime) {
                    try {
                        operation(cache);
                    }
                    catch (Exception e) {
                        ex = e;

                        break;
                    }
                }
            }
        };

        Thread[] threads = new Thread[CONCURRENCY_LEVEL];

        info("Warming up is started.");

        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(target);
            threads[i].start();
        }

        while (ex == null && System.nanoTime() < warmUpEndTime)
            Thread.sleep(100);

        if (ex != null)
            throw ex;

        info("Warming up is ended.");

        while (ex == null && System.nanoTime() < endTime) {
            try {
                check(cache);
            }
            catch (Exception e) {
                ex = e;

                break;
            }

            Thread.sleep(TimeUnit.SECONDS.toMillis(5));
        }

        if (ex != null)
            throw ex;
    }

    /**
     * Callback to check the current state.
     *
     * @param cache Cache instance.
     * @throws Exception If failed.
     */
    protected final void check(IgniteCache cache) throws Exception {
        long pagesActual = ((IgniteCacheProxy)cache).context().dataRegion().pageMemory().loadedPages();

        if (loadedPages > 0) {
            delta += pagesActual - loadedPages;

            int allowedDelta = pagesDelta();

            if (probeCnt++ > 12) { // We need some statistic first. Minimal statistic is taken for a minute.
                long actualDelta = delta / probeCnt;

                assertTrue(
                    "Average growth pages in the number is more than expected [allowed=" + allowedDelta +
                        ", actual=" + actualDelta + "]",
                    actualDelta <= allowedDelta);
            }
        }

        long pagesAllowed = pagesMax();

        assertTrue("Allocated pages count is more than expected [allowed=" + pagesAllowed + ", actual=" + pagesActual + "]",
            pagesActual < pagesAllowed);

        loadedPages = pagesActual;
    }

    /**
     * @return Maximal allowed pages number.
     */
    protected abstract long pagesMax();

    /**
     * @return Expected average number of pages, on which their total number can grow per 5 seconds.
     */
    @SuppressWarnings("WeakerAccess")
    protected int pagesDelta() {
        return 3;
    }
}
apache-2.0
KidEinstein/giraph
giraph-core/src/main/java/org/apache/giraph/types/ops/collections/array/WArrayList.java
2368
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.giraph.types.ops.collections.array;

import org.apache.giraph.types.ops.collections.WCollection;

/**
 * Array list over mutable elements, which are probably
 * internally stored differently/efficiently, and are accessed
 * through methods providing "return" value.
 *
 * <p>Callers pass a reusable "to" object into the <code>*IntoW</code>
 * methods instead of receiving a freshly allocated element, which
 * avoids per-access object creation.
 *
 * @param <T> Element type
 */
public interface WArrayList<T> extends WCollection<T> {
  /**
   * Sets the size of this array list.
   *
   * <p>
   * If the specified size is smaller than the current size,
   * the last elements are discarded.
   * Otherwise, they are filled with 0/<code>null</code>/<code>false</code>.
   *
   * @param newSize the new size.
   */
  void size(int newSize);
  /**
   * Trims this array list so that the capacity is equal to the size.
   *
   * @see java.util.ArrayList#trimToSize()
   */
  void trim();
  /**
   * Pop value from the end of the array, storing it into the 'to' argument.
   *
   * @param to Object to store value into
   */
  void popIntoW(T to);
  /**
   * Get element at given index in the array, storing it into the 'to' argument.
   *
   * @param index Index
   * @param to Object to store value into
   */
  void getIntoW(int index, T to);
  /**
   * Set element at given index in the array.
   *
   * @param index Index
   * @param value Value
   */
  void setW(int index, T value);
  /**
   * Sets given range of elements to a specified value.
   *
   * @param from From index (inclusive)
   * @param to To index (exclusive)
   * @param value Value
   */
  void fillW(int from, int to, T value);
  /** Sort the array in ascending order */
  void sort();
}
apache-2.0
hugosato/apache-axis
src/javax/xml/rpc/holders/ByteHolder.java
1202
/*
 * Copyright 2001-2004 The Apache Software Foundation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package javax.xml.rpc.holders;

/**
 * A {@link Holder} that carries a single mutable <code>byte</code>,
 * exposed as the public {@link #value} field so the contained value
 * can be read and replaced in place.
 *
 * @version 1.0
 */
public final class ByteHolder implements Holder {

    /** The <code>byte</code> contained by this holder. */
    public byte value;

    /**
     * Creates a holder whose value is left at the <code>byte</code>
     * default of zero.
     */
    public ByteHolder() {
    }

    /**
     * Creates a holder pre-populated with the supplied value.
     *
     * @param value the <code>byte</code> to hold
     */
    public ByteHolder(byte value) {
        this.value = value;
    }
}
apache-2.0
dignwei/language-detector
src/main/java/com/cybozu/labs/langdetect/util/LangProfile.java
5939
/*
 * Copyright 2011 Nakatani Shuyo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file has been modified by François ROLAND.
 */

package com.cybozu.labs.langdetect.util;

import org.jetbrains.annotations.NotNull;

import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

/**
 * {@link LangProfile} is a Language Profile Class.
 * Users don't use this class directly.
 *
 * TODO split into builder and immutable class.
 *
 * TODO currently this only makes n-grams with the space before a word included. no n-gram with the space after the word.
 * Example: "foo" creates " fo" as 3gram, but not "oo ". Either this is a bug, or if intended then needs documentation.
 *
 * @author Nakatani Shuyo
 * @deprecated replaced by LanguageProfile
 */
@Deprecated
public class LangProfile implements Serializable {

    private static final long serialVersionUID = 1L;

    /**
     * n-grams that occur less than this often can be removed using omitLessFreq().
     * This number can change, see LESS_FREQ_RATIO.
     */
    private static final int MINIMUM_FREQ = 2;

    /**
     * Explanation by example:
     *
     * If the most frequent n-gram occurs 1 mio times, then
     * 1'000'000 / this (100'000) = 10.
     * 10 is larger than MINIMUM_FREQ (2), thus MINIMUM_FREQ remains at 2.
     * All n-grams that occur less than 2 times can be removed as noise using omitLessFreq().
     *
     * If the most frequent n-gram occurs 5000 times, then
     * 5'000 / this (100'000) = 0.05.
     * 0.05 is smaller than MINIMUM_FREQ (2), thus MINIMUM_FREQ becomes 0.
     * No n-grams are removed because of insignificance when calling omitLessFreq().
     */
    private static final int LESS_FREQ_RATIO = 100000;

    // Patterns are compiled once instead of calling String.matches() per key
    // inside omitLessFreq()'s loops; the expressions are unchanged.

    /** Matches an n-gram that is exactly one basic-latin letter. */
    private static final Pattern SINGLE_LATIN_LETTER = Pattern.compile("^[A-Za-z]$");

    /** Matches an n-gram containing at least one basic-latin letter. */
    private static final Pattern CONTAINS_LATIN_LETTER = Pattern.compile(".*[A-Za-z].*");

    /**
     * The language name (identifier).
     */
    private String name = null;

    /**
     * Key = ngram, value = count.
     * All n-grams are in here (1-gram, 2-gram, 3-gram).
     */
    private Map<String, Integer> freq = new HashMap<>();

    /**
     * Tells how many occurrences of n-grams exist per gram length.
     * When making 1grams, 2grams and 3grams (currently) then this contains 3 entries where
     * element 0 = number occurrences of 1-grams
     * element 1 = number occurrences of 2-grams
     * element 2 = number occurrences of 3-grams
     * Example: if there are 57 1-grams (English language has about that many) and the training text is
     * fairly long, then this number is in the millions.
     */
    private int[] nWords = new int[NGram.N_GRAM];

    /**
     * Constructor for JSONIC
     */
    public LangProfile() {}

    /**
     * Normal Constructor
     * @param name language name
     */
    public LangProfile(String name) {
        this.setName(name);
    }

    /**
     * Add n-gram to profile
     * @param gram the n-gram to count; must be 1 to NGram.N_GRAM chars long
     * @throws IllegalStateException if no language name has been set
     * @throws IllegalArgumentException if the gram length is out of range
     */
    public void add(@NotNull String gram) {
        if (name == null) throw new IllegalStateException();
        int len = gram.length();
        if (len < 1 || len > NGram.N_GRAM) {
            throw new IllegalArgumentException("ngram length must be 1-3 but was "+len+": >>>"+gram+"<<<!");
        }
        nWords[len - 1]++;
        // Single lookup instead of containsKey() + get() + put().
        Integer count = freq.get(gram);
        freq.put(gram, count == null ? 1 : count + 1);
    }

    /**
     * Removes ngrams that occur fewer times than MINIMUM_FREQ to get rid of rare ngrams.
     *
     * Also removes ascii ngrams if the total number of ascii ngrams is less than one third of the total.
     * This is done because non-latin text (such as Chinese) often has some latin noise in between.
     *
     * TODO split the 2 cleaning to separate methods.
     * TODO distinguish ascii/latin, currently it looks for latin only, should include characters with diacritics, eg Vietnamese.
     * TODO current code counts ascii, but removes any latin. is that desired? if so then this needs documentation.
     *
     * @throws IllegalStateException if no language name has been set
     */
    public void omitLessFreq() {
        if (name == null) throw new IllegalStateException();
        int threshold = nWords[0] / LESS_FREQ_RATIO;
        if (threshold < MINIMUM_FREQ) threshold = MINIMUM_FREQ;

        Set<String> keys = freq.keySet();
        int roman = 0;
        for(Iterator<String> i = keys.iterator(); i.hasNext(); ){
            String key = i.next();
            int count = freq.get(key);
            if (count <= threshold) {
                nWords[key.length()-1] -= count;
                i.remove();
            } else {
                if (SINGLE_LATIN_LETTER.matcher(key).matches()) {
                    roman += count;
                }
            }
        }

        // roman check
        if (roman < nWords[0] / 3) {
            Set<String> keys2 = freq.keySet();
            for(Iterator<String> i = keys2.iterator(); i.hasNext(); ){
                String key = i.next();
                if (CONTAINS_LATIN_LETTER.matcher(key).matches()) {
                    nWords[key.length()-1] -= freq.get(key);
                    i.remove();
                }
            }
        }
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Map<String, Integer> getFreq() {
        return freq;
    }

    public void setFreq(Map<String, Integer> freq) {
        this.freq = freq;
    }

    public int[] getNWords() {
        return nWords;
    }

    public void setNWords(int[] nWords) {
        this.nWords = nWords;
    }
}
apache-2.0
robin13/elasticsearch
server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
55245
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Function; 
import java.util.function.LongSupplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
import static org.elasticsearch.search.sort.FieldSortBuilder.hasPrimaryFieldSort;

/**
 * Coordinating-node entry point for the {@code _search} API. Resolves the requested indices
 * (local and remote/cross-cluster), builds the shard iterators and drives the asynchronous
 * search phases that fan out to the individual shards.
 */
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {

    /** The maximum number of shards for a single search request. */
    public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
            "action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope);

    private final ThreadPool threadPool;
    private final ClusterService clusterService;
    private final SearchTransportService searchTransportService;
    private final RemoteClusterService remoteClusterService;
    private final SearchPhaseController searchPhaseController;
    private final SearchService searchService;
    private final IndexNameExpressionResolver indexNameExpressionResolver;
    private final NamedWriteableRegistry namedWriteableRegistry;
    // breaker used to account coordination-side memory of result merging
    private final CircuitBreaker circuitBreaker;

    @Inject
    public TransportSearchAction(ThreadPool threadPool, CircuitBreakerService circuitBreakerService,
                                 TransportService transportService, SearchService searchService,
                                 SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                 ClusterService clusterService, ActionFilters actionFilters,
                                 IndexNameExpressionResolver indexNameExpressionResolver,
                                 NamedWriteableRegistry namedWriteableRegistry) {
        super(SearchAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchRequest>) SearchRequest::new);
        this.threadPool = threadPool;
        this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST);
        this.searchPhaseController = searchPhaseController;
        this.searchTransportService = searchTransportService;
        this.remoteClusterService = searchTransportService.getRemoteClusterService();
        // registers the shard-level transport handlers as a construction side effect
        SearchTransportService.registerRequestHandler(transportService, searchService);
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.indexNameExpressionResolver = indexNameExpressionResolver;
        this.namedWriteableRegistry = namedWriteableRegistry;
    }

    /**
     * Builds the alias filter for each local concrete index and merges in the already-resolved
     * remote alias filters. The returned map is keyed by index UUID. Raises a cluster block
     * exception if any index is blocked for reads.
     */
    private Map<String, AliasFilter> buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState,
                                                              Index[] concreteIndices, Map<String, AliasFilter> remoteAliasMap) {
        final Map<String, AliasFilter> aliasFilterMap = new HashMap<>();
        final Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices());
        for (Index index : concreteIndices) {
            clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName());
            AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index.getName(), indicesAndAliases);
            assert aliasFilter != null;
            aliasFilterMap.put(index.getUUID(), aliasFilter);
        }
        aliasFilterMap.putAll(remoteAliasMap);
        return aliasFilterMap;
    }

    /**
     * Resolves the index boosts from the request source into a map keyed by concrete index UUID.
     * When several expressions resolve to the same concrete index, the first boost wins
     * (putIfAbsent). Returns an empty map when no boosts are defined.
     */
    private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, ClusterState clusterState) {
        if (searchRequest.source() == null) {
            return Collections.emptyMap();
        }
        SearchSourceBuilder source = searchRequest.source();
        if (source.indexBoosts() == null) {
            return Collections.emptyMap();
        }
        Map<String, Float> concreteIndexBoosts = new HashMap<>();
        for (SearchSourceBuilder.IndexBoost ib : source.indexBoosts()) {
            Index[] concreteIndices =
                indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), ib.getIndex());
            for (Index concreteIndex : concreteIndices) {
                concreteIndexBoosts.putIfAbsent(concreteIndex.getUUID(), ib.getBoost());
            }
        }
        return Collections.unmodifiableMap(concreteIndexBoosts);
    }

    /**
     * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving
     * "now" to an index name). Another clock is needed for measuring how long a search operation
     * took.
These two uses are at odds with each other. There are many issues with using a real
     * clock for measuring how long an operation took (they often lack precision, they are subject
     * to moving backwards due to NTP and other such complexities, etc.). There are also issues with
     * using a relative clock for reporting real time. Thus, we simply separate these two uses.
     */
    static final class SearchTimeProvider {

        // wall-clock start, only for resolving date math in index expressions
        private final long absoluteStartMillis;
        // monotonic start, only for measuring elapsed time
        private final long relativeStartNanos;
        private final LongSupplier relativeCurrentNanosProvider;

        /**
         * Instantiates a new search time provider. The absolute start time is the real clock time
         * used for resolving index expressions that include dates. The relative start time is the
         * start of the search operation according to a relative clock. The total time the search
         * operation took can be measured against the provided relative clock and the relative start
         * time.
         *
         * @param absoluteStartMillis the absolute start time in milliseconds since the epoch
         * @param relativeStartNanos the relative start time in nanoseconds
         * @param relativeCurrentNanosProvider provides the current relative time
         */
        SearchTimeProvider(
                final long absoluteStartMillis,
                final long relativeStartNanos,
                final LongSupplier relativeCurrentNanosProvider) {
            this.absoluteStartMillis = absoluteStartMillis;
            this.relativeStartNanos = relativeStartNanos;
            this.relativeCurrentNanosProvider = relativeCurrentNanosProvider;
        }

        long getAbsoluteStartMillis() {
            return absoluteStartMillis;
        }

        /** Elapsed time of the search so far, in milliseconds, measured on the relative clock. */
        long buildTookInMillis() {
            return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos);
        }
    }

    @Override
    protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        executeRequest(task, searchRequest, this::searchAsyncAction, listener);
    }

    /**
     * A search-like action that runs a single phase directly against shard targets
     * (used e.g. to open a point-in-time) instead of the regular query/fetch phases.
     */
    public interface SinglePhaseSearchAction {
        void executeOnShardTarget(SearchTask searchTask, SearchShardTarget target, Transport.Connection connection,
                                  ActionListener<SearchPhaseResult> listener);
    }

    /**
     * Executes {@code phaseSearchAction} once per shard via the normal shard-resolution machinery,
     * returning an empty search response when all shards have been visited.
     */
    public void executeRequest(Task task, SearchRequest searchRequest, String actionName, boolean includeSearchContext,
                               SinglePhaseSearchAction phaseSearchAction, ActionListener<SearchResponse> listener) {
        executeRequest(task, searchRequest, new SearchAsyncActionProvider() {
            @Override
            public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction(
                    SearchTask task, SearchRequest searchRequest, Executor executor,
                    GroupShardsIterator<SearchShardIterator> shardsIts, SearchTimeProvider timeProvider,
                    BiFunction<String, String, Transport.Connection> connectionLookup, ClusterState clusterState,
                    Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                    ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool,
                    SearchResponse.Clusters clusters) {
                return new AbstractSearchAsyncAction<>(
                        actionName, logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts,
                        executor, searchRequest, listener, shardsIts, timeProvider, clusterState, task,
                        new ArraySearchPhaseResults<>(shardsIts.size()), 1, clusters) {
                    @Override
                    protected void executePhaseOnShard(SearchShardIterator shardIt, SearchShardTarget shard,
                                                       SearchActionListener<SearchPhaseResult> listener) {
                        // delegate the per-shard work to the provided single-phase action
                        final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId());
                        phaseSearchAction.executeOnShardTarget(task, shard, connection, listener);
                    }

                    @Override
                    protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results,
                                                       SearchPhaseContext context) {
                        // single phase only: after it completes, reply with an empty search response
                        return new SearchPhase(getName()) {
                            @Override
                            public void run() {
                                final AtomicArray<SearchPhaseResult> atomicArray = results.getAtomicArray();
                                sendSearchResponse(InternalSearchResponse.empty(), atomicArray);
                            }
                        };
                    }

                    @Override
                    boolean buildPointInTimeFromSearchResults() {
                        return includeSearchContext;
                    }
                };
            }
        }, listener);
    }

    /**
     * Rewrites the request first, then dispatches it as a purely local search, a
     * minimize-roundtrips cross-cluster search, or a full cross-cluster shard fan-out.
     */
    private void executeRequest(Task task, SearchRequest original, SearchAsyncActionProvider
searchAsyncActionProvider, ActionListener<SearchResponse> listener) {
        final long relativeStartNanos = System.nanoTime();
        final SearchTimeProvider timeProvider =
            new SearchTimeProvider(original.getOrCreateAbsoluteStartMillis(), relativeStartNanos, System::nanoTime);
        // The rewritten request (date math, query rewriting) decides the execution strategy below.
        ActionListener<SearchRequest> rewriteListener = ActionListener.wrap(rewritten -> {
            final SearchContextId searchContext;
            final Map<String, OriginalIndices> remoteClusterIndices;
            if (rewritten.pointInTimeBuilder() != null) {
                // a point-in-time search carries its target shards/clusters in the encoded context id
                searchContext = rewritten.pointInTimeBuilder().getSearchContextId(namedWriteableRegistry);
                remoteClusterIndices = getIndicesFromSearchContexts(searchContext, rewritten.indicesOptions());
            } else {
                searchContext = null;
                remoteClusterIndices = remoteClusterService.groupIndices(rewritten.indicesOptions(), rewritten.indices());
            }
            // may be null when only remote indices were requested
            OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
            final ClusterState clusterState = clusterService.state();
            if (remoteClusterIndices.isEmpty()) {
                executeLocalSearch(
                    task, timeProvider, rewritten, localIndices, clusterState, listener, searchContext,
                    searchAsyncActionProvider);
            } else {
                if (shouldMinimizeRoundtrips(rewritten)) {
                    // forward the whole request to each remote cluster and merge the final responses here
                    final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).getTaskId();
                    ccsRemoteReduce(parentTaskId, rewritten, localIndices, remoteClusterIndices, timeProvider,
                        searchService.aggReduceContextBuilder(rewritten),
                        remoteClusterService, threadPool, listener,
                        (r, l) -> executeLocalSearch(
                            task, timeProvider, r, localIndices, clusterState, l, searchContext,
                            searchAsyncActionProvider));
                } else {
                    // fan out to every shard (local and remote) and reduce on this coordinating node
                    AtomicInteger skippedClusters = new AtomicInteger(0);
                    collectSearchShards(rewritten.indicesOptions(), rewritten.preference(), rewritten.routing(),
                        skippedClusters, remoteClusterIndices, remoteClusterService, threadPool,
                        ActionListener.wrap(
                            searchShardsResponses -> {
                                final BiFunction<String, String, DiscoveryNode> clusterNodeLookup =
                                    getRemoteClusterNodeLookup(searchShardsResponses);
                                final Map<String, AliasFilter> remoteAliasFilters;
                                final List<SearchShardIterator> remoteShardIterators;
                                if (searchContext != null) {
                                    remoteAliasFilters = searchContext.aliasFilter();
                                    remoteShardIterators = getRemoteShardsIteratorFromPointInTime(searchShardsResponses,
                                        searchContext, rewritten.pointInTimeBuilder().getKeepAlive(),
                                        remoteClusterIndices);
                                } else {
                                    remoteAliasFilters = getRemoteAliasFilters(searchShardsResponses);
                                    remoteShardIterators = getRemoteShardsIterator(searchShardsResponses,
                                        remoteClusterIndices, remoteAliasFilters);
                                }
                                int localClusters = localIndices == null ? 0 : 1;
                                int totalClusters = remoteClusterIndices.size() + localClusters;
                                int successfulClusters = searchShardsResponses.size() + localClusters;
                                executeSearch((SearchTask) task, timeProvider, rewritten, localIndices,
                                    remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener,
                                    new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()),
                                    searchContext, searchAsyncActionProvider);
                            },
                            listener::onFailure));
                }
            }
        }, listener::onFailure);
        Rewriteable.rewriteAndFetch(original, searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis),
            rewriteListener);
    }

    /**
     * Whether a cross-cluster search may be executed by forwarding the whole request to each
     * remote cluster (one roundtrip per cluster) instead of fanning out to every shard.
     * Not possible for scroll, point-in-time, DFS_QUERY_THEN_FETCH, or collapse with inner hits.
     */
    static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) {
        if (searchRequest.isCcsMinimizeRoundtrips() == false) {
            return false;
        }
        if (searchRequest.scroll() != null) {
            return false;
        }
        if (searchRequest.pointInTimeBuilder() != null) {
            return false;
        }
        if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
            return false;
        }
        SearchSourceBuilder source = searchRequest.source();
        return source == null || source.collapse() == null || source.collapse().getInnerHits() == null ||
            source.collapse().getInnerHits().isEmpty();
    }

    /**
     * Executes a minimize-roundtrips cross-cluster search: sends a sub-request to each remote
     * cluster (and optionally the local one) and merges the already-reduced responses.
     */
    static void ccsRemoteReduce(TaskId parentTaskId, SearchRequest searchRequest, OriginalIndices localIndices,
                                Map<String, OriginalIndices> remoteIndices, SearchTimeProvider timeProvider,
InternalAggregation.ReduceContextBuilder aggReduceContextBuilder,
                                RemoteClusterService remoteClusterService, ThreadPool threadPool,
                                ActionListener<SearchResponse> listener,
                                BiConsumer<SearchRequest, ActionListener<SearchResponse>> localSearchConsumer) {
        if (localIndices == null && remoteIndices.size() == 1) {
            //if we are searching against a single remote cluster, we simply forward the original search request to such cluster
            //and we directly perform final reduction in the remote cluster
            Map.Entry<String, OriginalIndices> entry = remoteIndices.entrySet().iterator().next();
            String clusterAlias = entry.getKey();
            boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
            OriginalIndices indices = entry.getValue();
            SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(parentTaskId, searchRequest,
                indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), true);
            Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
            remoteClusterClient.search(ccsSearchRequest, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse searchResponse) {
                    // rebuild the remote response locally so the took time and cluster counters
                    // reflect this coordinating node's view
                    Map<String, ProfileShardResult> profileResults = searchResponse.getProfileResults();
                    SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() ?
                        null : new SearchProfileShardResults(profileResults);
                    InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchResponse.getHits(),
                        (InternalAggregations) searchResponse.getAggregations(), searchResponse.getSuggest(), profile,
                        searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(),
                        searchResponse.getNumReducePhases());
                    listener.onResponse(new SearchResponse(internalSearchResponse, searchResponse.getScrollId(),
                        searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(),
                        searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(),
                        searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0),
                        searchResponse.pointInTimeId()));
                }

                @Override
                public void onFailure(Exception e) {
                    if (skipUnavailable) {
                        // cluster is marked skippable: report an empty response with one skipped cluster
                        listener.onResponse(SearchResponse.empty(timeProvider::buildTookInMillis,
                            new SearchResponse.Clusters(1, 0, 1)));
                    } else {
                        listener.onFailure(wrapRemoteClusterFailure(clusterAlias, e));
                    }
                }
            });
        } else {
            // multiple clusters involved: merge per-cluster responses once all have arrived
            SearchResponseMerger searchResponseMerger = createSearchResponseMerger(
                searchRequest.source(), timeProvider, aggReduceContextBuilder);
            AtomicInteger skippedClusters = new AtomicInteger(0);
            final AtomicReference<Exception> exceptions = new AtomicReference<>();
            int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1);
            final CountDown countDown = new CountDown(totalClusters);
            for (Map.Entry<String, OriginalIndices> entry : remoteIndices.entrySet()) {
                String clusterAlias = entry.getKey();
                boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
                OriginalIndices indices = entry.getValue();
                SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(parentTaskId, searchRequest,
                    indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), false);
                ActionListener<SearchResponse> ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown,
                    skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
                Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
                remoteClusterClient.search(ccsSearchRequest, ccsListener);
            }
            if (localIndices != null) {
                // the local cluster is never skippable, hence skipUnavailable = false
                ActionListener<SearchResponse> ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
                    false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
                SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest(parentTaskId, searchRequest,
                    localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
                    timeProvider.getAbsoluteStartMillis(), false);
                localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener);
            }
        }
    }

    /**
     * Creates the merger used to combine per-cluster responses of a minimize-roundtrips search,
     * capturing the requested from/size/track_total_hits before they are rewritten for the
     * outgoing sub-requests.
     */
    static SearchResponseMerger createSearchResponseMerger(SearchSourceBuilder source, SearchTimeProvider timeProvider,
                                                           InternalAggregation.ReduceContextBuilder aggReduceContextBuilder) {
        final int from;
        final int size;
        final int trackTotalHitsUpTo;
        if (source == null) {
            from = SearchService.DEFAULT_FROM;
            size = SearchService.DEFAULT_SIZE;
            trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO;
        } else {
            from = source.from() == -1 ? SearchService.DEFAULT_FROM : source.from();
            size = source.size() == -1 ? SearchService.DEFAULT_SIZE : source.size();
            trackTotalHitsUpTo = source.trackTotalHitsUpTo() == null ?
SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : source.trackTotalHitsUpTo();
            //here we modify the original source so we can re-use it by setting it to each outgoing search request
            source.from(0);
            source.size(from + size);
        }
        return new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, aggReduceContextBuilder);
    }

    /**
     * Asks every remote cluster for its matching shards; completes the listener with a map of
     * cluster alias to shards response once all clusters replied (skippable clusters that fail
     * are counted in {@code skippedClusters} instead of failing the whole request).
     */
    static void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing,
                                    AtomicInteger skippedClusters, Map<String, OriginalIndices> remoteIndicesByCluster,
                                    RemoteClusterService remoteClusterService, ThreadPool threadPool,
                                    ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
        final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
        final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>();
        final AtomicReference<Exception> exceptions = new AtomicReference<>();
        for (Map.Entry<String, OriginalIndices> entry : remoteIndicesByCluster.entrySet()) {
            final String clusterAlias = entry.getKey();
            boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
            Client clusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
            final String[] indices = entry.getValue().indices();
            ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices)
                .indicesOptions(indicesOptions).local(true).preference(preference).routing(routing);
            clusterClient.admin().cluster().searchShards(searchShardsRequest,
                new CCSActionListener<ClusterSearchShardsResponse, Map<String, ClusterSearchShardsResponse>>(
                    clusterAlias, skipUnavailable, responsesCountDown, skippedClusters, exceptions, listener) {
                    @Override
                    void innerOnResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
                        searchShardsResponses.put(clusterAlias, clusterSearchShardsResponse);
                    }

                    @Override
                    Map<String, ClusterSearchShardsResponse> createFinalResponse() {
                        return searchShardsResponses;
                    }
                }
            );
        }
    }

    /**
     * Creates the per-cluster listener of a minimize-roundtrips search: adds each cluster
     * response to the shared merger and emits the merged response once the last cluster replied.
     */
    private static ActionListener<SearchResponse> createCCSListener(String clusterAlias, boolean skipUnavailable,
                                                                    CountDown countDown, AtomicInteger skippedClusters,
                                                                    AtomicReference<Exception> exceptions,
                                                                    SearchResponseMerger searchResponseMerger,
                                                                    int totalClusters,
                                                                    ActionListener<SearchResponse> originalListener) {
        return new CCSActionListener<SearchResponse, SearchResponse>(clusterAlias, skipUnavailable, countDown,
            skippedClusters, exceptions, originalListener) {
            @Override
            void innerOnResponse(SearchResponse searchResponse) {
                searchResponseMerger.add(searchResponse);
            }

            @Override
            SearchResponse createFinalResponse() {
                SearchResponse.Clusters clusters = new SearchResponse.Clusters(totalClusters,
                    searchResponseMerger.numResponses(), skippedClusters.get());
                return searchResponseMerger.getMergedResponse(clusters);
            }
        };
    }

    /** Runs a search that targets only local indices (no remote shard iterators or node lookup). */
    private void executeLocalSearch(Task task, SearchTimeProvider timeProvider, SearchRequest searchRequest,
                                    OriginalIndices localIndices, ClusterState clusterState,
                                    ActionListener<SearchResponse> listener, SearchContextId searchContext,
                                    SearchAsyncActionProvider searchAsyncActionProvider) {
        executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
            (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener,
            SearchResponse.Clusters.EMPTY, searchContext, searchAsyncActionProvider);
    }

    /**
     * Builds a (clusterAlias, nodeId) -> DiscoveryNode lookup from the nodes reported by each
     * remote cluster's shards response.
     */
    static BiFunction<String, String, DiscoveryNode> getRemoteClusterNodeLookup(
            Map<String, ClusterSearchShardsResponse> searchShardsResp) {
        Map<String, Map<String, DiscoveryNode>> clusterToNode = new HashMap<>();
        for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResp.entrySet()) {
            String clusterAlias = entry.getKey();
            for (DiscoveryNode remoteNode : entry.getValue().getNodes()) {
                clusterToNode.computeIfAbsent(clusterAlias, k -> new HashMap<>()).put(remoteNode.getId(), remoteNode);
            }
        }
        return (clusterAlias, nodeId) -> {
            Map<String, DiscoveryNode> clusterNodes = clusterToNode.get(clusterAlias);
            if (clusterNodes == null)
{
                throw new IllegalArgumentException("unknown remote cluster: " + clusterAlias);
            }
            return clusterNodes.get(nodeId);
        };
    }

    /**
     * Extracts the alias filters from the remote clusters' shards responses, keyed by index UUID
     * (the UUID is what later lookups use). Missing filter maps fall back to AliasFilter.EMPTY.
     */
    static Map<String, AliasFilter> getRemoteAliasFilters(Map<String, ClusterSearchShardsResponse> searchShardsResp) {
        final Map<String, AliasFilter> aliasFilterMap = new HashMap<>();
        for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResp.entrySet()) {
            ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
            final Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
            for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
                ShardId shardId = clusterSearchShardsGroup.getShardId();
                final AliasFilter aliasFilter;
                if (indicesAndFilters == null) {
                    aliasFilter = AliasFilter.EMPTY;
                } else {
                    aliasFilter = indicesAndFilters.get(shardId.getIndexName());
                    assert aliasFilter != null : "alias filter must not be null for index: " + shardId.getIndex();
                }
                // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
                aliasFilterMap.put(shardId.getIndex().getUUID(), aliasFilter);
            }
        }
        return aliasFilterMap;
    }

    /**
     * Builds one SearchShardIterator per remote shard group, attaching the cluster alias and
     * the original (possibly aliased) index names for response disambiguation.
     */
    static List<SearchShardIterator> getRemoteShardsIterator(Map<String, ClusterSearchShardsResponse> searchShardsResponses,
                                                             Map<String, OriginalIndices> remoteIndicesByCluster,
                                                             Map<String, AliasFilter> aliasFilterMap) {
        final List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
        for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
            for (ClusterSearchShardsGroup clusterSearchShardsGroup : entry.getValue().getGroups()) {
                //add the cluster name to the remote index names for indices disambiguation
                //this ends up in the hits returned with the search response
                ShardId shardId = clusterSearchShardsGroup.getShardId();
                AliasFilter aliasFilter = aliasFilterMap.get(shardId.getIndex().getUUID());
                String[] aliases = aliasFilter.getAliases();
                String clusterAlias = entry.getKey();
                String[] finalIndices = aliases.length == 0 ? new String[]{shardId.getIndexName()} : aliases;
                final OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
                assert originalIndices != null : "original indices are null for clusterAlias: " + clusterAlias;
                SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, shardId,
                    Arrays.asList(clusterSearchShardsGroup.getShards()),
                    new OriginalIndices(finalIndices, originalIndices.indicesOptions()));
                remoteShardIterators.add(shardIterator);
            }
        }
        return remoteShardIterators;
    }

    /**
     * Builds remote shard iterators for a point-in-time search. Each iterator primarily targets
     * the node that holds the reader context; when the context has a searcher id, other copies of
     * the shard are added as fallbacks.
     */
    static List<SearchShardIterator> getRemoteShardsIteratorFromPointInTime(
            Map<String, ClusterSearchShardsResponse> searchShardsResponses, SearchContextId searchContextId,
            TimeValue searchContextKeepAlive, Map<String, OriginalIndices> remoteClusterIndices) {
        final List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
        for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
            for (ClusterSearchShardsGroup group : entry.getValue().getGroups()) {
                final ShardId shardId = group.getShardId();
                final String clusterAlias = entry.getKey();
                final SearchContextIdForNode perNode = searchContextId.shards().get(shardId);
                assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias();
                final List<String> targetNodes = new ArrayList<>(group.getShards().length);
                targetNodes.add(perNode.getNode());
                if (perNode.getSearchContextId().getSearcherId() != null) {
                    // a searcher id allows retrying on other shard copies if the original node is gone
                    for (ShardRouting shard : group.getShards()) {
                        if (shard.currentNodeId().equals(perNode.getNode()) == false) {
                            targetNodes.add(shard.currentNodeId());
                        }
                    }
                }
                SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, shardId, targetNodes,
                    remoteClusterIndices.get(clusterAlias), perNode.getSearchContextId(), searchContextKeepAlive);
                remoteShardIterators.add(shardIterator);
            }
        }
        return remoteShardIterators;
    }

    /** Resolves the local index expressions to concrete indices; empty when only remote indices were requested. */
    private Index[] resolveLocalIndices(OriginalIndices localIndices, ClusterState
clusterState, SearchTimeProvider timeProvider) {
        if (localIndices == null) {
            return Index.EMPTY_ARRAY; //don't search on any local index (happens when only remote indices were specified)
        }
        return indexNameExpressionResolver.concreteIndices(clusterState, localIndices,
            timeProvider.getAbsoluteStartMillis());
    }

    /**
     * Core fan-out path: builds the local shard iterators, merges them with the remote ones,
     * enforces the shard-count limit, tunes the request (search type, partial results, request
     * cache) and starts the asynchronous search action.
     */
    private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest,
                               OriginalIndices localIndices, List<SearchShardIterator> remoteShardIterators,
                               BiFunction<String, String, DiscoveryNode> remoteConnections, ClusterState clusterState,
                               Map<String, AliasFilter> remoteAliasMap, ActionListener<SearchResponse> listener,
                               SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext,
                               SearchAsyncActionProvider searchAsyncActionProvider) {
        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
        // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
        // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
        // of just for the _search api
        final List<SearchShardIterator> localShardIterators;
        final Map<String, AliasFilter> aliasFilter;
        final String[] concreteLocalIndices;
        if (searchContext != null) {
            // point-in-time search: shard targets and alias filters come from the encoded context
            assert searchRequest.pointInTimeBuilder() != null;
            aliasFilter = searchContext.aliasFilter();
            concreteLocalIndices = localIndices == null ? new String[0] : localIndices.indices();
            localShardIterators = getLocalLocalShardsIteratorFromPointInTime(clusterState, localIndices,
                searchRequest.getLocalClusterAlias(), searchContext, searchRequest.pointInTimeBuilder().getKeepAlive());
        } else {
            final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider);
            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
                searchRequest.routing(), searchRequest.indices());
            routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap);
            concreteLocalIndices = new String[indices.length];
            for (int i = 0; i < indices.length; i++) {
                concreteLocalIndices[i] = indices[i].getName();
            }
            // pending search counts feed adaptive replica selection in the operation routing
            Map<String, Long> nodeSearchCounts = searchTransportService.getPendingSearchRequests();
            GroupShardsIterator<ShardIterator> localShardRoutings = clusterService.operationRouting().searchShards(
                clusterState, concreteLocalIndices, routingMap, searchRequest.preference(),
                searchService.getResponseCollectorService(), nodeSearchCounts);
            localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false)
                .map(it -> new SearchShardIterator(
                    searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices))
                .collect(Collectors.toList());
            aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
        }
        final GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardIterators,
            remoteShardIterators);

        failIfOverShardCountLimit(clusterService, shardIterators.size());

        Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);

        // optimize search type for cases where there is only one shard group to search on
        if (shardIterators.size() == 1) {
            // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard
            searchRequest.searchType(QUERY_THEN_FETCH);
        }
        if (searchRequest.allowPartialSearchResults() == null) {
            // No user preference defined in search request - apply cluster service default
            searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults());
        }
        if (searchRequest.isSuggestOnly()) {
            // disable request cache if we have only suggest
            searchRequest.requestCache(false);
            switch (searchRequest.searchType()) {
                case DFS_QUERY_THEN_FETCH:
                    // convert to Q_T_F if we have only suggest
                    searchRequest.searchType(QUERY_THEN_FETCH);
                    break;
            }
        }
        final DiscoveryNodes nodes = clusterState.nodes();
        BiFunction<String, String, Transport.Connection> connectionLookup = buildConnectionLookup(
            searchRequest.getLocalClusterAlias(), nodes::get, remoteConnections,
            searchTransportService::getConnection);
        final Executor asyncSearchExecutor = asyncSearchExecutor(concreteLocalIndices, clusterState);
        final boolean preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest,
            concreteLocalIndices, localShardIterators.size() + remoteShardIterators.size());
        searchAsyncActionProvider.asyncSearchAction(
            task, searchRequest, asyncSearchExecutor, shardIterators, timeProvider, connectionLookup, clusterState,
            Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener,
            preFilterSearchShards, threadPool, clusters).start();
    }

    /** Chooses the executor: SYSTEM_READ when every targeted local index is a system index, SEARCH otherwise. */
    Executor asyncSearchExecutor(final String[] indices, final ClusterState clusterState) {
        final boolean onlySystemIndices = Arrays.stream(indices)
            .allMatch(index -> {
                final IndexMetadata indexMetadata = clusterState.metadata().index(index);
                return indexMetadata != null && indexMetadata.isSystem();
            });
        return onlySystemIndices ?
threadPool.executor(ThreadPool.Names.SYSTEM_READ) : threadPool.executor(ThreadPool.Names.SEARCH);
    }

    /**
     * Builds a (clusterAlias, nodeId) -> connection lookup that routes to a local node when the
     * alias is null (or when the request itself is a remote-originated sub-request), and to a
     * remote cluster node otherwise.
     */
    static BiFunction<String, String, Transport.Connection> buildConnectionLookup(String requestClusterAlias,
                                                              Function<String, DiscoveryNode> localNodes,
                                                              BiFunction<String, String, DiscoveryNode> remoteNodes,
                                                              BiFunction<String, DiscoveryNode, Transport.Connection> nodeToConnection) {
        return (clusterAlias, nodeId) -> {
            final DiscoveryNode discoveryNode;
            final boolean remoteCluster;
            if (clusterAlias == null || requestClusterAlias != null) {
                assert requestClusterAlias == null || requestClusterAlias.equals(clusterAlias);
                discoveryNode = localNodes.apply(nodeId);
                remoteCluster = false;
            } else {
                discoveryNode = remoteNodes.apply(clusterAlias, nodeId);
                remoteCluster = true;
            }
            if (discoveryNode == null) {
                throw new IllegalStateException("no node found for id: " + nodeId);
            }
            return nodeToConnection.apply(remoteCluster ? clusterAlias : null, discoveryNode);
        };
    }

    /**
     * Whether a can-match pre-filter round should run before the query phase. Requires
     * QUERY_THEN_FETCH, a query that can rewrite to match-none (or a primary field sort),
     * and more shards than the pre-filter threshold.
     */
    static boolean shouldPreFilterSearchShards(ClusterState clusterState,
                                               SearchRequest searchRequest,
                                               String[] indices,
                                               int numShards) {
        SearchSourceBuilder source = searchRequest.source();
        Integer preFilterShardSize = searchRequest.getPreFilterShardSize();
        if (preFilterShardSize == null && (hasReadOnlyIndices(indices, clusterState) || hasPrimaryFieldSort(source))) {
            // read-only (e.g. frozen) indices and primary field sorts benefit from always pre-filtering
            preFilterShardSize = 1;
        } else if (preFilterShardSize == null) {
            preFilterShardSize = SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE;
        }
        return searchRequest.searchType() == QUERY_THEN_FETCH // we can't do this for DFS it needs to fan out to all shards all the time
            && (SearchService.canRewriteToMatchNone(source) || hasPrimaryFieldSort(source))
            && preFilterShardSize < numShards;
    }

    /** True if any of the given indices carries a write block (treated as read-only for pre-filtering). */
    private static boolean hasReadOnlyIndices(String[] indices, ClusterState clusterState) {
        for (String index : indices) {
            ClusterBlockException writeBlock = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, index);
            if (writeBlock != null) {
                return true;
            }
        }
        return false;
    }
static GroupShardsIterator<SearchShardIterator> mergeShardsIterators(List<SearchShardIterator> localShardIterators, List<SearchShardIterator> remoteShardIterators) { List<SearchShardIterator> shards = new ArrayList<>(remoteShardIterators); shards.addAll(localShardIterators); return GroupShardsIterator.sortAndCreate(shards); } interface SearchAsyncActionProvider { AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( SearchTask task, SearchRequest searchRequest, Executor executor, GroupShardsIterator<SearchShardIterator> shardIterators, SearchTimeProvider timeProvider, BiFunction<String, String, Transport.Connection> connectionLookup, ClusterState clusterState, Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts, ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters); } private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction( SearchTask task, SearchRequest searchRequest, Executor executor, GroupShardsIterator<SearchShardIterator> shardIterators, SearchTimeProvider timeProvider, BiFunction<String, String, Transport.Connection> connectionLookup, ClusterState clusterState, Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts, ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters) { if (preFilter) { return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators, timeProvider, clusterState, task, (iter) -> { AbstractSearchAsyncAction<? 
extends SearchPhaseResult> action = searchAsyncAction( task, searchRequest, executor, iter, timeProvider, connectionLookup, clusterState, aliasFilter, concreteIndexBoosts, listener, false, threadPool, clusters); return new SearchPhase(action.getName()) { @Override public void run() { action.start(); } }; }, clusters, searchService.getCoordinatorRewriteContextProvider(timeProvider::getAbsoluteStartMillis)); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults(executor, circuitBreaker, task.getProgressListener(), searchRequest, shardIterators.size(), exc -> searchTransportService.cancelSearchTask(task, "failed to merge result [" + exc.getMessage() + "]")); AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction; switch (searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, queryResultConsumer, searchRequest, listener, shardIterators, timeProvider, clusterState, task, clusters); break; case QUERY_THEN_FETCH: searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, queryResultConsumer, searchRequest, listener, shardIterators, timeProvider, clusterState, task, clusters); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); } return searchAsyncAction; } } private static void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { final long shardCountLimit = clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING); if (shardCount > shardCountLimit) { throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " + shardCountLimit + ". 
This limit exists because querying many shards at the same time can make the " + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " + "have a smaller number of larger shards. Update [" + SHARD_COUNT_LIMIT_SETTING.getKey() + "] to a greater value if you really want to query that many shards at the same time."); } } abstract static class CCSActionListener<Response, FinalResponse> implements ActionListener<Response> { private final String clusterAlias; private final boolean skipUnavailable; private final CountDown countDown; private final AtomicInteger skippedClusters; private final AtomicReference<Exception> exceptions; private final ActionListener<FinalResponse> originalListener; CCSActionListener(String clusterAlias, boolean skipUnavailable, CountDown countDown, AtomicInteger skippedClusters, AtomicReference<Exception> exceptions, ActionListener<FinalResponse> originalListener) { this.clusterAlias = clusterAlias; this.skipUnavailable = skipUnavailable; this.countDown = countDown; this.skippedClusters = skippedClusters; this.exceptions = exceptions; this.originalListener = originalListener; } @Override public final void onResponse(Response response) { innerOnResponse(response); maybeFinish(); } abstract void innerOnResponse(Response response); @Override public final void onFailure(Exception e) { if (skipUnavailable) { skippedClusters.incrementAndGet(); } else { Exception exception = e; if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) { exception = wrapRemoteClusterFailure(clusterAlias, e); } if (exceptions.compareAndSet(null, exception) == false) { exceptions.accumulateAndGet(exception, (previous, current) -> { current.addSuppressed(previous); return current; }); } } maybeFinish(); } private void maybeFinish() { if (countDown.countDown()) { Exception exception = exceptions.get(); if (exception == null) { FinalResponse response; try { response = createFinalResponse(); } catch(Exception e) { 
originalListener.onFailure(e); return; } originalListener.onResponse(response); } else { originalListener.onFailure(exceptions.get()); } } } abstract FinalResponse createFinalResponse(); } private static RemoteTransportException wrapRemoteClusterFailure(String clusterAlias, Exception e) { return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); } static Map<String, OriginalIndices> getIndicesFromSearchContexts(SearchContextId searchContext, IndicesOptions indicesOptions) { final Map<String, Set<String>> indices = new HashMap<>(); for (Map.Entry<ShardId, SearchContextIdForNode> entry : searchContext.shards().entrySet()) { String clusterAlias = entry.getValue().getClusterAlias() == null ? RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY : entry.getValue().getClusterAlias(); indices.computeIfAbsent(clusterAlias, k -> new HashSet<>()).add(entry.getKey().getIndexName()); } return indices.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> new OriginalIndices(e.getValue().toArray(String[]::new), indicesOptions))); } static List<SearchShardIterator> getLocalLocalShardsIteratorFromPointInTime(ClusterState clusterState, OriginalIndices originalIndices, String localClusterAlias, SearchContextId searchContext, TimeValue keepAlive) { final List<SearchShardIterator> iterators = new ArrayList<>(searchContext.shards().size()); for (Map.Entry<ShardId, SearchContextIdForNode> entry : searchContext.shards().entrySet()) { final SearchContextIdForNode perNode = entry.getValue(); if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); final List<String> targetNodes = new ArrayList<>(shards.size()); targetNodes.add(perNode.getNode()); if (perNode.getSearchContextId().getSearcherId() != null) { for (ShardRouting shard : shards) { if (shard.currentNodeId().equals(perNode.getNode()) == false) { 
targetNodes.add(shard.currentNodeId()); } } } iterators.add(new SearchShardIterator(localClusterAlias, shardId, targetNodes, originalIndices, perNode.getSearchContextId(), keepAlive)); } } return iterators; } }
apache-2.0
gabby2212/gs-collections
jmh-tests/src/main/java/com/gs/collections/impl/jmh/AnagramBagTest.java
7479
/*
 * Copyright 2014 Goldman Sachs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.gs.collections.impl.jmh;

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import com.gs.collections.api.RichIterable;
import com.gs.collections.api.bag.ParallelUnsortedBag;
import com.gs.collections.api.multimap.MutableMultimap;
import com.gs.collections.api.multimap.bag.UnsortedBagMultimap;
import com.gs.collections.api.multimap.list.MutableListMultimap;
import com.gs.collections.impl.bag.mutable.HashBag;
import com.gs.collections.impl.block.factory.Comparators;
import com.gs.collections.impl.block.factory.Procedures;
import com.gs.collections.impl.forkjoin.FJIterate;
import com.gs.collections.impl.list.mutable.FastList;
import com.gs.collections.impl.multimap.list.FastListMultimap;
import com.gs.collections.impl.parallel.ParallelIterate;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.Assert;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;

/**
 * JMH benchmark comparing the "anagram grouping" workload across several
 * collection frameworks and execution strategies: GS Collections eager/lazy,
 * serial/parallel (ParallelIterate and FJIterate), versus JDK streams over a
 * Guava multiset. Each benchmark groups 1,000,000 random 5-letter words by
 * their sorted-character key ({@link Alphagram}), keeps groups of at least
 * {@code SIZE_THRESHOLD} members, sorts the groups by size descending, and
 * renders each group to a string.
 *
 * <p>Throughput is reported in operations per second; state is per-thread.
 */
@State(Scope.Thread)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
public class AnagramBagTest
{
    private static final int SIZE = 1_000_000;
    // Batch size used only by the asParallel() lazy-parallel benchmark.
    private static final int BATCH_SIZE = 10_000;
    // Minimum group size for a group of anagrams to be kept.
    private static final int SIZE_THRESHOLD = 10;

    // One million random uppercase 5-letter words; duplicates are expected and
    // are exactly what the bag structure counts.
    private final HashBag<String> gscWords = HashBag.newBag(FastList.newWithNValues(SIZE, () -> RandomStringUtils.randomAlphabetic(5).toUpperCase()));
    // Same data copied into a Guava multiset so the JDK-stream benchmarks
    // iterate equivalent input.
    private final Multiset<String> guavaWords = HashMultiset.create(this.gscWords);

    private ExecutorService executorService;

    @Setup
    public void setUp()
    {
        this.executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    }

    @TearDown
    public void tearDown() throws InterruptedException
    {
        this.executorService.shutdownNow();
        this.executorService.awaitTermination(1L, TimeUnit.SECONDS);
    }

    /** Baseline: single-threaded eager GS Collections groupBy into a list multimap. */
    @Warmup(iterations = 20)
    @Measurement(iterations = 10)
    @Benchmark
    public void serial_eager_gsc()
    {
        MutableListMultimap<Alphagram, String> groupBy = this.gscWords.groupBy(Alphagram::new, FastListMultimap.newMultimap());
        groupBy.multiValuesView()
                .select(iterable -> iterable.size() >= SIZE_THRESHOLD)
                .toSortedList(Comparators.<RichIterable<String>>byIntFunction(RichIterable::size))
                .asReversed()
                .collect(iterable -> iterable.size() + ": " + iterable)
                // assertFalse keeps the result live so the JIT cannot eliminate the pipeline
                .forEach(Procedures.cast(e -> Assert.assertFalse(e.isEmpty())));
    }

    /** Eager parallel grouping via ParallelIterate (its own internal executor). */
    @Benchmark
    public void parallel_eager_gsc()
    {
        MutableMultimap<Alphagram, String> groupBy = ParallelIterate.groupBy(this.gscWords, Alphagram::new);
        groupBy.multiValuesView()
                .select(iterable -> iterable.size() >= SIZE_THRESHOLD)
                .toSortedList(Comparators.<RichIterable<String>>byIntFunction(RichIterable::size))
                .asReversed()
                .collect(iterable -> iterable.size() + ": " + iterable)
                .forEach(Procedures.cast(e -> Assert.assertFalse(e.isEmpty())));
    }

    /** Lazy parallel grouping via asParallel() on the benchmark's fixed thread pool. */
    @Benchmark
    public void parallel_lazy_gsc()
    {
        ParallelUnsortedBag<String> parallelUnsortedBag = this.gscWords.asParallel(this.executorService, BATCH_SIZE);
        UnsortedBagMultimap<Alphagram, String> groupBy = parallelUnsortedBag.groupBy(Alphagram::new);
        groupBy.multiValuesView()
                .select(iterable -> iterable.size() >= SIZE_THRESHOLD)
                .toSortedList(Comparators.<RichIterable<String>>byIntFunction(RichIterable::size))
                .asReversed()
                .collect(iterable -> iterable.size() + ": " + iterable)
                .forEach(Procedures.cast(e -> Assert.assertFalse(e.isEmpty())));
    }

    /** Eager parallel grouping via the fork/join-based FJIterate utility. */
    @Benchmark
    public void parallel_eager_forkjoin_gsc()
    {
        MutableMultimap<Alphagram, String> groupBy = FJIterate.groupBy(this.gscWords, Alphagram::new);
        groupBy.multiValuesView()
                .select(iterable -> iterable.size() >= SIZE_THRESHOLD)
                .toSortedList(Comparators.<RichIterable<String>>byIntFunction(RichIterable::size))
                .asReversed()
                .collect(iterable -> iterable.size() + ": " + iterable)
                .forEach(Procedures.cast(e -> Assert.assertFalse(e.isEmpty())));
    }

    /** JDK equivalent: sequential stream over the Guava multiset. */
    @Benchmark
    public void serial_lazy_jdk()
    {
        Map<Alphagram, List<String>> groupBy = this.guavaWords.stream().collect(Collectors.groupingBy(Alphagram::new));
        groupBy.entrySet()
                .stream()
                .map(Map.Entry::getValue)
                .filter(list -> list.size() >= SIZE_THRESHOLD)
                .sorted(Comparator.<List<String>>comparingInt(List::size).reversed())
                .map(list -> list.size() + ": " + list)
                .forEach(e -> Assert.assertFalse(e.isEmpty()));
    }

    /** JDK equivalent: parallel streams for both the grouping and the post-processing. */
    @Benchmark
    public void parallel_lazy_jdk()
    {
        Map<Alphagram, List<String>> groupBy = this.guavaWords.parallelStream().collect(Collectors.groupingBy(Alphagram::new));
        groupBy.entrySet()
                .parallelStream()
                .map(Map.Entry::getValue)
                .filter(list -> list.size() >= SIZE_THRESHOLD)
                .sorted(Comparator.<List<String>>comparingInt(List::size).reversed())
                .map(list -> list.size() + ": " + list)
                .forEach(e -> Assert.assertFalse(e.isEmpty()));
    }

    /**
     * Grouping key: the word's characters in sorted order, so all anagrams of
     * a word map to the same key. Equality and hashing are over the sorted
     * char array.
     */
    private static final class Alphagram
    {
        private final char[] key;

        private Alphagram(String string)
        {
            this.key = string.toCharArray();
            Arrays.sort(this.key);
        }

        @Override
        public boolean equals(Object o)
        {
            if (this == o)
            {
                return true;
            }
            if (o == null || this.getClass() != o.getClass())
            {
                return false;
            }
            Alphagram alphagram = (Alphagram) o;
            return Arrays.equals(this.key, alphagram.key);
        }

        @Override
        public int hashCode()
        {
            return Arrays.hashCode(this.key);
        }

        @Override
        public String toString()
        {
            return new String(this.key);
        }
    }
}
apache-2.0
mhcxp/Karaf-Tutorial
cxf/personservice-rest/model/src/main/java/net/lr/tutorial/karaf/cxf/personrest/model/PersonService.java
640
package net.lr.tutorial.karaf.cxf.personrest.model;

import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

/**
 * JAX-RS REST contract for CRUD-style access to {@code Person} resources.
 * All responses are serialized as XML ({@code application/xml}).
 */
@Produces(MediaType.APPLICATION_XML)
public interface PersonService {

    /**
     * Returns all known persons.
     *
     * @return array of all persons
     */
    @GET
    @Path("/")
    public Person[] getAll();

    /**
     * Looks up a single person by id.
     *
     * @param id the person's identifier, taken from the URL path
     * @return the matching person
     */
    @GET
    @Path("/{id}")
    public Person getPerson(@PathParam("id") String id);

    /**
     * Replaces the person stored under the given id.
     *
     * @param id the identifier of the person to update, taken from the URL path
     * @param person the new person state, unmarshalled from the request body
     */
    @PUT
    @Path("/{id}")
    public void updatePerson(@PathParam("id") String id, Person person);

    /**
     * Adds a new person.
     *
     * @param person the person to add, unmarshalled from the request body
     */
    @POST
    @Path("/")
    public void addPerson(Person person);
}
apache-2.0
pomack/closure-templates
java/src/com/google/template/soy/basetree/MixinParentNode.java
8517
/*
 * Copyright 2008 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.template.soy.basetree;

import com.google.common.collect.Lists;

import java.util.List;

/**
 * Mixin implementation of the parent-specific aspect of the ParentNode interface.
 * Requires the master to be a ParentNode.
 *
 * <p> Important: Do not use outside of Soy code (treat as superpackage-private).
 *
 * <p> The parameter N represents the interface or class that is the superclass of all possible
 * children for the master ParentNode. E.g. for a Soy parse tree node, N is usually SoyNode, but for
 * SoyFileSetNode N is SoyFileNode, for SoyFileNode N is TemplateNode, etc; for a Soy expression
 * parse tree, N is usually ExprNode.
 *
 * <p>Invariant maintained by every mutator below: each element of {@code children} has its
 * parent pointer set to {@code master}, and a child removed from this node has its parent
 * pointer cleared.
 */
public final class MixinParentNode<N extends Node> {

  /** Just spaces. Note: fixed length, so it caps the usable indent in toTreeString(). */
  protected static final String SPACES = "                        ";

  /** The master node that delegates to this instance. */
  private final ParentNode<N> master;

  /** The children of the master node (accessed via this instance). */
  private final List<N> children;

  /** Whether the master node needs an env frame when being interpreted, or null if unknown. */
  private Boolean needsEnvFrameDuringInterp;

  /**
   * @param master The master node that delegates to this instance.
   */
  public MixinParentNode(ParentNode<N> master) {
    this.master = master;
    needsEnvFrameDuringInterp = null;
    children = Lists.newArrayList();
  }

  /**
   * Copy constructor. Deep-copies each child (via {@code clone()}) and reparents the copy to
   * {@code newMaster}, so the original tree is left untouched.
   * @param orig The node to copy.
   * @param newMaster The master node for the copy.
   */
  public MixinParentNode(MixinParentNode<N> orig, ParentNode<N> newMaster) {
    this.master = newMaster;
    this.needsEnvFrameDuringInterp = orig.needsEnvFrameDuringInterp;
    this.children = Lists.newArrayListWithCapacity(orig.children.size());
    for (N origChild : orig.children) {
      // Unchecked cast: clone() returns Object/Node; by convention each node's clone is its own
      // type, so the cast back to N is safe.
      @SuppressWarnings("unchecked")
      N newChild = (N) origChild.clone();
      this.children.add(newChild);
      newChild.setParent(this.master);
    }
  }

  /**
   * Sets whether this node needs an env frame when the template is being interpreted.
   * @param needsEnvFrameDuringInterp Whether this node needs an env frame during interpretation,
   *     or null if unknown.
   */
  public void setNeedsEnvFrameDuringInterp(Boolean needsEnvFrameDuringInterp) {
    this.needsEnvFrameDuringInterp = needsEnvFrameDuringInterp;
  }

  /**
   * Returns whether this node needs an env frame during interpretation, or null if unknown.
   * @return Whether this node needs an env frame during interpretation, or null if unknown.
   */
  public Boolean needsEnvFrameDuringInterp() {
    return needsEnvFrameDuringInterp;
  }

  /**
   * Gets the number of children.
   * @return The number of children.
   */
  public int numChildren() {
    return children.size();
  }

  /**
   * Gets the child at the given index.
   * @param index The index of the child to get.
   * @return The child at the given index.
   */
  public N getChild(int index) {
    return children.get(index);
  }

  /**
   * Finds the index of the given child.
   * @param child The child to find the index of.
   * @return The index of the given child, or -1 if the given child is not a child of this node.
   */
  public int getChildIndex(N child) {
    return children.indexOf(child);
  }

  /**
   * Gets the list of children.
   *
   * Note: The returned list is not a copy. Please do not modify the list directly. Instead, use
   * the other methods in this class that are intended for modifying children. Also, if you're
   * iterating over the children list as you're modifying it, then you should first make a copy of
   * the children list to iterate over, in order to avoid ConcurrentModificationException.
   *
   * @return The list of children.
   */
  public List<N> getChildren() {
    return children;
  }

  /**
   * Adds the given child.
   * @param child The child to add.
   */
  public void addChild(N child) {
    children.add(child);
    child.setParent(master);
  }

  /**
   * Adds the given child at the given index (shifting existing children if necessary).
   * @param index The index to add the child at.
   * @param child The child to add.
   */
  public void addChild(int index, N child) {
    children.add(index, child);
    child.setParent(master);
  }

  /**
   * Removes the child at the given index.
   * @param index The index of the child to remove.
   */
  public void removeChild(int index) {
    N child = children.remove(index);
    child.setParent(null);
  }

  /**
   * Removes the given child.
   * @param child The child to remove.
   */
  public void removeChild(N child) {
    // Note: if child is not actually a child of this node, List.remove is a no-op but the
    // child's parent pointer is still cleared.
    children.remove(child);
    child.setParent(null);
  }

  /**
   * Replaces the child at the given index with the given new child.
   * @param index The index of the child to replace.
   * @param newChild The new child.
   */
  public void replaceChild(int index, N newChild) {
    N oldChild = children.set(index, newChild);
    oldChild.setParent(null);
    newChild.setParent(master);
  }

  /**
   * Replaces the given current child with the given new child.
   * Throws IndexOutOfBoundsException (via List.set with index -1) if currChild is not actually
   * a child of this node.
   * @param currChild The current child to be replaced.
   * @param newChild The new child.
   */
  public void replaceChild(N currChild, N newChild) {
    replaceChild(getChildIndex(currChild), newChild);
  }

  /**
   * Clears the list of children.
   * Note: parent pointers of the removed children are not cleared here.
   */
  public void clearChildren() {
    children.clear();
  }

  /**
   * Adds the given children.
   * @param children The children to add.
   */
  public void addChildren(List<? extends N> children) {
    for (N child : children) {
      addChild(child);
    }
  }

  /**
   * Adds the given children at the given index (shifting existing children if necessary).
   * Implemented by temporarily removing the tail of the current children, appending the new
   * children, then re-appending the saved tail — this keeps every parent pointer correct.
   * @param index The index to add the children at.
   * @param children The children to add.
   */
  public void addChildren(int index, List<? extends N> children) {
    List<N> origChildren = Lists.newArrayList(this.children);
    int origNumChildren = this.children.size();
    // Temporarily remove the original children from index onward (in reverse order).
    for (int i = origNumChildren - 1; i >= index; i--) {
      removeChild(i);
    }
    // Add the new children.
    addChildren(children);
    // Add back the original children that we temporarily removed (in correct order).
    addChildren(origChildren.subList(index, origNumChildren));
  }

  /**
   * Appends the source strings for all the children to the given StringBuilder.
   * @param sb The StringBuilder to which to append the children's source strings.
   */
  public void appendSourceStringForChildren(StringBuilder sb) {
    for (N child : children) {
      sb.append(child.toSourceString());
    }
  }

  /**
   * Appends the tree strings for all the children to the given StringBuilder, at one further
   * indentation level (3 spaces) than the given current indentation level.
   * @param sb The StringBuilder to which to append the children's tree strings.
   * @param indent The current indentation level of this parent node.
   */
  public void appendTreeStringForChildren(StringBuilder sb, int indent) {
    for (N child : children) {
      sb.append(child.toTreeString(indent + 3));
    }
  }

  /**
   * Builds a string that visually shows the subtree rooted at this node (for debugging).
   * Each line of the string will be indented by the given indentation amount. You should pass an
   * indentation of 0 unless this method is being called as part of building a larger tree string.
   * Note: SPACES.substring(0, indent) throws StringIndexOutOfBoundsException if indent exceeds
   * the length of SPACES, so very deep trees cannot be printed this way.
   * @param indent The indentation for each line of the tree string (usually pass 0).
   * @return A string that visually shows the subtree rooted at this node.
   */
  public String toTreeString(int indent) {
    StringBuilder sb = new StringBuilder();
    sb.append(SPACES.substring(0, indent)).append("[").append(master.toString()).append("]\n");
    appendTreeStringForChildren(sb, indent);
    return sb.toString();
  }
}
apache-2.0
rancherio/cattle
modules/model/src/main/java/io/cattle/platform/core/addon/RestartPolicy.java
1254
package io.cattle.platform.core.addon;

import io.github.ibuildthecloud.gdapi.annotation.Field;
import io.github.ibuildthecloud.gdapi.annotation.Type;

import org.apache.commons.lang3.StringUtils;

import com.fasterxml.jackson.annotation.JsonIgnore;

/**
 * A container restart policy, mirroring the Docker-style policy names
 * ({@code "no"}, {@code "always"}, {@code "on-failure"}).
 *
 * <p>A blank or missing name is treated the same as {@code "no"} (never restart).
 */
@Type(list = false)
public class RestartPolicy {

    // Constants were previously mutable (non-final) public statics; made final so they
    // cannot be reassigned. Reads by existing callers are unaffected.

    /** Never restart the container. */
    public static final String RESTART_NEVER = "no";

    /** Always restart the container when it stops. */
    public static final String RESTART_ALWAYS = "always";

    /** Restart only on non-zero exit, up to {@code maximumRetryCount} attempts. */
    public static final String RESTART_ON_FAILURE = "on-failure";

    // Policy name; expected to be one of the constants above, or blank/null for "never".
    String name;
    // Retry cap; only meaningful for the on-failure policy.
    int maximumRetryCount;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getMaximumRetryCount() {
        return maximumRetryCount;
    }

    public void setMaximumRetryCount(int maximumRetryCount) {
        this.maximumRetryCount = maximumRetryCount;
    }

    /**
     * @return true if this policy never restarts: name is {@code "no"}, blank, or null
     */
    @Field(include = false)
    @JsonIgnore
    public boolean isNever() {
        return RESTART_NEVER.equals(name) || StringUtils.isBlank(name);
    }

    /**
     * @return true if this policy always restarts
     */
    @Field(include = false)
    @JsonIgnore
    public boolean isAlways() {
        return RESTART_ALWAYS.equals(name);
    }

    /**
     * @return true if this policy restarts only on failure
     */
    @Field(include = false)
    @JsonIgnore
    public boolean isOnFailure() {
        return RESTART_ON_FAILURE.equals(name);
    }
}
apache-2.0
dhalperi/batfish
projects/batfish/src/main/java/org/batfish/representation/palo_alto/DeviceGroup.java
1942
package org.batfish.representation.palo_alto;

import com.google.common.collect.ImmutableSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;

/**
 * Represents a Palo Alto device-group, which contains some regular device configuration along with
 * some device-group specific configuration.
 *
 * <p>Device-group configuration is inherited by any devices associated with it.
 */
@ParametersAreNonnullByDefault
public final class DeviceGroup extends PaloAltoConfiguration {

  private @Nullable String _description;

  // Names of devices belonging to this device-group.
  private final Set<String> _devices;

  private final String _name;

  /** The parent {@link DeviceGroup}. */
  private @Nullable String _parentDg;

  /** Map of Device name to set of Vsyses */
  private final Map<String, Set<String>> _vsys;

  public DeviceGroup(String name) {
    super();
    _devices = new HashSet<>();
    _vsys = new HashMap<>();
    _name = name;
  }

  /** Associates the named device with this device-group. Duplicate adds are no-ops. */
  public void addDevice(String device) {
    _devices.add(device);
  }

  /** Associates the given vsys with the named device, creating the device entry if absent. */
  public void addVsys(String device, String vsys) {
    Set<String> vsysSet = _vsys.computeIfAbsent(device, d -> new HashSet<>());
    vsysSet.add(vsys);
  }

  public @Nullable String getDescription() {
    return _description;
  }

  public void setDescription(String description) {
    _description = description;
  }

  /** Returns an immutable snapshot of the device names in this device-group. */
  public @Nonnull Set<String> getDevices() {
    return ImmutableSet.copyOf(_devices);
  }

  public @Nonnull String getName() {
    return _name;
  }

  public @Nullable String getParentDg() {
    return _parentDg;
  }

  public void setParentDg(@Nullable String parentDg) {
    _parentDg = parentDg;
  }

  /** Return map of device name to set of vsys names, for vsys associated with this device-group. */
  // NOTE(review): unlike getDevices(), this exposes the internal mutable map (and its mutable
  // value sets) directly — callers could mutate device-group state through it. Verify whether any
  // caller relies on that before making it defensive like getDevices().
  public @Nonnull Map<String, Set<String>> getVsys() {
    return _vsys;
  }
}
apache-2.0
zwets/flowable-engine
modules/flowable5-engine/src/main/java/org/activiti/engine/impl/juel/TypeConverterImpl.java
14074
/* * Based on JUEL 2.2.1 code, 2006-2009 Odysseus Software GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.activiti.engine.impl.juel; import java.beans.PropertyEditor; import java.beans.PropertyEditorManager; import java.math.BigDecimal; import java.math.BigInteger; import java.util.Date; import org.activiti.engine.impl.javax.el.ELException; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; /** * Type Conversions as described in EL 2.1 specification (section 1.17). 
*/ public class TypeConverterImpl implements TypeConverter { private static final long serialVersionUID = 1L; protected Boolean coerceToBoolean(Object value) { if (value == null || "".equals(value)) { return Boolean.FALSE; } if (value instanceof Boolean) { return (Boolean) value; } if (value instanceof String) { return Boolean.valueOf((String) value); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Boolean.class)); } protected Character coerceToCharacter(Object value) { if (value == null || "".equals(value)) { return Character.valueOf((char) 0); } if (value instanceof Character) { return (Character) value; } if (value instanceof Number) { return Character.valueOf((char) ((Number) value).shortValue()); } if (value instanceof String) { return Character.valueOf(((String) value).charAt(0)); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Character.class)); } protected BigDecimal coerceToBigDecimal(Object value) { if (value == null || "".equals(value)) { return BigDecimal.valueOf(0l); } if (value instanceof BigDecimal) { return (BigDecimal) value; } if (value instanceof BigInteger) { return new BigDecimal((BigInteger) value); } if (value instanceof Number) { return new BigDecimal(((Number) value).doubleValue()); } if (value instanceof String) { try { return new BigDecimal((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, BigDecimal.class)); } } if (value instanceof Character) { return new BigDecimal((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), BigDecimal.class)); } protected BigInteger coerceToBigInteger(Object value) { if (value == null || "".equals(value)) { return BigInteger.valueOf(0l); } if (value instanceof BigInteger) { return (BigInteger) value; } if (value instanceof BigDecimal) { return ((BigDecimal) value).toBigInteger(); } if (value instanceof 
Number) { return BigInteger.valueOf(((Number) value).longValue()); } if (value instanceof String) { try { return new BigInteger((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, BigInteger.class)); } } if (value instanceof Character) { return BigInteger.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), BigInteger.class)); } protected Double coerceToDouble(Object value) { if (value == null || "".equals(value)) { return Double.valueOf(0); } if (value instanceof Double) { return (Double) value; } if (value instanceof Number) { return Double.valueOf(((Number) value).doubleValue()); } if (value instanceof String) { try { return Double.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Double.class)); } } if (value instanceof Character) { return Double.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Double.class)); } protected Float coerceToFloat(Object value) { if (value == null || "".equals(value)) { return Float.valueOf(0); } if (value instanceof Float) { return (Float) value; } if (value instanceof Number) { return Float.valueOf(((Number) value).floatValue()); } if (value instanceof String) { try { return Float.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Float.class)); } } if (value instanceof Character) { return Float.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Float.class)); } protected Long coerceToLong(Object value) { if (value == null || "".equals(value)) { return Long.valueOf(0l); } if (value instanceof Long) { return (Long) value; } if (value instanceof Number) { return 
Long.valueOf(((Number) value).longValue()); } if (value instanceof String) { try { return Long.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Long.class)); } } if (value instanceof Character) { return Long.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Long.class)); } protected Integer coerceToInteger(Object value) { if (value == null || "".equals(value)) { return Integer.valueOf(0); } if (value instanceof Integer) { return (Integer) value; } if (value instanceof Number) { return Integer.valueOf(((Number) value).intValue()); } if (value instanceof String) { try { return Integer.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Integer.class)); } } if (value instanceof Character) { return Integer.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Integer.class)); } protected Short coerceToShort(Object value) { if (value == null || "".equals(value)) { return Short.valueOf((short) 0); } if (value instanceof Short) { return (Short) value; } if (value instanceof Number) { return Short.valueOf(((Number) value).shortValue()); } if (value instanceof String) { try { return Short.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Short.class)); } } if (value instanceof Character) { return Short.valueOf((short) ((Character) value).charValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Short.class)); } protected Byte coerceToByte(Object value) { if (value == null || "".equals(value)) { return Byte.valueOf((byte) 0); } if (value instanceof Byte) { return (Byte) value; } if (value instanceof Number) { return Byte.valueOf(((Number) 
value).byteValue()); } if (value instanceof String) { try { return Byte.valueOf((String) value); } catch (NumberFormatException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, Byte.class)); } } if (value instanceof Character) { return Byte.valueOf(Short.valueOf((short) ((Character) value).charValue()).byteValue()); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), Byte.class)); } protected String coerceToString(Object value) { if (value == null) { return ""; } if (value instanceof String) { return (String) value; } if (value instanceof Enum<?>) { return ((Enum<?>) value).name(); } if (value instanceof Date) { DateTimeFormatter fmt = ISODateTimeFormat.dateTime(); DateTime dt = new DateTime(value); return fmt.print(dt); } return value.toString(); } @SuppressWarnings("unchecked") protected <T extends Enum<T>> T coerceToEnum(Object value, Class<T> type) { if (value == null || "".equals(value)) { return null; } if (type.isInstance(value)) { return (T) value; } if (value instanceof String) { try { return Enum.valueOf(type, (String) value); } catch (IllegalArgumentException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, type)); } } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), type)); } protected Object coerceStringToType(String value, Class<?> type) { PropertyEditor editor = PropertyEditorManager.findEditor(type); if (editor == null) { if ("".equals(value)) { return null; } throw new ELException(LocalMessages.get("error.coerce.type", String.class, type)); } else { if ("".equals(value)) { try { editor.setAsText(value); } catch (IllegalArgumentException e) { return null; } } else { try { editor.setAsText(value); } catch (IllegalArgumentException e) { throw new ELException(LocalMessages.get("error.coerce.value", value, type)); } } return editor.getValue(); } } @SuppressWarnings("unchecked") protected Object coerceToType(Object value, Class<?> type) { if (type 
== String.class) { return coerceToString(value); } if (type == Long.class || type == long.class) { return coerceToLong(value); } if (type == Double.class || type == double.class) { return coerceToDouble(value); } if (type == Boolean.class || type == boolean.class) { return coerceToBoolean(value); } if (type == Integer.class || type == int.class) { return coerceToInteger(value); } if (type == Float.class || type == float.class) { return coerceToFloat(value); } if (type == Short.class || type == short.class) { return coerceToShort(value); } if (type == Byte.class || type == byte.class) { return coerceToByte(value); } if (type == Character.class || type == char.class) { return coerceToCharacter(value); } if (type == BigDecimal.class) { return coerceToBigDecimal(value); } if (type == BigInteger.class) { return coerceToBigInteger(value); } if (type.getSuperclass() == Enum.class) { return coerceToEnum(value, (Class<? extends Enum>) type); } if (value == null || value.getClass() == type || type.isInstance(value)) { return value; } if (value instanceof String) { return coerceStringToType((String) value, type); } throw new ELException(LocalMessages.get("error.coerce.type", value.getClass(), type)); } @Override public boolean equals(Object obj) { return obj != null && obj.getClass().equals(getClass()); } @Override public int hashCode() { return getClass().hashCode(); } @Override @SuppressWarnings("unchecked") public <T> T convert(Object value, Class<T> type) throws ELException { return (T) coerceToType(value, type); } }
apache-2.0
zstackorg/zstack
utils/src/test/java/org/zstack/utils/test/TestArrayHelper.java
673
package org.zstack.utils.test;

import org.junit.Test;
import org.zstack.utils.data.ArrayHelper;

import java.util.ArrayList;
import java.util.List;

/**
 * Exercises {@code ArrayHelper.arrayFromField}: builds a list of test beans
 * and extracts a single named field from every element into a typed array.
 */
public class TestArrayHelper {
    class Boy {
        String name;
        int age;
    }

    @Test
    public void test() {
        // Populate ten beans whose names encode their index.
        List<Boy> boys = new ArrayList<Boy>(10);
        for (int idx = 0; idx < 10; idx++) {
            Boy boy = new Boy();
            boy.name = "Boy-" + idx;
            boy.age = idx;
            boys.add(boy);
        }

        // Pull the "name" field of each element into a String[] and print them in order.
        String[] names = ArrayHelper.arrayFromField(boys, "name", String.class);
        for (String name : names) {
            System.out.println(name);
        }
    }
}
apache-2.0
illiya/PDT-19
addressbook-selenium-tests/src/com/example/tests/ContactData.java
829
package com.example.tests; public class ContactData { public String firstname; public String lastname; public String address; public String mobtelefon; public String email; public String date; public String month; public String year; public String groupname; public String address2; public String home2; public ContactData() {} public ContactData(String firstname, String lastname, String address, String mobtelefon, String email, String date, String month, String year, String groupname, String address2, String home2) { this.firstname = firstname; this.lastname = lastname; this.address = address; this.mobtelefon = mobtelefon; this.email = email; this.date = date; this.month = month; this.year = year; this.groupname = groupname; this.address2 = address2; this.home2 = home2; } }
apache-2.0
pwachira/droolsexamples
drools-examples/src/main/java/org/drools/examples/cashflow/CashFlowType.java
87
package org.drools.examples.cashflow; public enum CashFlowType { CREDIT, DEBIT; }
apache-2.0
zwets/flowable-engine
modules/flowable-engine/src/main/java/org/flowable/engine/delegate/event/impl/FlowableActivityEventImpl.java
1985
/* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.flowable.engine.delegate.event.impl; import org.flowable.engine.common.api.delegate.event.FlowableEngineEventType; import org.flowable.engine.delegate.event.FlowableActivityEvent; /** * Implementation of an {@link FlowableActivityEvent}. * * @author Frederik Heremans * @author Joram Barrez */ public class FlowableActivityEventImpl extends FlowableProcessEventImpl implements FlowableActivityEvent { protected String activityId; protected String activityName; protected String activityType; protected String behaviorClass; public FlowableActivityEventImpl(FlowableEngineEventType type) { super(type); } @Override public String getActivityId() { return activityId; } public void setActivityId(String activityId) { this.activityId = activityId; } @Override public String getActivityName() { return activityName; } public void setActivityName(String activityName) { this.activityName = activityName; } @Override public String getActivityType() { return activityType; } public void setActivityType(String activityType) { this.activityType = activityType; } @Override public String getBehaviorClass() { return behaviorClass; } public void setBehaviorClass(String behaviorClass) { this.behaviorClass = behaviorClass; } }
apache-2.0
saandrews/pulsar
pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java
6708
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pulsar.websocket.stats; import static org.apache.pulsar.websocket.ProducerHandler.ENTRY_LATENCY_BUCKETS_USEC; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.websocket.WebSocketService; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * It periodically generates stats metrics of proxy service, * */ public class ProxyStats { private final WebSocketService service; private final JvmMetrics jvmMetrics; private ConcurrentOpenHashMap<String, ProxyNamespaceStats> topicStats; private List<Metrics> metricsCollection; public ProxyStats(WebSocketService service) { super(); this.service = service; this.jvmMetrics = new JvmMetrics(service); this.topicStats = new ConcurrentOpenHashMap<>(); this.metricsCollection = Lists.newArrayList(); // schedule stat generation task every 1 minute service.getExecutor().scheduleAtFixedRate(() -> generate(), 120, 60, TimeUnit.SECONDS); } /** 
* generates stats-metrics of proxy service and updates metricsCollection cache with latest stats. */ public synchronized void generate() { if (log.isDebugEnabled()) { log.debug("Start generating proxy metrics"); } topicStats.clear(); service.getProducers().forEach((topic, handlers) -> { if (log.isDebugEnabled()) { log.debug("Collect stats from {} producer handlers for topic {}", handlers.size(), topic); } final String namespaceName = TopicName.get(topic).getNamespace(); ProxyNamespaceStats nsStat = topicStats.computeIfAbsent(namespaceName, ns -> new ProxyNamespaceStats()); handlers.forEach(handler -> { nsStat.numberOfMsgPublished += handler.getAndResetNumMsgsSent(); nsStat.numberOfBytesPublished += handler.getAndResetNumBytesSent(); nsStat.numberOfPublishFailure += handler.getAndResetNumMsgsFailed(); handler.getPublishLatencyStatsUSec().refresh(); nsStat.publishMsgLatency.addAll(handler.getPublishLatencyStatsUSec()); }); }); service.getConsumers().forEach((topic, handlers) -> { if (log.isDebugEnabled()) { log.debug("Collect stats from {} consumer handlers for topic {}", handlers.size(), topic); } final String namespaceName = TopicName.get(topic).getNamespace(); ProxyNamespaceStats nsStat = topicStats.computeIfAbsent(namespaceName, ns -> new ProxyNamespaceStats()); handlers.forEach(handler -> { nsStat.numberOfMsgDelivered += handler.getAndResetNumMsgsAcked(); nsStat.numberOfBytesDelivered += handler.getAndResetNumBytesDelivered(); nsStat.numberOfMsgsAcked += handler.getAndResetNumMsgsAcked(); }); }); List<Metrics> tempMetricsCollection = Lists.newArrayList(); topicStats.forEach((namespace, stats) -> { if (log.isDebugEnabled()) { log.debug("Add ns-stats of namespace {} to metrics", namespace); } tempMetricsCollection.add(stats.add(namespace)); }); // add jvm-metrics if (log.isDebugEnabled()) { log.debug("Add jvm-stats to metrics"); } tempMetricsCollection.add(jvmMetrics.generate()); // swap tempmetrics to stat-metrics List<Metrics> tempRef = metricsCollection; 
metricsCollection = tempMetricsCollection; tempRef.clear(); if (log.isDebugEnabled()) { log.debug("Complete generating proxy metrics"); } } public synchronized List<Metrics> getMetrics() { return metricsCollection; } private static class ProxyNamespaceStats { public long numberOfMsgPublished; public long numberOfBytesPublished; public long numberOfPublishFailure; public StatsBuckets publishMsgLatency; public long numberOfMsgDelivered; public long numberOfBytesDelivered; public long numberOfMsgsAcked; public ProxyNamespaceStats() { this.publishMsgLatency = new StatsBuckets(ENTRY_LATENCY_BUCKETS_USEC); } public Metrics add(String namespace) { publishMsgLatency.refresh(); long[] latencyBuckets = publishMsgLatency.getBuckets(); Map<String, String> dimensionMap = Maps.newHashMap(); dimensionMap.put("namespace", namespace); Metrics dMetrics = Metrics.create(dimensionMap); dMetrics.put("ns_msg_publish_rate", numberOfMsgPublished); dMetrics.put("ns_byte_publish_rate", numberOfBytesPublished); dMetrics.put("ns_msg_failure_rate", numberOfPublishFailure); dMetrics.put("ns_msg_deliver_rate", numberOfMsgDelivered); dMetrics.put("ns_byte_deliver_rate", numberOfBytesDelivered); dMetrics.put("ns_msg_ack_rate", numberOfMsgsAcked); for (int i = 0; i < latencyBuckets.length; i++) { final String latencyBucket = i >= ENTRY_LATENCY_BUCKETS_USEC.length ? ENTRY_LATENCY_BUCKETS_USEC[ENTRY_LATENCY_BUCKETS_USEC.length-1] + "_higher" : Long.toString(ENTRY_LATENCY_BUCKETS_USEC[i]); dMetrics.put("ns_msg_publish_latency_" + latencyBucket, latencyBuckets[i]); } return dMetrics; } } private static final Logger log = LoggerFactory.getLogger(ProxyStats.class); }
apache-2.0
kanivel/android-mvc-framework
src/com/android_mvc/sample_project/domain/DBDeleteAction.java
1763
package com.android_mvc.sample_project.domain; import android.app.Activity; import com.android_mvc.sample_project.activities.func_db.DBListActivity; import com.android_mvc.sample_project.db.dao.FriendDAO; import com.android_mvc.sample_project.db.entity.Friend; import com.android_mvc.framework.controller.action.ActionResult; import com.android_mvc.framework.controller.action.BaseAction; import com.android_mvc.framework.ui.UIUtil; /** * DBからの削除に関するBL。 * @author id:language_and_engineering * */ public class DBDeleteAction extends BaseAction { private DBListActivity activity; private Long friend_id; public DBDeleteAction(DBListActivity activity, Long friend_id) { this.activity = activity; this.friend_id = friend_id; } // BL本体 @Override public ActionResult exec() { Friend f = new FriendDAO(activity).findById(friend_id); String target_friend_name = f.getName(); // DBからの削除を実行 new FriendDAO(activity).deleteById(friend_id); // 実行結果を返す DBDeleteActionResult ares = new DBDeleteActionResult(); ares.setRouteId("success"); ares.friend_name = target_friend_name; // 名前だけ控えておく return ares; } // 実行結果オブジェクト static class DBDeleteActionResult extends ActionResult { private static final long serialVersionUID = 1L; protected String friend_name; @Override public void onNextActivityStarted(Activity activity) { UIUtil.longToast(activity, friend_name + "さんを削除しました。"); } } }
apache-2.0
apache/jmeter
src/core/src/main/java/org/apache/jmeter/gui/GUIFactory.java
6283
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jmeter.gui; import java.lang.reflect.InvocationTargetException; import java.util.HashMap; import java.util.Map; import javax.swing.ImageIcon; import javax.swing.JComponent; import org.apache.jmeter.testbeans.gui.TestBeanGUI; /** * Provides a way to register and retrieve GUI classes and icons. * */ public final class GUIFactory { /** A Map from String to JMeterGUIComponent of registered GUI classes. */ private static final Map<String, JMeterGUIComponent> GUI_MAP = new HashMap<>(); /** A Map from String to ImageIcon of registered icons. */ private static final Map<String, ImageIcon> ICON_MAP = new HashMap<>(); /** A Map from String to ImageIcon of registered icons. */ private static final Map<String, ImageIcon> DISABLED_ICON_MAP = new HashMap<>(); /** * Prevent instantiation since this is a static utility class. */ private GUIFactory() { } /** * Get an icon which has previously been registered for this class object. 
* * @param elementClass * the class object which we want to get an icon for * * @return the associated icon, or null if this class or its superclass has * not been registered */ public static ImageIcon getIcon(Class<?> elementClass) { return getIcon(elementClass, true); } /** * Get icon/disabledicon which has previously been registered for this class * object. * * @param elementClass * the class object which we want to get an icon for * @param enabled - * is icon enabled * * @return the associated icon, or null if this class or its superclass has * not been registered */ public static ImageIcon getIcon(Class<?> elementClass, boolean enabled) { String key = elementClass.getName(); ImageIcon icon = enabled ? ICON_MAP.get(key) : DISABLED_ICON_MAP.get(key); if (icon != null) { return icon; } if (elementClass.getSuperclass() != null) { return getIcon(elementClass.getSuperclass(), enabled); } return null; } /** * Get a component instance which has previously been registered for this * class object. * * @param elementClass * the class object which we want to get an instance of * * @return an instance of the class, or null if this class or its superclass * has not been registered */ public static JComponent getGUI(Class<?> elementClass) { // TODO: This method doesn't appear to be used. String key = elementClass.getName(); JComponent gui = (JComponent) GUI_MAP.get(key); if (gui != null) { return gui; } if (elementClass.getSuperclass() != null) { return getGUI(elementClass.getSuperclass()); } return null; } /** * Register an icon so that it can later be retrieved via * {@link #getIcon(Class)}. The key should match the fully-qualified class * name for the class used as the parameter when retrieving the icon. 
* * @param key * the name which can be used to retrieve this icon later * @param icon * the icon to store */ public static void registerIcon(String key, ImageIcon icon) { ICON_MAP.put(key, icon); } /** * Register an icon so that it can later be retrieved via * {@link #getIcon(Class)}. The key should match the fully-qualified class * name for the class used as the parameter when retrieving the icon. * * @param key * the name which can be used to retrieve this icon later * @param icon * the icon to store */ public static void registerDisabledIcon(String key, ImageIcon icon) { DISABLED_ICON_MAP.put(key, icon); } /** * Register a GUI class so that it can later be retrieved via * {@link #getGUI(Class)}. The key should match the fully-qualified class * name for the class used as the parameter when retrieving the GUI. * * @param key * the name which can be used to retrieve this GUI later * @param guiClass * the class object for the GUI component * @param testClass * the class of the objects edited by this GUI * * @throws InstantiationException * if an instance of the GUI class can not be instantiated * @throws IllegalAccessException * if access rights do not permit an instance of the GUI class * to be created * @throws NoSuchMethodException * when no constructor can be found on the given {@code guiClass} * @throws InvocationTargetException * when the called constructor throws an exception */ public static void registerGUI(String key, Class<?> guiClass, Class<?> testClass) throws InstantiationException, IllegalAccessException, NoSuchMethodException, InvocationTargetException { // TODO: This method doesn't appear to be used. JMeterGUIComponent gui; if (guiClass == TestBeanGUI.class) { gui = new TestBeanGUI(testClass); } else { gui = (JMeterGUIComponent) guiClass.getDeclaredConstructor().newInstance(); } GUI_MAP.put(key, gui); } }
apache-2.0
phambryan/dropwizard
dropwizard-migrations/src/main/java/io/dropwizard/migrations/DbRollbackCommand.java
3791
package io.dropwizard.migrations; import io.dropwizard.Configuration; import io.dropwizard.db.DatabaseConfiguration; import liquibase.Liquibase; import net.sourceforge.argparse4j.impl.Arguments; import net.sourceforge.argparse4j.inf.Namespace; import net.sourceforge.argparse4j.inf.Subparser; import java.io.OutputStreamWriter; import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.util.Date; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; public class DbRollbackCommand<T extends Configuration> extends AbstractLiquibaseCommand<T> { private PrintStream outputStream = System.out; public DbRollbackCommand(DatabaseConfiguration<T> strategy, Class<T> configurationClass, String migrationsFileName) { super("rollback", "Rollback the database schema to a previous version.", strategy, configurationClass, migrationsFileName); } void setOutputStream(PrintStream outputStream) { this.outputStream = outputStream; } @Override public void configure(Subparser subparser) { super.configure(subparser); subparser.addArgument("-n", "--dry-run") .action(Arguments.storeTrue()) .dest("dry-run") .setDefault(Boolean.FALSE) .help("Output the DDL to stdout, don't run it"); subparser.addArgument("-t", "--tag").dest("tag").help("Rollback to the given tag"); subparser.addArgument("-d", "--date") .dest("date") .type(Date.class) .help("Rollback to the given date"); subparser.addArgument("-c", "--count") .dest("count") .type(Integer.class) .help("Rollback the specified number of change sets"); subparser.addArgument("-i", "--include") .action(Arguments.append()) .dest("contexts") .help("include change sets from the given context"); } @Override public void run(Namespace namespace, Liquibase liquibase) throws Exception { final String tag = namespace.getString("tag"); final Integer count = namespace.getInt("count"); final Date date = namespace.get("date"); final boolean dryRun = namespace.getBoolean("dry-run") 
!= null && namespace.getBoolean("dry-run"); final String context = getContext(namespace); if (Stream.of(tag, count, date).filter(Objects::nonNull).count() != 1) { throw new IllegalArgumentException("Must specify either a count, a tag, or a date."); } if (count != null) { if (dryRun) { liquibase.rollback(count, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8)); } else { liquibase.rollback(count, context); } } else if (tag != null) { if (dryRun) { liquibase.rollback(tag, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8)); } else { liquibase.rollback(tag, context); } } else { if (dryRun) { liquibase.rollback(date, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8)); } else { liquibase.rollback(date, context); } } } private String getContext(Namespace namespace) { final List<Object> contexts = namespace.getList("contexts"); if (contexts == null) { return ""; } return contexts.stream() .map(Object::toString) .collect(Collectors.joining(",")); } }
apache-2.0
struberg/deltaspike
deltaspike/modules/data/impl/src/main/java/org/apache/deltaspike/data/impl/param/Parameters.java
6133
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.deltaspike.data.impl.param; import java.lang.annotation.Annotation; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; import javax.persistence.Query; import org.apache.deltaspike.data.api.FirstResult; import org.apache.deltaspike.data.api.MaxResults; import org.apache.deltaspike.data.api.QueryParam; import org.apache.deltaspike.data.api.mapping.QueryInOutMapper; import org.apache.deltaspike.data.impl.meta.RepositoryMethodMetadata; /** * Convenience class to manage method and query parameters. 
*/ public final class Parameters { private static final Logger LOG = Logger.getLogger(Parameters.class.getName()); private static final int DEFAULT_MAX = 0; private static final int DEFAULT_FIRST = -1; private final List<Parameter> parameterList; private final int max; private final int firstResult; private Parameters(List<Parameter> parameters, int max, int firstResult) { this.parameterList = parameters; this.max = max; this.firstResult = firstResult; } public static Parameters createEmpty() { List<Parameter> empty = Collections.emptyList(); return new Parameters(empty, DEFAULT_MAX, DEFAULT_FIRST); } public static Parameters create(Method method, Object[] parameters, RepositoryMethodMetadata repositoryMethod) { int max = extractSizeRestriction(method, repositoryMethod); int first = DEFAULT_FIRST; List<Parameter> result = new ArrayList<Parameter>(parameters.length); int paramIndex = 1; Annotation[][] annotations = method.getParameterAnnotations(); for (int i = 0; i < parameters.length; i++) { if (isParameter(method.getParameterAnnotations()[i])) { QueryParam qpAnnotation = extractFrom(annotations[i], QueryParam.class); if (qpAnnotation != null) { result.add(new NamedParameter(qpAnnotation.value(), parameters[i])); } else { result.add(new IndexedParameter(paramIndex++, parameters[i])); } } else { max = extractInt(parameters[i], annotations[i], MaxResults.class, max); first = extractInt(parameters[i], annotations[i], FirstResult.class, first); } } return new Parameters(result, max, first); } public void applyMapper(QueryInOutMapper<?> mapper) { for (Parameter param : parameterList) { param.applyMapper(mapper); } } public void updateValues(List<ParameterUpdate> updates) { for (ParameterUpdate update : updates) { for (Parameter param : parameterList) { if (param.is(update.forParamWithId())) { param.updateValue(update.newParamValue(param.queryValue())); } } } } public Query applyTo(Query query) { for (Parameter param : parameterList) { param.apply(query); } return 
query; } public boolean hasSizeRestriction() { return max > DEFAULT_MAX; } public int getSizeRestriciton() { return max; } public boolean hasFirstResult() { return firstResult > DEFAULT_FIRST; } public int getFirstResult() { return firstResult; } private static int extractSizeRestriction(Method method, RepositoryMethodMetadata repositoryMethod) { if (repositoryMethod.getQuery() != null) { return repositoryMethod.getQuery().max(); } return repositoryMethod.getMethodPrefix().getDefinedMaxResults(); } @SuppressWarnings("unchecked") private static <A extends Annotation> A extractFrom(Annotation[] annotations, Class<A> target) { for (Annotation annotation : annotations) { if (annotation.annotationType().isAssignableFrom(target)) { return (A) annotation; } } return null; } private static <A extends Annotation> int extractInt(Object parameter, Annotation[] annotations, Class<A> target, int defaultVal) { if (parameter != null) { A result = extractFrom(annotations, target); if (result != null) { if (parameter instanceof Integer) { return (Integer) parameter; } else { LOG.log(Level.WARNING, "Method parameter extraction: " + "Param type must be int: {0}->is:{1}", new Object[] { target, parameter.getClass() }); } } } return defaultVal; } private static boolean isParameter(Annotation[] annotations) { return extractFrom(annotations, MaxResults.class) == null && extractFrom(annotations, FirstResult.class) == null; } }
apache-2.0
thiagoolsilva/automation-lecture
code/BasicProject-Robotium/app/src/main/java/lopes/br/basicproject/ui/HomeActivity.java
2036
/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ package lopes.br.basicproject.ui; import android.content.Intent; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.support.v4.app.Fragment; import lopes.br.basicproject.R; import lopes.br.basicproject.model.Place; import lopes.br.basicproject.ui.fragment.HomeFragment; public class HomeActivity extends BaseActivity implements HomeFragment.ClickCallback { @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Configuring the toolbar toolbarTxt.setText(getString(R.string.toolbar_home)); } @Override public Fragment loadFragment() { return HomeFragment.newInstance(); } @Override public void itemSelected(@NonNull Place placeSelected) { if (placeSelected != null) { callDetailsActivity(placeSelected); } } private void callDetailsActivity(@NonNull Place placeSelected) { Intent callDetailActivity = new Intent(this, DetailsActivity.class); callDetailActivity.putExtra(DetailsActivity.EXTRA_PLACE, placeSelected); startActivity(callDetailActivity); } }
apache-2.0
hurricup/intellij-community
java/java-psi-impl/src/com/intellij/lang/java/lexer/JavaLexer.java
8283
/*
 * Copyright 2000-2016 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.lang.java.lexer;

import com.intellij.lexer.LexerBase;
import com.intellij.pom.java.LanguageLevel;
import com.intellij.psi.JavaTokenType;
import com.intellij.psi.TokenType;
import com.intellij.psi.impl.source.tree.JavaDocElementType;
import com.intellij.psi.tree.IElementType;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.text.CharArrayUtil;
import org.jetbrains.annotations.NotNull;

import java.io.IOException;
import java.util.Set;

import static com.intellij.psi.PsiKeyword.*;

/**
 * Hand-written Java lexer. It recognizes whitespace, line/block/doc comments and
 * string/character literals directly (see {@link #_locateToken()}), and delegates
 * every other token to the generated flex lexer {@code _JavaLexer} via
 * {@link #flexLocateToken()}. Tokens are produced lazily: {@code myTokenType} is
 * {@code null} until {@link #getTokenType()}, {@link #getTokenEnd()} or
 * {@link #advance()} forces {@link #_locateToken()} to run.
 */
public class JavaLexer extends LexerBase {
  // Reserved words common to all supported language levels (plus the literals
  // true/false/null, which PsiKeyword also declares).
  private static final Set<String> KEYWORDS = ContainerUtil.newTroveSet(
    ABSTRACT, BOOLEAN, BREAK, BYTE, CASE, CATCH, CHAR, CLASS, CONST, CONTINUE, DEFAULT, DO, DOUBLE, ELSE, EXTENDS,
    FINAL, FINALLY, FLOAT, FOR, GOTO, IF, IMPLEMENTS, IMPORT, INSTANCEOF, INT, INTERFACE, LONG, NATIVE, NEW, PACKAGE,
    PRIVATE, PROTECTED, PUBLIC, RETURN, SHORT, STATIC, STRICTFP, SUPER, SWITCH, SYNCHRONIZED, THIS, THROW, THROWS,
    TRANSIENT, TRY, VOID, VOLATILE, WHILE, TRUE, FALSE, NULL);

  // Java 9 module-declaration "soft" keywords; only keywords at JDK_1_9+ (see isSoftKeyword).
  private static final Set<String> JAVA9_KEYWORDS = ContainerUtil.newTroveSet(
    MODULE, REQUIRES, EXPORTS, USES, PROVIDES, TO, WITH);

  /**
   * Returns {@code true} if {@code id} is a hard keyword at the given language level.
   * {@code assert} only counts from JDK 1.4, {@code enum} from JDK 1.5.
   */
  public static boolean isKeyword(String id, @NotNull LanguageLevel level) {
    return KEYWORDS.contains(id) ||
           level.isAtLeast(LanguageLevel.JDK_1_4) && ASSERT.equals(id) ||
           level.isAtLeast(LanguageLevel.JDK_1_5) && ENUM.equals(id);
  }

  /**
   * Returns {@code true} if {@code id} is a Java 9 module-declaration soft keyword
   * at the given language level.
   */
  public static boolean isSoftKeyword(String id, @NotNull LanguageLevel level) {
    return level.isAtLeast(LanguageLevel.JDK_1_9) && JAVA9_KEYWORDS.contains(id);
  }

  // Generated flex lexer used for all tokens this class does not handle itself.
  private final _JavaLexer myFlexLexer;
  private CharSequence myBuffer;
  // Backing char[] of myBuffer when one is available without copying, else null;
  // every scan loop below checks this to avoid charAt() virtual calls.
  private char[] myBufferArray;
  private int myBufferIndex;       // start offset of the current token
  private int myBufferEndOffset;   // exclusive end of the lexed region
  private int myTokenEndOffset;    // positioned after the last symbol of the current token
  private IElementType myTokenType; // null means "not computed yet" (lazy) or end-of-buffer

  public JavaLexer(@NotNull LanguageLevel level) {
    myFlexLexer = new _JavaLexer(level);
  }

  @Override
  public final void start(@NotNull CharSequence buffer, int startOffset, int endOffset, int initialState) {
    myBuffer = buffer;
    myBufferArray = CharArrayUtil.fromSequenceWithoutCopying(buffer);
    myBufferIndex = startOffset;
    myBufferEndOffset = endOffset;
    myTokenType = null;
    myTokenEndOffset = startOffset;
    myFlexLexer.reset(myBuffer, startOffset, endOffset, 0);
  }

  @Override
  public int getState() {
    // This lexer is stateless from the platform's point of view.
    return 0;
  }

  @Override
  public final IElementType getTokenType() {
    if (myTokenType == null) _locateToken();
    return myTokenType;
  }

  @Override
  public final int getTokenStart() {
    return myBufferIndex;
  }

  @Override
  public final int getTokenEnd() {
    if (myTokenType == null) _locateToken();
    return myTokenEndOffset;
  }

  @Override
  public final void advance() {
    if (myTokenType == null) _locateToken();
    // Dropping the type makes the next getTokenType()/getTokenEnd() call
    // lex the following token starting at myTokenEndOffset.
    myTokenType = null;
  }

  /**
   * Computes the type and end offset of the token starting at {@code myTokenEndOffset}.
   * Handles whitespace, comments and string/char literals inline; everything else
   * goes through the flex lexer.
   */
  private void _locateToken() {
    if (myTokenEndOffset == myBufferEndOffset) {
      // Nothing left to lex.
      myTokenType = null;
      myBufferIndex = myBufferEndOffset;
      return;
    }

    myBufferIndex = myTokenEndOffset;

    char c = myBufferArray != null ? myBufferArray[myBufferIndex] : myBuffer.charAt(myBufferIndex);
    switch (c) {
      case ' ':
      case '\t':
      case '\n':
      case '\r':
      case '\f':
        myTokenType = TokenType.WHITE_SPACE;
        myTokenEndOffset = getWhitespaces(myBufferIndex + 1);
        break;

      case '/':
        if (myBufferIndex + 1 >= myBufferEndOffset) {
          // Lone '/' at end of buffer: division operator.
          myTokenType = JavaTokenType.DIV;
          myTokenEndOffset = myBufferEndOffset;
        }
        else {
          char nextChar = myBufferArray != null ? myBufferArray[myBufferIndex + 1] : myBuffer.charAt(myBufferIndex + 1);
          if (nextChar == '/') {
            myTokenType = JavaTokenType.END_OF_LINE_COMMENT;
            myTokenEndOffset = getLineTerminator(myBufferIndex + 2);
          }
          else if (nextChar == '*') {
            // "/**" starts a doc comment, except "/**/" which is an (empty) block comment;
            // "/*" followed by anything else is a plain block comment.
            if (myBufferIndex + 2 >= myBufferEndOffset ||
                (myBufferArray != null ? myBufferArray[myBufferIndex + 2] : myBuffer.charAt(myBufferIndex + 2)) != '*' ||
                (myBufferIndex + 3 < myBufferEndOffset &&
                 (myBufferArray != null ? myBufferArray[myBufferIndex + 3] : myBuffer.charAt(myBufferIndex + 3)) == '/')) {
              myTokenType = JavaTokenType.C_STYLE_COMMENT;
              myTokenEndOffset = getClosingComment(myBufferIndex + 2);
            }
            else {
              myTokenType = JavaDocElementType.DOC_COMMENT;
              myTokenEndOffset = getClosingComment(myBufferIndex + 3);
            }
          }
          else {
            // '/' followed by something else ('=', operand, ...) - let flex decide.
            flexLocateToken();
          }
        }
        break;

      case '"':
      case '\'':
        myTokenType = c == '"' ? JavaTokenType.STRING_LITERAL : JavaTokenType.CHARACTER_LITERAL;
        myTokenEndOffset = getClosingParenthesis(myBufferIndex + 1, c);
        break;

      default:
        flexLocateToken();
    }

    // The scan helpers may report an end past the buffer (e.g. unterminated
    // comment); clamp so the token never extends beyond the lexed region.
    if (myTokenEndOffset > myBufferEndOffset) {
      myTokenEndOffset = myBufferEndOffset;
    }
  }

  /** Returns the offset of the first non-whitespace char at or after {@code offset}. */
  private int getWhitespaces(int offset) {
    if (offset >= myBufferEndOffset) {
      return myBufferEndOffset;
    }

    int pos = offset;
    char c = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);

    while (c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f') {
      pos++;
      if (pos == myBufferEndOffset) return pos;
      c = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
    }

    return pos;
  }

  /** Positions the flex lexer at the current token start and takes its answer. */
  private void flexLocateToken() {
    try {
      myFlexLexer.goTo(myBufferIndex);
      myTokenType = myFlexLexer.advance();
      myTokenEndOffset = myFlexLexer.getTokenEnd();
    }
    catch (IOException e) { /* impossible */ }
  }

  /**
   * Scans a string or character literal body starting at {@code offset} and returns
   * the offset just past the closing quote {@code c}. Backslash escapes are skipped
   * (including escaped line terminators, which continue the scan). An unescaped
   * line terminator ends the (unterminated) literal before the terminator itself.
   */
  private int getClosingParenthesis(int offset, char c) {
    if (offset >= myBufferEndOffset) {
      return myBufferEndOffset;
    }

    int pos = offset;
    char cur = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);

    while (true) {
      // Skip ordinary characters up to a quote, line break, or escape.
      while (cur != c && cur != '\n' && cur != '\r' && cur != '\\') {
        pos++;
        if (pos >= myBufferEndOffset) return myBufferEndOffset;
        cur = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
      }

      if (cur == '\\') {
        pos++;
        if (pos >= myBufferEndOffset) return myBufferEndOffset;
        cur = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
        if (cur == '\n' || cur == '\r') continue; // escaped line terminator: keep scanning
        pos++;
        if (pos >= myBufferEndOffset) return myBufferEndOffset;
        cur = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
      }
      else if (cur == c) {
        break;
      }
      else {
        // Unescaped line terminator: back up so it is not part of the literal.
        pos--;
        break;
      }
    }

    return pos + 1;
  }

  /**
   * Returns the offset just past the closing "*&#47;" at or after {@code offset}.
   * For an unterminated comment the result may exceed the buffer end; the caller
   * ({@link #_locateToken()}) clamps it.
   */
  private int getClosingComment(int offset) {
    int pos = offset;

    while (pos < myBufferEndOffset - 1) {
      char c = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
      if (c == '*' && (myBufferArray != null ? myBufferArray[pos + 1] : myBuffer.charAt(pos + 1)) == '/') {
        break;
      }
      pos++;
    }

    return pos + 2;
  }

  /** Returns the offset of the next '\r' or '\n' at or after {@code offset}, or the buffer end. */
  private int getLineTerminator(int offset) {
    int pos = offset;

    while (pos < myBufferEndOffset) {
      char c = myBufferArray != null ? myBufferArray[pos] : myBuffer.charAt(pos);
      if (c == '\r' || c == '\n') break;
      pos++;
    }

    return pos;
  }

  @NotNull
  @Override
  public CharSequence getBufferSequence() {
    return myBuffer;
  }

  @Override
  public final int getBufferEnd() {
    return myBufferEndOffset;
  }
}
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-speech/v2beta1/1.31.0/com/google/api/services/speech/v2beta1/model/SpeechRecognitionAlternative.java
5255
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.speech.v2beta1.model; /** * Alternative hypotheses (a.k.a. n-best list). * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Cloud Speech-to-Text API. For a detailed explanation * see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class SpeechRecognitionAlternative extends com.google.api.client.json.GenericJson { /** * Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an * estimated greater likelihood that the recognized words are correct. This field is set only for * the top alternative of a non-streaming result or, of a streaming result where `is_final=true`. * This field is not guaranteed to be accurate and users should not rely on it to be always * provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.Float confidence; /** * Output only. Transcript text representing the words that the user spoke. 
* The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String transcript; /** * Output only. A list of word-specific information for each recognized word. Note: When * `enable_speaker_diarization` is true, you will see all the words from the beginning of the * audio. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<WordInfo> words; /** * Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an * estimated greater likelihood that the recognized words are correct. This field is set only for * the top alternative of a non-streaming result or, of a streaming result where `is_final=true`. * This field is not guaranteed to be accurate and users should not rely on it to be always * provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. * @return value or {@code null} for none */ public java.lang.Float getConfidence() { return confidence; } /** * Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an * estimated greater likelihood that the recognized words are correct. This field is set only for * the top alternative of a non-streaming result or, of a streaming result where `is_final=true`. * This field is not guaranteed to be accurate and users should not rely on it to be always * provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. * @param confidence confidence or {@code null} for none */ public SpeechRecognitionAlternative setConfidence(java.lang.Float confidence) { this.confidence = confidence; return this; } /** * Output only. Transcript text representing the words that the user spoke. * @return value or {@code null} for none */ public java.lang.String getTranscript() { return transcript; } /** * Output only. Transcript text representing the words that the user spoke. 
* @param transcript transcript or {@code null} for none */ public SpeechRecognitionAlternative setTranscript(java.lang.String transcript) { this.transcript = transcript; return this; } /** * Output only. A list of word-specific information for each recognized word. Note: When * `enable_speaker_diarization` is true, you will see all the words from the beginning of the * audio. * @return value or {@code null} for none */ public java.util.List<WordInfo> getWords() { return words; } /** * Output only. A list of word-specific information for each recognized word. Note: When * `enable_speaker_diarization` is true, you will see all the words from the beginning of the * audio. * @param words words or {@code null} for none */ public SpeechRecognitionAlternative setWords(java.util.List<WordInfo> words) { this.words = words; return this; } @Override public SpeechRecognitionAlternative set(String fieldName, Object value) { return (SpeechRecognitionAlternative) super.set(fieldName, value); } @Override public SpeechRecognitionAlternative clone() { return (SpeechRecognitionAlternative) super.clone(); } }
apache-2.0
variacode/rundeck
core/src/main/java/com/dtolabs/rundeck/core/execution/workflow/WorkflowExecutionResult.java
1825
/*
 * Copyright 2016 SimplifyOps, Inc. (http://simplifyops.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * WorkflowExecutionResult.java
 *
 * User: Greg Schueler <a href="mailto:greg@dtosolutions.com">greg@dtosolutions.com</a>
 * Created: 3/23/11 2:06 PM
 *
 */
package com.dtolabs.rundeck.core.execution.workflow;

import com.dtolabs.rundeck.core.execution.ExceptionStatusResult;
import com.dtolabs.rundeck.core.execution.workflow.steps.StepExecutionResult;

import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * Aggregated outcome of a workflow execution: the ordered per-step results,
 * plus failure information indexed both by workflow step number and by node
 * name.
 *
 * @author Greg Schueler <a href="mailto:greg@dtosolutions.com">greg@dtosolutions.com</a>
 */
public interface WorkflowExecutionResult extends ExceptionStatusResult, WorkflowStatusResult, WorkflowDataResult {

    /**
     * @return ordered list of results for the executed workflow steps
     */
    List<StepExecutionResult> getResultSet();

    /**
     * @return workflow item failures grouped by the name of the node they occurred on
     */
    Map<String, Collection<StepExecutionResult>> getNodeFailures();

    /**
     * @return failed step results keyed by workflow step number
     */
    Map<Integer, StepExecutionResult> getStepFailures();
}
apache-2.0
meruvian/yama-labs
websocket/webapi/src/main/java/org/meruvian/yama/cs/webapi/config/EmailConfig.java
1273
package org.meruvian.yama.cs.webapi.config; import org.apache.commons.mail.EmailException; import org.apache.commons.mail.HtmlEmail; import org.springframework.boot.bind.RelaxedPropertyResolver; import org.springframework.context.EnvironmentAware; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Scope; import org.springframework.core.env.Environment; @Configuration public class EmailConfig implements EnvironmentAware { private RelaxedPropertyResolver props; @Bean @Scope("prototype") public HtmlEmail email() throws EmailException { HtmlEmail email = new HtmlEmail(); email.setHostName(props.getProperty("host")); email.setSmtpPort(props.getProperty("port", Integer.class, 0)); email.setAuthentication(props.getProperty("username"), props.getProperty("password")); email.setFrom(props.getProperty("from_email"), props.getProperty("from_alias")); email.setSSLOnConnect(props.getProperty("ssl", Boolean.class, false)); email.setStartTLSEnabled(props.getProperty("tls", Boolean.class, false)); return email; } @Override public void setEnvironment(Environment env) { this.props = new RelaxedPropertyResolver(env, "email."); } }
apache-2.0
gingerwizard/elasticsearch
server/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java
4892
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.cluster; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; public class ClusterInfoTests extends ESTestCase { public void testSerialization() throws Exception { ClusterInfo clusterInfo = new ClusterInfo( randomDiskUsage(), randomDiskUsage(), randomShardSizes(), randomRoutingToDataPath(), randomReservedSpace()); BytesStreamOutput output = new BytesStreamOutput(); clusterInfo.writeTo(output); ClusterInfo result = new ClusterInfo(output.bytes().streamInput()); assertEquals(clusterInfo.getNodeLeastAvailableDiskUsages(), result.getNodeLeastAvailableDiskUsages()); assertEquals(clusterInfo.getNodeMostAvailableDiskUsages(), result.getNodeMostAvailableDiskUsages()); assertEquals(clusterInfo.shardSizes, result.shardSizes); assertEquals(clusterInfo.routingToDataPath, result.routingToDataPath); assertEquals(clusterInfo.reservedSpace, result.reservedSpace); } private static 
ImmutableOpenMap<String, DiskUsage> randomDiskUsage() { int numEntries = randomIntBetween(0, 128); ImmutableOpenMap.Builder<String, DiskUsage> builder = ImmutableOpenMap.builder(numEntries); for (int i = 0; i < numEntries; i++) { String key = randomAlphaOfLength(32); DiskUsage diskUsage = new DiskUsage( randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE) ); builder.put(key, diskUsage); } return builder.build(); } private static ImmutableOpenMap<String, Long> randomShardSizes() { int numEntries = randomIntBetween(0, 128); ImmutableOpenMap.Builder<String, Long> builder = ImmutableOpenMap.builder(numEntries); for (int i = 0; i < numEntries; i++) { String key = randomAlphaOfLength(32); long shardSize = randomIntBetween(0, Integer.MAX_VALUE); builder.put(key, shardSize); } return builder.build(); } private static ImmutableOpenMap<ShardRouting, String> randomRoutingToDataPath() { int numEntries = randomIntBetween(0, 128); ImmutableOpenMap.Builder<ShardRouting, String> builder = ImmutableOpenMap.builder(numEntries); for (int i = 0; i < numEntries; i++) { ShardId shardId = new ShardId(randomAlphaOfLength(32), randomAlphaOfLength(32), randomIntBetween(0, Integer.MAX_VALUE)); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, randomBoolean(), ShardRoutingState.UNASSIGNED); builder.put(shardRouting, randomAlphaOfLength(32)); } return builder.build(); } private static ImmutableOpenMap<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> randomReservedSpace() { int numEntries = randomIntBetween(0, 128); ImmutableOpenMap.Builder<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> builder = ImmutableOpenMap.builder(numEntries); for (int i = 0; i < numEntries; i++) { final ClusterInfo.NodeAndPath key = new ClusterInfo.NodeAndPath(randomAlphaOfLength(10), randomAlphaOfLength(10)); final ClusterInfo.ReservedSpace.Builder valueBuilder = new 
ClusterInfo.ReservedSpace.Builder(); for (int j = between(0,10); j > 0; j--) { ShardId shardId = new ShardId(randomAlphaOfLength(32), randomAlphaOfLength(32), randomIntBetween(0, Integer.MAX_VALUE)); valueBuilder.add(shardId, between(0, Integer.MAX_VALUE)); } builder.put(key, valueBuilder.build()); } return builder.build(); } }
apache-2.0
NSAmelchev/ignite
modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/stat/IgniteStatisticsConfigurationManager.java
23855
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.query.stat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.events.DiscoveryCustomEvent; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; import org.apache.ignite.internal.managers.systemview.GridSystemViewManager; import org.apache.ignite.internal.managers.systemview.walker.StatisticsColumnConfigurationViewWalker; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch; import org.apache.ignite.internal.processors.cache.GridCacheContext; import 
org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.query.h2.SchemaManager; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.stat.config.StatisticsColumnConfiguration; import org.apache.ignite.internal.processors.query.stat.config.StatisticsObjectConfiguration; import org.apache.ignite.internal.processors.query.stat.view.ColumnConfigurationViewSupplier; import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.NotNull; /** * Holds statistic configuration objects at the distributed metastore * and match local statistics with target statistic configuration. */ public class IgniteStatisticsConfigurationManager { /** */ private static final String STAT_OBJ_PREFIX = "sql.statobj."; /** */ private static final String STAT_CFG_VIEW_NAME = "statistics.configuration"; /** */ private static final String STAT_CFG_VIEW_DESCRIPTION = "Statistics configuration"; /** Empty strings array. */ public static final String[] EMPTY_STRINGS = new String[0]; /** Schema manager. */ private final SchemaManager schemaMgr; /** Distributed metastore. */ private volatile DistributedMetaStorage distrMetaStorage; /** Statistic processor. 
*/ private final StatisticsProcessor statProc; /** */ private final BusyExecutor mgmtBusyExecutor; /** Persistence enabled flag. */ private final boolean persistence; /** Logger. */ private final IgniteLogger log; /** Last ready topology version if {@code null} - used to skip updates of the distributed metastorage on start. */ private volatile AffinityTopologyVersion topVer; /** Cluster state processor. */ private final GridClusterStateProcessor cluster; /** Is server node flag. */ private final boolean isServerNode; /** Change statistics configuration listener to update particular object statistics. */ private final DistributedMetastorageLifecycleListener distrMetaStoreLsnr = new DistributedMetastorageLifecycleListener() { @Override public void onReadyForWrite(DistributedMetaStorage metastorage) { distrMetaStorage = metastorage; distrMetaStorage.listen( (metaKey) -> metaKey.startsWith(STAT_OBJ_PREFIX), (k, oldV, newV) -> { // Skip invoke on start node (see 'ReadableDistributedMetaStorage#listen' the second case) // The update statistics on start node is handled by 'scanAndCheckLocalStatistic' method // called on exchange done. if (topVer == null) return; mgmtBusyExecutor.execute(() -> { StatisticsObjectConfiguration newStatCfg = (StatisticsObjectConfiguration)newV; updateLocalStatistics(newStatCfg); }); } ); } }; /** * Constructor. * * @param schemaMgr Schema manager. * @param subscriptionProcessor Subscription processor. * @param sysViewMgr System view manager. * @param cluster Cluster state processor. * @param statProc Staitistics processor. * @param persistence Persistence enabled flag. * @param mgmtPool Statistics management pool * @param stopping Stopping state supplier. * @param logSupplier Log supplier. * @param isServerNode Server node flag. 
*/ public IgniteStatisticsConfigurationManager( SchemaManager schemaMgr, GridInternalSubscriptionProcessor subscriptionProcessor, GridSystemViewManager sysViewMgr, GridClusterStateProcessor cluster, StatisticsProcessor statProc, boolean persistence, IgniteThreadPoolExecutor mgmtPool, Supplier<Boolean> stopping, Function<Class<?>, IgniteLogger> logSupplier, boolean isServerNode ) { this.schemaMgr = schemaMgr; log = logSupplier.apply(IgniteStatisticsConfigurationManager.class); this.persistence = persistence; this.mgmtBusyExecutor = new BusyExecutor("configuration", mgmtPool, stopping, logSupplier); this.statProc = statProc; this.cluster = cluster; this.isServerNode = isServerNode; subscriptionProcessor.registerDistributedMetastorageListener(distrMetaStoreLsnr); ColumnConfigurationViewSupplier colCfgViewSupplier = new ColumnConfigurationViewSupplier(this, logSupplier); sysViewMgr.registerFiltrableView(STAT_CFG_VIEW_NAME, STAT_CFG_VIEW_DESCRIPTION, new StatisticsColumnConfigurationViewWalker(), colCfgViewSupplier::columnConfigurationViewSupplier, Function.identity()); } /** * Update statistics after topology change, if necessary. * * @param fut Topology change future. */ public void afterTopologyUnlock(GridDhtPartitionsExchangeFuture fut) { topVer = fut.topologyVersion(); // Skip join/left client nodes. if (fut.exchangeType() != GridDhtPartitionsExchangeFuture.ExchangeType.ALL || (persistence && cluster.clusterState().lastState() != ClusterState.ACTIVE)) return; DiscoveryEvent evt = fut.firstEvent(); // Skip create/destroy caches. if (evt.type() == DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT) { DiscoveryCustomMessage msg = ((DiscoveryCustomEvent)evt).customMessage(); if (msg instanceof DynamicCacheChangeBatch) return; } mgmtBusyExecutor.execute(this::updateAllLocalStatistics); } /** Drop columns listener to clean its statistics configuration. 
*/ private final BiConsumer<GridH2Table, List<String>> dropColsLsnr = new BiConsumer<GridH2Table, List<String>>() { /** * Drop statistics after columns dropped. * * @param tbl Table. * @param cols Dropped columns. */ @Override public void accept(GridH2Table tbl, List<String> cols) { assert !F.isEmpty(cols); dropStatistics(Collections.singletonList( new StatisticsTarget( tbl.identifier().schema(), tbl.getName(), cols.toArray(EMPTY_STRINGS) ) ), false); } }; /** Drop table listener to clear its statistics configuration. */ private final BiConsumer<String, String> dropTblLsnr = new BiConsumer<String, String>() { /** * Drop statistics after table dropped. * * @param schema Schema name. * @param name Table name. */ @Override public void accept(String schema, String name) { assert !F.isEmpty(schema) && !F.isEmpty(name) : schema + ":" + name; StatisticsKey key = new StatisticsKey(schema, name); try { StatisticsObjectConfiguration cfg = config(key); if (cfg != null && !F.isEmpty(cfg.columns())) dropStatistics(Collections.singletonList(new StatisticsTarget(schema, name)), false); } catch (Throwable e) { if (!X.hasCause(e, NodeStoppingException.class)) throw new IgniteSQLException("Error on drop statistics for dropped table [key=" + key + ']', e); } } }; /** * Pass all necessary parameters to schedule statistics key update. * * @param cfg Statistics object configuration to update statistics by. */ private void updateLocalStatistics(StatisticsObjectConfiguration cfg) { GridH2Table tbl = schemaMgr.dataTable(cfg.key().schema(), cfg.key().obj()); if (tbl == null || cfg.columns().isEmpty()) { // Can be drop table event, need to ensure that there is no stored data left for this table. if (log.isDebugEnabled()) { if (tbl == null) log.debug("Can't find table by key " + cfg.key() + ". Check statistics empty."); else if (cfg == null) log.debug("Tombstone configuration by key " + cfg.key() + ". Check statistics empty."); } // Ensure to clean local metastorage. 
LocalStatisticsGatheringContext ctx = new LocalStatisticsGatheringContext(false, tbl, cfg, Collections.emptySet(), topVer); statProc.updateLocalStatistics(ctx); if (tbl == null && !cfg.columns().isEmpty()) { if (log.isDebugEnabled()) log.debug("Removing config for non existing object " + cfg.key()); dropStatistics(Collections.singletonList(new StatisticsTarget(cfg.key())), false); } return; } GridCacheContext<?, ?> cctx = tbl.cacheContext(); if (cctx == null || !cctx.gate().enterIfNotStopped()) { if (log.isDebugEnabled()) log.debug("Unable to lock table by key " + cfg.key() + ". Skipping statistics collection."); return; } try { AffinityTopologyVersion topVer0 = cctx.affinity().affinityReadyFuture(topVer).get(); final Set<Integer> primParts = cctx.affinity().primaryPartitions(cctx.localNodeId(), topVer0); LocalStatisticsGatheringContext ctx = new LocalStatisticsGatheringContext(false, tbl, cfg, primParts, topVer0); statProc.updateLocalStatistics(ctx); } catch (IgniteCheckedException e) { log.warning("Unexpected error during statistics collection: " + e.getMessage(), e); } finally { cctx.gate().leave(); } } /** * Get statistics configurations for all objects. * * @return Collection of all statistics configuration. * @throws IgniteCheckedException In case of error. */ public Collection<StatisticsObjectConfiguration> getAllConfig() throws IgniteCheckedException { List<StatisticsObjectConfiguration> res = new ArrayList<>(); distrMetaStorage.iterate(STAT_OBJ_PREFIX, (k, v) -> res.add((StatisticsObjectConfiguration)v)); return res; } /** * Start tracking configuration changes and do initial loading. 
*/ public void start() { if (log.isTraceEnabled()) log.trace("Statistics configuration manager starting..."); mgmtBusyExecutor.activate(); if (isServerNode) { schemaMgr.registerDropColumnsListener(dropColsLsnr); schemaMgr.registerDropTableListener(dropTblLsnr); } if (log.isDebugEnabled()) log.debug("Statistics configuration manager started."); if (distrMetaStorage != null && isServerNode) mgmtBusyExecutor.execute(this::updateAllLocalStatistics); } /** * Scan statistics configuration and update each key it contains. */ public void updateAllLocalStatistics() { try { distrMetaStorage.iterate(STAT_OBJ_PREFIX, (k, v) -> { StatisticsObjectConfiguration cfg = (StatisticsObjectConfiguration)v; updateLocalStatistics(cfg); }); } catch (IgniteCheckedException e) { log.warning("Unexpected statistics configuration processing error", e); } } /** * Stop tracking configuration changes. */ public void stop() { if (log.isTraceEnabled()) log.trace("Statistics configuration manager stopping..."); if (isServerNode) { schemaMgr.unregisterDropColumnsListener(dropColsLsnr); schemaMgr.unregisterDropTableListener(dropTblLsnr); } mgmtBusyExecutor.deactivate(); if (log.isDebugEnabled()) log.debug("Statistics configuration manager stopped."); } /** * Update local statistic for specified database objects on the cluster. * Each node will scan local primary partitions to collect and update local statistic. * * @param targets DB objects to statistics update. */ public void updateStatistics(StatisticsObjectConfiguration... 
targets) { if (log.isDebugEnabled()) log.debug("Update statistics [targets=" + targets + ']'); for (StatisticsObjectConfiguration target : targets) { GridH2Table tbl = schemaMgr.dataTable(target.key().schema(), target.key().obj()); validate(target, tbl); List<StatisticsColumnConfiguration> colCfgs; if (F.isEmpty(target.columns())) colCfgs = Arrays.stream(tbl.getColumns()) .filter(c -> c.getColumnId() >= QueryUtils.DEFAULT_COLUMNS_COUNT) .map(c -> new StatisticsColumnConfiguration(c.getName(), null)) .collect(Collectors.toList()); else colCfgs = new ArrayList<>(target.columns().values()); StatisticsObjectConfiguration newCfg = new StatisticsObjectConfiguration(target.key(), colCfgs, target.maxPartitionObsolescencePercent()); try { while (true) { String key = key2String(newCfg.key()); StatisticsObjectConfiguration oldCfg = distrMetaStorage.read(key); StatisticsObjectConfiguration resultCfg = (oldCfg == null) ? newCfg : StatisticsObjectConfiguration.merge(oldCfg, newCfg); if (distrMetaStorage.compareAndSet(key, oldCfg, resultCfg)) break; } } catch (IgniteCheckedException ex) { throw new IgniteSQLException("Error on get or update statistic schema", IgniteQueryErrorCode.UNKNOWN, ex); } } } /** * Drop local statistic for specified database objects on the cluster. * Remove local aggregated and partitioned statistics that are stored at the local metastorage. * * @param targets DB objects to update statistics by. * @param validate if {@code true} - validate statistics existence, otherwise - just try to remove. */ public void dropStatistics(List<StatisticsTarget> targets, boolean validate) { if (log.isDebugEnabled()) log.debug("Drop statistics [targets=" + targets + ']'); for (StatisticsTarget target : targets) { String key = key2String(target.key()); try { while (true) { StatisticsObjectConfiguration oldCfg = distrMetaStorage.read(key); if (validate) validateDropRefresh(target, oldCfg); if (oldCfg == null) return; Set<String> dropColNames = (target.columns() == null) ? 
Collections.emptySet() : Arrays.stream(target.columns()).collect(Collectors.toSet()); StatisticsObjectConfiguration newCfg = oldCfg.dropColumns(dropColNames); if (oldCfg.equals(newCfg)) break; if (distrMetaStorage.compareAndSet(key, oldCfg, newCfg)) break; } } catch (IgniteCheckedException ex) { throw new IgniteSQLException( "Error on get or update statistic schema", IgniteQueryErrorCode.UNKNOWN, ex); } } } /** * Drop all local statistics on the cluster. */ public void dropAll() { try { final List<StatisticsTarget> targetsToRemove = new ArrayList<>(); distrMetaStorage.iterate(STAT_OBJ_PREFIX, (k, v) -> { StatisticsKey statKey = ((StatisticsObjectConfiguration)v).key(); StatisticsObjectConfiguration cfg = (StatisticsObjectConfiguration)v; if (!F.isEmpty(cfg.columns())) targetsToRemove.add(new StatisticsTarget(statKey, null)); } ); dropStatistics(targetsToRemove, false); } catch (IgniteCheckedException e) { throw new IgniteSQLException( "Unexpected exception drop all statistics", IgniteQueryErrorCode.UNKNOWN, e); } } /** * Refresh local statistic for specified database objects on the cluster. * * @param targets DB objects to statistics update. 
*/ public void refreshStatistics(List<StatisticsTarget> targets) { if (log.isDebugEnabled()) log.debug("Drop statistics [targets=" + targets + ']'); for (StatisticsTarget target : targets) { String key = key2String(target.key()); try { while (true) { StatisticsObjectConfiguration oldCfg = distrMetaStorage.read(key); validateDropRefresh(target, oldCfg); Set<String> cols; if (F.isEmpty(target.columns())) { cols = oldCfg.columns().values().stream().map(StatisticsColumnConfiguration::name) .collect(Collectors.toSet()); } else cols = Arrays.stream(target.columns()).collect(Collectors.toSet()); StatisticsObjectConfiguration newCfg = oldCfg.refresh(cols); if (distrMetaStorage.compareAndSet(key, oldCfg, newCfg)) break; } } catch (IgniteCheckedException ex) { throw new IgniteSQLException( "Error on get or update statistic schema", IgniteQueryErrorCode.UNKNOWN, ex); } } } /** * Validate that drop/refresh target exists in specified configuration. For statistics refresh/drop operations. * * @param target Operation targer. * @param cfg Current statistics configuration. */ private void validateDropRefresh(@NotNull StatisticsTarget target, @NotNull StatisticsObjectConfiguration cfg) { if (cfg == null || F.isEmpty(cfg.columns())) { throw new IgniteSQLException( "Statistic doesn't exist for [schema=" + target.schema() + ", obj=" + target.obj() + ']', IgniteQueryErrorCode.TABLE_NOT_FOUND ); } if (!F.isEmpty(target.columns())) { for (String col : target.columns()) { if (!cfg.columns().containsKey(col)) { throw new IgniteSQLException( "Statistic doesn't exist for [" + "schema=" + cfg.key().schema() + ", obj=" + cfg.key().obj() + ", col=" + col + ']', IgniteQueryErrorCode.COLUMN_NOT_FOUND ); } } } } /** * Read statistics object configuration by key. * * @param key Statistics key to read configuration by. * @return Statistics object configuration of {@code null} if there are no such configuration. * @throws IgniteCheckedException In case of errors. 
*/ public StatisticsObjectConfiguration config(StatisticsKey key) throws IgniteCheckedException { return distrMetaStorage.read(key2String(key)); } /** * Validate specified configuration: check that specified table exist and contains all specified columns. * * @param cfg Statistics object configuration to check. * @param tbl Corresponding GridH2Table (if exists). */ private void validate(StatisticsObjectConfiguration cfg, GridH2Table tbl) { if (tbl == null) { throw new IgniteSQLException( "Table doesn't exist [schema=" + cfg.key().schema() + ", table=" + cfg.key().obj() + ']', IgniteQueryErrorCode.TABLE_NOT_FOUND); } if (!F.isEmpty(cfg.columns())) { for (String col : cfg.columns().keySet()) { if (!tbl.doesColumnExist(col)) { throw new IgniteSQLException( "Column doesn't exist [schema=" + cfg.key().schema() + ", table=" + cfg.key().obj() + ", column=" + col + ']', IgniteQueryErrorCode.COLUMN_NOT_FOUND); } } } } /** * Generate metastorage key by specified statistics key. * * @param key Statistics key. * @return Metastorage key. */ private static String key2String(StatisticsKey key) { StringBuilder sb = new StringBuilder(STAT_OBJ_PREFIX); sb.append(key.schema()).append('.').append(key.obj()); return sb.toString(); } }
apache-2.0
passion1014/metaworks_framework
core/broadleaf-framework/src/main/java/org/broadleafcommerce/core/catalog/domain/ProductOptionValueImpl.java
6905
/* * #%L * BroadleafCommerce Framework * %% * Copyright (C) 2009 - 2013 Broadleaf Commerce * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package org.broadleafcommerce.core.catalog.domain; import org.broadleafcommerce.common.extensibility.jpa.copy.DirectCopyTransform; import org.broadleafcommerce.common.extensibility.jpa.copy.DirectCopyTransformMember; import org.broadleafcommerce.common.extensibility.jpa.copy.DirectCopyTransformTypes; import org.broadleafcommerce.common.i18n.service.DynamicTranslationProvider; import org.broadleafcommerce.common.money.Money; import org.broadleafcommerce.common.presentation.AdminPresentation; import org.broadleafcommerce.common.presentation.AdminPresentationClass; import org.broadleafcommerce.common.presentation.client.SupportedFieldType; import org.broadleafcommerce.core.catalog.service.dynamic.DynamicSkuPrices; import org.broadleafcommerce.core.catalog.service.dynamic.SkuPricingConsiderationContext; import org.hibernate.annotations.Cache; import org.hibernate.annotations.CacheConcurrencyStrategy; import org.hibernate.annotations.GenericGenerator; import org.hibernate.annotations.Parameter; import java.math.BigDecimal; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.Id; import javax.persistence.Inheritance; import javax.persistence.InheritanceType; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; import javax.persistence.Table; 
@Entity @Inheritance(strategy = InheritanceType.JOINED) @Table(name = "BLC_PRODUCT_OPTION_VALUE") @Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blProducts") @AdminPresentationClass(friendlyName = "Product Option Value") @DirectCopyTransform({ @DirectCopyTransformMember(templateTokens = DirectCopyTransformTypes.SANDBOX, skipOverlaps=true), @DirectCopyTransformMember(templateTokens = DirectCopyTransformTypes.MULTITENANT_CATALOG) }) public class ProductOptionValueImpl implements ProductOptionValue { private static final long serialVersionUID = 1L; @Id @GeneratedValue(generator = "ProductOptionValueId") @GenericGenerator( name = "ProductOptionValueId", strategy = "org.broadleafcommerce.common.persistence.IdOverrideTableGenerator", parameters = { @Parameter(name = "segment_value", value = "ProductOptionValueImpl"), @Parameter(name = "entity_name", value = "org.broadleafcommerce.core.catalog.domain.ProductOptionValueImpl") }) @Column(name = "PRODUCT_OPTION_VALUE_ID") protected Long id; @Column(name = "ATTRIBUTE_VALUE") @AdminPresentation(friendlyName = "productOptionValue_attributeValue", prominent = true, order = Presentation.FieldOrder.ATTRIBUTE_VALUE, translatable = true, gridOrder = Presentation.FieldOrder.ATTRIBUTE_VALUE) protected String attributeValue; @Column(name = "DISPLAY_ORDER") @AdminPresentation(friendlyName = "productOptionValue_displayOrder", prominent = true, gridOrder = Presentation.FieldOrder.DISPLAY_ORDER, order = Presentation.FieldOrder.DISPLAY_ORDER) protected Long displayOrder; @Column(name = "PRICE_ADJUSTMENT", precision = 19, scale = 5) @AdminPresentation(friendlyName = "productOptionValue_adjustment", fieldType = SupportedFieldType.MONEY, prominent = true, gridOrder = Presentation.FieldOrder.PRICE_ADJUSTMENT, order = Presentation.FieldOrder.PRICE_ADJUSTMENT) protected BigDecimal priceAdjustment; @ManyToOne(targetEntity = ProductOptionImpl.class) @JoinColumn(name = "PRODUCT_OPTION_ID") protected ProductOption productOption; 
@Override public Long getId() { return id; } @Override public void setId(Long id) { this.id = id; } @Override public String getAttributeValue() { return DynamicTranslationProvider.getValue(this, "attributeValue", attributeValue); } @Override public void setAttributeValue(String attributeValue) { this.attributeValue = attributeValue; } @Override public Long getDisplayOrder() { return displayOrder; } @Override public void setDisplayOrder(Long displayOrder) { this.displayOrder = displayOrder; } @Override public Money getPriceAdjustment() { Money returnPrice = null; if (SkuPricingConsiderationContext.hasDynamicPricing()) { DynamicSkuPrices dynamicPrices = SkuPricingConsiderationContext.getSkuPricingService().getPriceAdjustment(this, priceAdjustment == null ? null : new Money(priceAdjustment), SkuPricingConsiderationContext.getSkuPricingConsiderationContext()); returnPrice = dynamicPrices.getPriceAdjustment(); } else { if (priceAdjustment != null) { returnPrice = new Money(priceAdjustment, Money.defaultCurrency()); } } return returnPrice; } @Override public void setPriceAdjustment(Money priceAdjustment) { this.priceAdjustment = Money.toAmount(priceAdjustment); } @Override public ProductOption getProductOption() { return productOption; } @Override public void setProductOption(ProductOption productOption) { this.productOption = productOption; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!getClass().isAssignableFrom(obj.getClass())) { return false; } ProductOptionValueImpl other = (ProductOptionValueImpl) obj; if (id != null && other.id != null) { return id.equals(other.id); } if (getAttributeValue() == null) { if (other.getAttributeValue() != null) { return false; } } else if (!getAttributeValue().equals(other.getAttributeValue())) { return false; } return true; } public static class Presentation { public static class FieldOrder { public static final int ATTRIBUTE_VALUE = 1000; public static final 
int DISPLAY_ORDER = 3000; public static final int PRICE_ADJUSTMENT = 2000; } } }
apache-2.0
maxkondr/onos-porta
core/api/src/main/java/org/onosproject/net/resource/link/LinkResourceService.java
3202
/*
 * Copyright 2014 Open Networking Laboratory
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.onosproject.net.resource.link;

import org.onosproject.net.Link;
import org.onosproject.net.intent.IntentId;
import org.onosproject.net.resource.ResourceRequest;

/**
 * Service for providing link resource allocation.
 */
public interface LinkResourceService {

    /**
     * Requests resources.
     *
     * @param req resources to be allocated
     * @return allocated resources
     */
    LinkResourceAllocations requestResources(LinkResourceRequest req);

    /**
     * Releases previously allocated resources.
     *
     * @param allocations resources to be released
     */
    void releaseResources(LinkResourceAllocations allocations);

    /**
     * Updates previously made allocations with a new resource request.
     *
     * @param req updated resource request
     * @param oldAllocations old resource allocations
     * @return new resource allocations
     */
    LinkResourceAllocations updateResources(LinkResourceRequest req,
            LinkResourceAllocations oldAllocations);

    /**
     * Returns all allocated resources.
     *
     * @return allocated resources
     */
    Iterable<LinkResourceAllocations> getAllocations();

    /**
     * Returns all resources allocated on the given link.
     *
     * @param link a target link
     * @return allocated resources
     */
    Iterable<LinkResourceAllocations> getAllocations(Link link);

    /**
     * Returns the resources allocated for an Intent.
     *
     * @param intentId the target Intent's id
     * @return allocated resources for Intent
     */
    LinkResourceAllocations getAllocations(IntentId intentId);

    /**
     * Returns available resources for given link.
     *
     * @param link a target link
     * @return available resources for the target link
     */
    Iterable<ResourceRequest> getAvailableResources(Link link);

    /**
     * Returns available resources for given link, treating the supplied
     * allocations as if they were already released.
     *
     * @param link a target link
     * @param allocations allocations to be included as available
     * @return available resources for the target link
     */
    Iterable<ResourceRequest> getAvailableResources(Link link,
            LinkResourceAllocations allocations);

    /**
     * Adds a listener for resource related events.
     *
     * @param listener listener to add
     */
    void addListener(LinkResourceListener listener);

    /**
     * Removes a listener for resource related events.
     *
     * @param listener listener to remove
     */
    void removeListener(LinkResourceListener listener);

}
apache-2.0
networknt/light-java-example
servicemesher/services/petstore-service-api/src/main/java/com/networknt/petstore/handler/PetsPetIdDeleteHandler.java
622
package com.networknt.petstore.handler;

import com.networknt.handler.LightHttpHandler;
import io.undertow.server.HttpServerExchange;
import io.undertow.util.HttpString;

import java.util.HashMap;
import java.util.Map;

/**
 * Stub handler for {@code DELETE /pets/{petId}}: always answers 200 with a
 * canned pet JSON payload, regardless of the requested pet id.
 */
public class PetsPetIdDeleteHandler implements LightHttpHandler {

    /** Name of the response content-type header. */
    private static final HttpString CONTENT_TYPE = new HttpString("Content-Type");

    /** Canned response body returned for every request. */
    private static final String STUB_BODY =
            "{\"id\":1,\"name\":\"Jessica Right\",\"tag\":\"pet\"}";

    @Override
    public void handleRequest(HttpServerExchange exchange) throws Exception {
        exchange.getResponseHeaders().add(CONTENT_TYPE, "application/json");
        exchange.setStatusCode(200);
        exchange.getResponseSender().send(STUB_BODY);
    }
}
apache-2.0
tomwhite/spark-dataflow
src/test/java/com/cloudera/dataflow/spark/TestSparkPipelineOptionsFactory.java
1194
/*
 * Copyright (c) 2014, Cloudera, Inc. All Rights Reserved.
 *
 * Cloudera, Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"). You may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for
 * the specific language governing permissions and limitations under the
 * License.
 */
package com.cloudera.dataflow.spark;

import org.junit.Assert;
import org.junit.Test;

/**
 * Unit tests for {@link SparkPipelineOptionsFactory} default values and setters.
 */
public class TestSparkPipelineOptionsFactory {

  @Test
  public void testDefaultCreateMethod() {
    // A freshly created options object must default to a single-threaded local master.
    SparkPipelineOptions options = SparkPipelineOptionsFactory.create();
    Assert.assertEquals("local[1]", options.getSparkMaster());
  }

  @Test
  public void testSettingCustomOptions() {
    // A custom master set via the setter must be read back unchanged.
    SparkPipelineOptions options = SparkPipelineOptionsFactory.create();
    options.setSparkMaster("spark://207.184.161.138:7077");
    Assert.assertEquals("spark://207.184.161.138:7077", options.getSparkMaster());
  }
}
apache-2.0
tufangorel/hazelcast
hazelcast/src/main/java/com/hazelcast/client/impl/protocol/task/transactionallist/TransactionalListRemoveMessageTask.java
2894
/*
 * Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.client.impl.protocol.task.transactionallist;

import com.hazelcast.client.impl.protocol.ClientMessage;
import com.hazelcast.client.impl.protocol.codec.TransactionalListRemoveCodec;
import com.hazelcast.client.impl.protocol.task.AbstractTransactionalMessageTask;
import com.hazelcast.collection.impl.list.ListService;
import com.hazelcast.core.TransactionalList;
import com.hazelcast.instance.Node;
import com.hazelcast.nio.Connection;
import com.hazelcast.security.permission.ActionConstants;
import com.hazelcast.security.permission.ListPermission;
import com.hazelcast.transaction.TransactionContext;

import java.security.Permission;

/**
 * Client protocol task that removes an item from a transactional list
 * within the client's active transaction context.
 */
public class TransactionalListRemoveMessageTask
        extends AbstractTransactionalMessageTask<TransactionalListRemoveCodec.RequestParameters> {

    public TransactionalListRemoveMessageTask(ClientMessage clientMessage, Node node, Connection connection) {
        super(clientMessage, node, connection);
    }

    /**
     * Looks up the transaction by id on the client endpoint and removes
     * the requested item from the named transactional list.
     *
     * @return {@code true} if the item was present and removed
     */
    @Override
    protected Object innerCall() throws Exception {
        final TransactionContext context = endpoint.getTransactionContext(parameters.txnId);
        TransactionalList<Object> list = context.getList(parameters.name);
        return list.remove(parameters.item);
    }

    @Override
    protected long getClientThreadId() {
        return parameters.threadId;
    }

    @Override
    protected TransactionalListRemoveCodec.RequestParameters decodeClientMessage(ClientMessage clientMessage) {
        return TransactionalListRemoveCodec.decodeRequest(clientMessage);
    }

    @Override
    protected ClientMessage encodeResponse(Object response) {
        return TransactionalListRemoveCodec.encodeResponse((Boolean) response);
    }

    @Override
    public String getServiceName() {
        return ListService.SERVICE_NAME;
    }

    // Caller must hold the REMOVE action permission on this list.
    @Override
    public Permission getRequiredPermission() {
        return new ListPermission(parameters.name, ActionConstants.ACTION_REMOVE);
    }

    @Override
    public String getDistributedObjectName() {
        return parameters.name;
    }

    @Override
    public String getMethodName() {
        return "remove";
    }

    @Override
    public Object[] getParameters() {
        return new Object[]{parameters.item};
    }
}
apache-2.0
Nmishin/jagger
jaas-client/src/main/java/com/griddynamics/jagger/jaas/storage/model/TestEnvironmentEntity.java
4630
package com.griddynamics.jagger.jaas.storage.model;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;

import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
import javax.persistence.Table;
import java.util.List;

import static org.apache.commons.collections.CollectionUtils.isEqualCollection;

/**
 * Persistent entity describing a Jagger test environment: its registered load
 * scenarios, current status, the scenario currently running (if any), the
 * lease expiration timestamp, and the owning session id.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@Entity
@Table(name = "test_environment_entity")
public class TestEnvironmentEntity {

    public enum TestEnvironmentStatus {
        PENDING, RUNNING
    }

    @Id
    @Column(name = "`environment_id`")
    private String environmentId;

    @OneToMany(mappedBy = "testEnvironmentEntity", fetch = FetchType.EAGER, cascade = CascadeType.ALL, orphanRemoval = true)
    private List<LoadScenarioEntity> loadScenarios;

    @Column(nullable = false)
    @Enumerated(EnumType.STRING)
    private TestEnvironmentStatus status = TestEnvironmentStatus.PENDING;

    @OneToOne(cascade = CascadeType.ALL)
    @JoinColumn(name = "`running_load_scenario`")
    private LoadScenarioEntity runningLoadScenario;

    // Hidden from JSON: internal lease bookkeeping only.
    @JsonIgnore
    @Column(name = "`expiration_timestamp`")
    private long expirationTimestamp;

    // Hidden from JSON; may legitimately be null when no session owns the environment.
    @JsonIgnore
    @Column(name = "`session_id`")
    private String sessionId;

    public String getEnvironmentId() {
        return environmentId;
    }

    public void setEnvironmentId(String environmentId) {
        this.environmentId = environmentId;
    }

    public List<LoadScenarioEntity> getLoadScenarios() {
        return loadScenarios;
    }

    public void setLoadScenarios(List<LoadScenarioEntity> loadScenarios) {
        this.loadScenarios = loadScenarios;
    }

    public TestEnvironmentStatus getStatus() {
        return status;
    }

    public void setStatus(TestEnvironmentStatus status) {
        this.status = status;
    }

    public LoadScenarioEntity getRunningLoadScenario() {
        return runningLoadScenario;
    }

    public void setRunningLoadScenario(LoadScenarioEntity runningLoadScenario) {
        this.runningLoadScenario = runningLoadScenario;
    }

    public long getExpirationTimestamp() {
        return expirationTimestamp;
    }

    public void setExpirationTimestamp(long expirationTimestamp) {
        this.expirationTimestamp = expirationTimestamp;
    }

    public String getSessionId() {
        return sessionId;
    }

    public void setSessionId(String sessionId) {
        this.sessionId = sessionId;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false;

        TestEnvironmentEntity that = (TestEnvironmentEntity) obj;

        if (!environmentId.equals(that.environmentId)) return false;
        if (sessionId != null ? !sessionId.equals(that.sessionId) : that.sessionId != null) return false;
        // Scenario lists are compared as unordered collections; exactly one side null means unequal.
        if (loadScenarios != null && that.loadScenarios == null || loadScenarios == null && that.loadScenarios != null)
            return false;
        if (loadScenarios != null && that.getLoadScenarios() != null && !isEqualCollection(loadScenarios, that.loadScenarios))
            return false;
        if (status != that.status) return false;
        if (expirationTimestamp != that.expirationTimestamp) return false;
        return runningLoadScenario != null ? runningLoadScenario.equals(that.runningLoadScenario)
                : that.runningLoadScenario == null;
    }

    @Override
    public int hashCode() {
        int result = environmentId.hashCode();
        result = 31 * result + (loadScenarios != null ? loadScenarios.hashCode() : 0);
        result = 31 * result + status.hashCode();
        result = 31 * result + (runningLoadScenario != null ? runningLoadScenario.hashCode() : 0);
        result = 31 * result + Long.hashCode(expirationTimestamp);
        // Null-guarded: equals() tolerates a null sessionId, so hashCode must too
        // (previously threw NullPointerException for entities without a session).
        result = 31 * result + (sessionId != null ? sessionId.hashCode() : 0);
        return result;
    }

    @Override
    public String toString() {
        return "TestEnvironmentEntity{"
                + "environmentId='" + environmentId + '\''
                + ", loadScenarios=" + loadScenarios
                + ", status=" + status
                + ", runningLoadScenario=" + runningLoadScenario
                + ", expirationTimestamp=" + expirationTimestamp
                + ", sessionId=" + sessionId
                + '}';
    }
}
apache-2.0
robinbakkerus/workshop-grpc
src/main/java/io/grpc/examples/routeguide/RouteGuideServer.java
11416
/* * Copyright 2015, Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

package io.grpc.examples.routeguide;

import static java.lang.Math.atan2;
import static java.lang.Math.cos;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.lang.Math.sin;
import static java.lang.Math.sqrt;
import static java.lang.Math.toRadians;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;

import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * A sample gRPC server that serves the RouteGuide (see route_guide.proto) service.
 */
public class RouteGuideServer {
  private static final Logger logger = Logger.getLogger(RouteGuideServer.class.getName());

  private final int port;
  private final Server server;

  public RouteGuideServer(int port) throws IOException {
    this(port, RouteGuideUtil.getDefaultFeaturesFile());
  }

  /** Create a RouteGuide server listening on {@code port} using {@code featureFile} database. */
  public RouteGuideServer(int port, URL featureFile) throws IOException {
    this(ServerBuilder.forPort(port), port, RouteGuideUtil.parseFeatures(featureFile));
  }

  /** Create a RouteGuide server using serverBuilder as a base and features as data. */
  public RouteGuideServer(ServerBuilder<?> serverBuilder, int port, Collection<Feature> features) {
    this.port = port;
    server = serverBuilder.addService(new RouteGuideService(features))
        .build();
  }

  /** Start serving requests. */
  public void start() throws IOException {
    server.start();
    logger.info("Server started, listening on " + port);
    // Shutdown hook so Ctrl-C / SIGTERM stops the gRPC server cleanly.
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        // Use stderr here since the logger may have been reset by its JVM shutdown hook.
        System.err.println("*** shutting down gRPC server since JVM is shutting down");
        RouteGuideServer.this.stop();
        System.err.println("*** server shut down");
      }
    });
  }

  /** Stop serving requests and shutdown resources. */
  public void stop() {
    if (server != null) {
      server.shutdown();
    }
  }

  /**
   * Await termination on the main thread since the grpc library uses daemon threads.
   */
  private void blockUntilShutdown() throws InterruptedException {
    if (server != null) {
      server.awaitTermination();
    }
  }

  /**
   * Main method. This comment makes the linter happy.
   */
  public static void main(String[] args) throws Exception {
    RouteGuideServer server = new RouteGuideServer(8980);
    server.start();
    server.blockUntilShutdown();
  }

  /**
   * Our implementation of RouteGuide service.
   *
   * <p>See route_guide.proto for details of the methods.
   */
  private static class RouteGuideService extends RouteGuideGrpc.RouteGuideImplBase {
    private final Collection<Feature> features;
    // Chat notes keyed by location; values are synchronized lists (see getOrCreateNotes).
    private final ConcurrentMap<Point, List<RouteNote>> routeNotes =
        new ConcurrentHashMap<Point, List<RouteNote>>();

    RouteGuideService(Collection<Feature> features) {
      this.features = features;
    }

    /**
     * Gets the {@link Feature} at the requested {@link Point}. If no feature at that location
     * exists, an unnamed feature is returned at the provided location.
     *
     * @param request the requested location for the feature.
     * @param responseObserver the observer that will receive the feature at the requested point.
     */
    @Override
    public void getFeature(Point request, StreamObserver<Feature> responseObserver) {
      responseObserver.onNext(checkFeature(request));
      responseObserver.onCompleted();
    }

    /**
     * Gets all features contained within the given bounding {@link Rectangle}.
     *
     * @param request the bounding rectangle for the requested features.
     * @param responseObserver the observer that will receive the features.
     */
    @Override
    public void listFeatures(Rectangle request, StreamObserver<Feature> responseObserver) {
      // Normalize the rectangle: lo/hi corners may arrive in any order.
      int left = min(request.getLo().getLongitude(), request.getHi().getLongitude());
      int right = max(request.getLo().getLongitude(), request.getHi().getLongitude());
      int top = max(request.getLo().getLatitude(), request.getHi().getLatitude());
      int bottom = min(request.getLo().getLatitude(), request.getHi().getLatitude());

      for (Feature feature : features) {
        if (!RouteGuideUtil.exists(feature)) {
          continue;
        }

        int lat = feature.getLocation().getLatitude();
        int lon = feature.getLocation().getLongitude();
        if (lon >= left && lon <= right && lat >= bottom && lat <= top) {
          responseObserver.onNext(feature);
        }
      }
      responseObserver.onCompleted();
    }

    /**
     * Gets a stream of points, and responds with statistics about the "trip": number of points,
     * number of known features visited, total distance traveled, and total time spent.
     *
     * @param responseObserver an observer to receive the response summary.
     * @return an observer to receive the requested route points.
     */
    @Override
    public StreamObserver<Point> recordRoute(final StreamObserver<RouteSummary> responseObserver) {
      return new StreamObserver<Point>() {
        int pointCount;
        int featureCount;
        int distance;
        Point previous;
        final long startTime = System.nanoTime();

        @Override
        public void onNext(Point point) {
          pointCount++;
          if (RouteGuideUtil.exists(checkFeature(point))) {
            featureCount++;
          }
          // For each point after the first, add the incremental distance from the previous point to
          // the total distance value.
          if (previous != null) {
            distance += calcDistance(previous, point);
          }
          previous = point;
        }

        @Override
        public void onError(Throwable t) {
          logger.log(Level.WARNING, "recordRoute cancelled");
        }

        @Override
        public void onCompleted() {
          long seconds = NANOSECONDS.toSeconds(System.nanoTime() - startTime);
          responseObserver.onNext(RouteSummary.newBuilder().setPointCount(pointCount)
              .setFeatureCount(featureCount).setDistance(distance)
              .setElapsedTime((int) seconds).build());
          responseObserver.onCompleted();
        }
      };
    }

    /**
     * Receives a stream of message/location pairs, and responds with a stream of all previous
     * messages at each of those locations.
     *
     * @param responseObserver an observer to receive the stream of previous messages.
     * @return an observer to handle requested message/location pairs.
     */
    @Override
    public StreamObserver<RouteNote> routeChat(final StreamObserver<RouteNote> responseObserver) {
      return new StreamObserver<RouteNote>() {
        @Override
        public void onNext(RouteNote note) {
          List<RouteNote> notes = getOrCreateNotes(note.getLocation());

          // Respond with all previous notes at this location.
          // Snapshot via toArray so concurrent writers can't disturb the iteration.
          for (RouteNote prevNote : notes.toArray(new RouteNote[0])) {
            responseObserver.onNext(prevNote);
          }

          // Now add the new note to the list
          notes.add(note);
        }

        @Override
        public void onError(Throwable t) {
          logger.log(Level.WARNING, "routeChat cancelled");
        }

        @Override
        public void onCompleted() {
          responseObserver.onCompleted();
        }
      };
    }

    /**
     * Get the notes list for the given location. If missing, create it.
     */
    private List<RouteNote> getOrCreateNotes(Point location) {
      List<RouteNote> notes = Collections.synchronizedList(new ArrayList<RouteNote>());
      // putIfAbsent: keep whichever list won the race to register first.
      List<RouteNote> prevNotes = routeNotes.putIfAbsent(location, notes);
      return prevNotes != null ? prevNotes : notes;
    }

    /**
     * Gets the feature at the given point.
     *
     * @param location the location to check.
     * @return The feature object at the point. Note that an empty name indicates no feature.
     */
    private Feature checkFeature(Point location) {
      for (Feature feature : features) {
        if (feature.getLocation().getLatitude() == location.getLatitude()
            && feature.getLocation().getLongitude() == location.getLongitude()) {
          return feature;
        }
      }

      // No feature was found, return an unnamed feature.
      return Feature.newBuilder().setName("").setLocation(location).build();
    }

    /**
     * Calculate the distance between two points using the "haversine" formula.
     * This code was taken from http://www.movable-type.co.uk/scripts/latlong.html.
     *
     * @param start The starting point
     * @param end The end point
     * @return The distance between the points in meters
     */
    private static int calcDistance(Point start, Point end) {
      double lat1 = RouteGuideUtil.getLatitude(start);
      double lat2 = RouteGuideUtil.getLatitude(end);
      double lon1 = RouteGuideUtil.getLongitude(start);
      double lon2 = RouteGuideUtil.getLongitude(end);
      int r = 6371000; // meters
      double phi1 = toRadians(lat1);
      double phi2 = toRadians(lat2);
      double deltaPhi = toRadians(lat2 - lat1);
      double deltaLambda = toRadians(lon2 - lon1);
      double a = sin(deltaPhi / 2) * sin(deltaPhi / 2)
          + cos(phi1) * cos(phi2) * sin(deltaLambda / 2) * sin(deltaLambda / 2);
      double c = 2 * atan2(sqrt(a), sqrt(1 - a));

      return (int) (r * c);
    }
  }
}
apache-2.0
pwachira/droolsexamples
drools-examples-cdi/cdi-example/src/main/java/org/drools/example/cdi/cdiexample/Message.java
1421
package org.drools.example.cdi.cdiexample; public class Message { private String name; private String text; public Message(String name, String text) { this.text = text; this.name = name; } public String getText() { return text; } public void setText(String text) { this.text = text; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String toString() { return "Message[name='" + name + "' text='" + text + "'"; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((name == null) ? 0 : name.hashCode()); result = prime * result + ((text == null) ? 0 : text.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } Message other = (Message) obj; if (name == null) { if (other.name != null) { return false; } } else if (!name.equals(other.name)) { return false; } if (text == null) { if (other.text != null) { return false; } } else if (!text.equals(other.text)) { return false; } return true; } }
apache-2.0
mashuai/Open-Source-Research
Javac2007/流程/jvm/16Gen/other/17.java
1148
/** Construct an attributed tree to coerce an expression to some erased
 *  target type, unless the expression is already assignable to that type.
 *  If target type is a constant type, use its base type instead.
 *  @param tree The expression tree.
 *  @param target The target type.
 */
JCExpression coerce(JCExpression tree, Type target) {
    try {// added by me: debug-trace method entry (original comment: 我加上的 = "added by me")
        DEBUG.P(this,"coerce(2)");
        DEBUG.P("tree="+tree);
        DEBUG.P("target="+target);

        // A constant type's baseType() drops the constant value, leaving the
        // underlying type; for non-constant types this is the type itself.
        Type btarget = target.baseType();
        DEBUG.P("btarget="+btarget);
        DEBUG.P("tree.type.isPrimitive()="+tree.type.isPrimitive());
        DEBUG.P("target.isPrimitive()="+target.isPrimitive());
        DEBUG.P("(tree.type.isPrimitive() == target.isPrimitive())="+(tree.type.isPrimitive() == target.isPrimitive()));

        // Only insert a cast when the expression and target agree on
        // primitiveness AND the expression is not already assignable to the
        // base target type. When primitiveness differs (a boxing/unboxing
        // situation), the tree is returned unchanged.
        if (tree.type.isPrimitive() == target.isPrimitive()) {
            return types.isAssignable(tree.type, btarget, Warner.noWarnings)
                ? tree
                : cast(tree, btarget);
        }
        return tree;
    }finally{// added by me: debug-trace method exit (original comment: 我加上的)
        DEBUG.P(0,this,"coerce(2)");
    }
}
apache-2.0
mhurne/aws-sdk-java
aws-java-sdk-cloudfront/src/main/java/com/amazonaws/services/cloudfront/model/transform/DistributionAlreadyExistsExceptionUnmarshaller.java
1628
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.cloudfront.model.transform; import org.w3c.dom.Node; import com.amazonaws.AmazonServiceException; import com.amazonaws.util.XpathUtils; import com.amazonaws.transform.StandardErrorUnmarshaller; import com.amazonaws.services.cloudfront.model.DistributionAlreadyExistsException; public class DistributionAlreadyExistsExceptionUnmarshaller extends StandardErrorUnmarshaller { public DistributionAlreadyExistsExceptionUnmarshaller() { super(DistributionAlreadyExistsException.class); } @Override public AmazonServiceException unmarshall(Node node) throws Exception { // Bail out if this isn't the right error code that this // marshaller understands String errorCode = parseErrorCode(node); if (errorCode == null || !errorCode.equals("DistributionAlreadyExists")) return null; DistributionAlreadyExistsException e = (DistributionAlreadyExistsException) super .unmarshall(node); return e; } }
apache-2.0
treasure-data/presto
presto-main/src/main/java/io/prestosql/execution/TaskStateMachine.java
4426
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.prestosql.execution;

import com.google.common.util.concurrent.ListenableFuture;
import io.airlift.log.Logger;
import io.prestosql.execution.StateMachine.StateChangeListener;
import org.joda.time.DateTime;

import javax.annotation.concurrent.ThreadSafe;

import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;

import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static io.prestosql.execution.TaskState.FLUSHING;
import static io.prestosql.execution.TaskState.RUNNING;
import static io.prestosql.execution.TaskState.TERMINAL_TASK_STATES;
import static java.util.Objects.requireNonNull;

/**
 * Tracks the lifecycle state of a single task. Wraps a {@link StateMachine}
 * over {@link TaskState}, starting in {@code RUNNING}; once a terminal state
 * (one of {@code TERMINAL_TASK_STATES}) is reached, no further transitions are
 * accepted. Failure causes are collected separately in a thread-safe queue.
 */
@ThreadSafe
public class TaskStateMachine
{
    private static final Logger log = Logger.get(TaskStateMachine.class);

    // Captured at construction; exposed via getCreatedTime().
    private final DateTime createdTime = DateTime.now();

    private final TaskId taskId;
    private final StateMachine<TaskState> taskState;
    // Throwables recorded by failed(); readable at any time via getFailureCauses().
    private final LinkedBlockingQueue<Throwable> failureCauses = new LinkedBlockingQueue<>();

    /**
     * @param taskId identifies the task; used in log output and state-machine naming
     * @param executor executor on which state-change listeners are notified
     */
    public TaskStateMachine(TaskId taskId, Executor executor)
    {
        this.taskId = requireNonNull(taskId, "taskId is null");
        // Terminal states are passed so the state machine can refuse transitions out of them.
        taskState = new StateMachine<>("task " + taskId, executor, TaskState.RUNNING, TERMINAL_TASK_STATES);
        taskState.addStateChangeListener(newState -> log.debug("Task %s is %s", taskId, newState));
    }

    public DateTime getCreatedTime()
    {
        return createdTime;
    }

    public TaskId getTaskId()
    {
        return taskId;
    }

    public TaskState getState()
    {
        return taskState.get();
    }

    /**
     * Returns a future that completes when the state changes away from
     * {@code currentState}. If the task is already in a done state, an
     * immediate future holding that state is returned instead.
     *
     * @throws IllegalArgumentException if {@code currentState} is already done
     */
    public ListenableFuture<TaskState> getStateChange(TaskState currentState)
    {
        requireNonNull(currentState, "currentState is null");
        checkArgument(!currentState.isDone(), "Current state is already done");

        ListenableFuture<TaskState> future = taskState.getStateChange(currentState);
        // Re-check AFTER registering the future: if the task reached a done
        // state in between, return it immediately rather than waiting on a
        // change notification that may never be relevant.
        TaskState state = taskState.get();
        if (state.isDone()) {
            return immediateFuture(state);
        }
        return future;
    }

    public LinkedBlockingQueue<Throwable> getFailureCauses()
    {
        return failureCauses;
    }

    /**
     * Moves the task to FLUSHING, but only if it is currently RUNNING;
     * otherwise this is a no-op.
     */
    public void transitionToFlushing()
    {
        taskState.setIf(FLUSHING, currentState -> currentState == RUNNING);
    }

    public void finished()
    {
        transitionToDoneState(TaskState.FINISHED);
    }

    public void cancel()
    {
        transitionToDoneState(TaskState.CANCELED);
    }

    public void abort()
    {
        transitionToDoneState(TaskState.ABORTED);
    }

    /**
     * Records the failure cause and moves the task to FAILED (unless it is
     * already in a done state).
     */
    public void failed(Throwable cause)
    {
        failureCauses.add(cause);
        transitionToDoneState(TaskState.FAILED);
    }

    // Transitions into the given done state; silently ignored if the task is
    // already done (first terminal state wins).
    private void transitionToDoneState(TaskState doneState)
    {
        requireNonNull(doneState, "doneState is null");
        checkArgument(doneState.isDone(), "doneState %s is not a done state", doneState);

        taskState.setIf(doneState, currentState -> !currentState.isDone());
    }

    /**
     * Listener is always notified asynchronously using a dedicated notification thread pool so, care should
     * be taken to avoid leaking {@code this} when adding a listener in a constructor. Additionally, it is
     * possible notifications are observed out of order due to the asynchronous execution.
     */
    public void addStateChangeListener(StateChangeListener<TaskState> stateChangeListener)
    {
        taskState.addStateChangeListener(stateChangeListener);
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("taskId", taskId)
                .add("taskState", taskState)
                .add("failureCauses", failureCauses)
                .toString();
    }
}
apache-2.0
jandppw/ppwcode-recovered-from-google-code
java/util/reflection/trunk/src/test/java/org/ppwcode/util/reflect_I/teststubs/StubClass.java
11052
/*<license>
Copyright 2004 - $Date$ by PeopleWare n.v..

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
</license>*/

package org.ppwcode.util.reflect_I.teststubs;

import java.io.Serializable;
import java.util.Date;

/**
 * Test fixture for the reflection utilities. The exact names, parameter lists,
 * access modifiers and generic signatures of the members below ARE the test
 * data (e.g. overload resolution across public/protected/package/private,
 * static vs. instance, generic vs. erased) — do not "clean them up".
 */
public class StubClass extends SuperStubClass {

  // --- nested static classes used as reflection targets ---

  public static class StubClassA {

    public StubClassA(String s, int i, float f) {
      //
    }

    public int stubMethod1(String s, int i, float f) {
      return 0;
    }

    public void stubMethod2() {
      return;
    }

  }

  public static class StubClassB extends StubClassA {

    public StubClassB(String s, int i, float f) {
      super(s, i, f);
    }

  }

  // --- non-static inner classes (carry an implicit enclosing-instance reference) ---

  public class StubClassInnerA {

    public class StubClassInnerAInner {
      // NOP
    }

  }

  public class StubClassInnerB extends StubClassInnerA {
    // NOP
  }

  // --- constructors exercising the stub property ---

  public StubClass() {
    $stubProperty = new CloneableStubClassA();
  }

  public StubClass(CloneableStubClassA cscA) {
    $stubProperty = cscA;
  }

  // --- overloaded instance methods: every parameter type / modifier combination ---

  @Override
  @SuppressWarnings("unused")
  public void stubMethod() {
    // NOP
  }

  @SuppressWarnings("unused")
  public double stubMethodWithReturn() {
    return 0.0d;
  }

  @SuppressWarnings("unused")
  public double stubMethodWithException() throws Exception {
    return 0.0d;
  }

  @SuppressWarnings("unused")
  public void stubMethod(Object o) {
    // NOP
  }

  @SuppressWarnings("unused")
  public void stubMethod(String s) {
    // NOP
  }

  @SuppressWarnings("unused")
  public void stubMethod(int i) {
    // NOP
  }

  @SuppressWarnings("unused")
  public void stubMethod(Class<StubClass> stubClass) {
    // NOP
  }

  @SuppressWarnings("unused")
  public void stubMethod(int i, boolean b, Object o, String s) {
    // NOP
  }

  // Generic overloads: unbounded vs. Serializable-bounded type parameter.
  @SuppressWarnings("unused")
  public <_T_> _T_ stubMethod(_T_ t, float f) {
    return null;
  }

  @SuppressWarnings("unused")
  public <_T_ extends Serializable> _T_ stubMethod(_T_ t, float f) {
    return null;
  }

  @SuppressWarnings("unused")
  public void stubMethod(Date d) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubMethod(Object[] os) {
    // NOP
  }

  // Package-private, protected and private overloads.
  @SuppressWarnings("unused")
  void stubMethod(long l) {
    // NOP
  }

  @SuppressWarnings("unused")
  protected void stubMethod(boolean b) {
    // NOP
  }

  @SuppressWarnings("unused")
  private void stubMethod(byte b) {
    // NOP
  }

  // --- static counterparts of the method overload matrix ---

  @SuppressWarnings("unused")
  public static void stubStaticMethod() {
    // NOP
  }

  @SuppressWarnings("unused")
  public static int stubStaticMethodWithReturn() {
    return 0;
  }

  @SuppressWarnings("unused")
  public static int stubStaticMethodWithException() throws Exception {
    return 0;
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(Object o) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(String s) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(int i) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(Class<StubClass> stubClass) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(int i, boolean b, Object o, String s) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static <_T_> _T_ stubStaticMethod(_T_ t, float f) {
    return t;
  }

  @SuppressWarnings("unused")
  public static <_T_ extends Serializable> _T_ stubStaticMethod(_T_ t, float f) {
    return t;
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(Date d) {
    // NOP
  }

  @SuppressWarnings("unused")
  public static void stubStaticMethod(Object[] os) {
    // NOP
  }

  // NOTE(review): despite the "Static" in the name, the next three overloads
  // are NOT declared static — presumably intentional fixture data for the
  // reflection tests; confirm before changing.
  @SuppressWarnings("unused")
  void stubStaticMethod(long l) {
    // NOP
  }

  @SuppressWarnings("unused")
  protected void stubStaticMethod(boolean b) {
    // NOP
  }

  @SuppressWarnings("unused")
  private void stubStaticMethod(byte b) {
    // NOP
  }

  // --- constructor overload matrix, mirroring the method matrix ---

  public StubClass(Object o) {
    // NOP
  }

  public StubClass(String s) {
    // NOP
  }

  public StubClass(int i) {
    // NOP
  }

  public StubClass(Class<StubClass> stubClass) {
    // NOP
  }

  public StubClass(StubClass s) {
    // NOP
  }

  public StubClass(int i, boolean b, Object o, String s) {
    // NOP
  }

  public <_T_> StubClass(_T_ t1, _T_ t2, float f) {
    // NOP
  }

  public <_T_ extends Serializable> StubClass(_T_ t1, Serializable t2, float f) {
    // NOP
  }

  public StubClass(Date d) throws Exception {
    // NOP
  }

  public StubClass(Object[] os) throws Exception {
    // NOP
  }

  StubClass(long l) throws Exception {
    // NOP
  }

  protected StubClass(boolean b) throws Exception {
    // NOP
  }

  @SuppressWarnings("unused")
  private StubClass(byte b) throws Exception {
    // NOP
  }

  // --- read/write property (accessors clone defensively) ---

  public final CloneableStubClassA getStubProperty() {
    return $stubProperty.clone();
  }

  public final void setStubProperty(CloneableStubClassA stubProperty) {
    $stubProperty = stubProperty.clone();
  }

  private CloneableStubClassA $stubProperty;

  // Public field "property" variant.
  public CloneableStubClassA stubPropertyField = new CloneableStubClassA();

  // --- read-only properties with setters of decreasing visibility ---

  public final CloneableStubClassA getStubRoProperty() {
    return $stubRoProperty.clone();
  }

  private CloneableStubClassA $stubRoProperty = new CloneableStubClassA();

  public final CloneableStubClassA getStubRoPrivateProperty() {
    return $stubRoPrivateProperty.clone();
  }

  @SuppressWarnings("unused")
  private final void setStubRoPrivateProperty(CloneableStubClassA stubRoPrivateProperty) {
    $stubRoPrivateProperty = stubRoPrivateProperty.clone();
  }

  private CloneableStubClassA $stubRoPrivateProperty = new CloneableStubClassA();

  public final CloneableStubClassA getStubRoPackageProperty() {
    return $stubRoPackageProperty.clone();
  }

  final void setStubRoPackageProperty(CloneableStubClassA stubRoPackageProperty) {
    $stubRoPackageProperty = stubRoPackageProperty.clone();
  }

  private CloneableStubClassA $stubRoPackageProperty = new CloneableStubClassA();

  public final CloneableStubClassA getStubRoProtectedProperty() {
    return $stubRoProtectedProperty.clone();
  }

  protected final void setStubRoProtectedProperty(CloneableStubClassA stubRoProtectedProperty) {
    $stubRoProtectedProperty = stubRoProtectedProperty.clone();
  }

  private CloneableStubClassA $stubRoProtectedProperty = new CloneableStubClassA();

  // --- write-only properties with getters of decreasing visibility ---

  public final void setStubWoProperty(CloneableStubClassA stubWoProperty) {
    $stubWoProperty = stubWoProperty.clone();
  }

  @SuppressWarnings("unused")
  private CloneableStubClassA $stubWoProperty = new CloneableStubClassA();

  @SuppressWarnings("unused")
  private final CloneableStubClassA getStubWoPrivateProperty() {
    return $stubWoPrivateProperty.clone();
  }

  public final void setStubWoPrivateProperty(CloneableStubClassA stubWoPrivateProperty) {
    $stubWoPrivateProperty = stubWoPrivateProperty.clone();
  }

  private CloneableStubClassA $stubWoPrivateProperty = new CloneableStubClassA();

  final CloneableStubClassA getStubWoPackageProperty() {
    return $stubWoPackageProperty.clone();
  }

  public final void setStubWoPackageProperty(CloneableStubClassA stubWoPackageProperty) {
    $stubWoPackageProperty = stubWoPackageProperty.clone();
  }

  private CloneableStubClassA $stubWoPackageProperty = new CloneableStubClassA();

  protected final CloneableStubClassA getStubWoProtectedProperty() {
    return $stubWoProtectedProperty.clone();
  }

  public final void setStubWoProtectedProperty(CloneableStubClassA stubWoProtectedProperty) {
    $stubWoProtectedProperty = stubWoProtectedProperty.clone();
  }

  private CloneableStubClassA $stubWoProtectedProperty = new CloneableStubClassA();

  // --- one read/write property per primitive type, plus String and Date ---

  public final int getStubPropertyInt() {
    return $stubPropertyInt;
  }

  public final void setStubPropertyInt(int stubPropertyInt) {
    $stubPropertyInt = stubPropertyInt;
  }

  private int $stubPropertyInt = 7;

  public final long getStubPropertyLong() {
    return $stubPropertyLong;
  }

  public final void setStubPropertyLong(long stubPropertyLong) {
    $stubPropertyLong = stubPropertyLong;
  }

  private long $stubPropertyLong = 7L;

  public final char getStubPropertyChar() {
    return $stubPropertyChar;
  }

  public final void setStubPropertyChar(char stubPropertyChar) {
    $stubPropertyChar = stubPropertyChar;
  }

  private char $stubPropertyChar = 'c';

  public final short getStubPropertyShort() {
    return $stubPropertyShort;
  }

  public final void setStubPropertyShort(short stubPropertyShort) {
    $stubPropertyShort = stubPropertyShort;
  }

  private short $stubPropertyShort = 7;

  public final byte getStubPropertyByte() {
    return $stubPropertyByte;
  }

  public final void setStubPropertyByte(byte stubPropertyByte) {
    $stubPropertyByte = stubPropertyByte;
  }

  private byte $stubPropertyByte = 7;

  // NOTE(review): boolean getter is "get"-prefixed rather than "is"-prefixed;
  // presumably deliberate fixture data — confirm before renaming.
  public final boolean getStubPropertyBoolean() {
    return $stubPropertyBoolean;
  }

  public final void setStubPropertyBoolean(boolean stubPropertyBoolean) {
    $stubPropertyBoolean = stubPropertyBoolean;
  }

  private boolean $stubPropertyBoolean = true;

  public final float getStubPropertyFloat() {
    return $stubPropertyFloat;
  }

  public final void setStubPropertyFloat(float stubPropertyFloat) {
    $stubPropertyFloat = stubPropertyFloat;
  }

  private float $stubPropertyFloat = 7.7F;

  public final double getStubPropertyDouble() {
    return $stubPropertyDouble;
  }

  public final void setStubPropertyDouble(double stubPropertyDouble) {
    $stubPropertyDouble = stubPropertyDouble;
  }

  private double $stubPropertyDouble = 7.7D;

  public final String getStubPropertyString() {
    return $stubPropertyString;
  }

  public final void setStubPropertyString(String stubPropertyString) {
    $stubPropertyString = stubPropertyString;
  }

  private String $stubPropertyString = STUB_PROPERTY_STRING_VALUE;

  public final Date getStubPropertyDate() {
    return $stubPropertyDate;
  }

  public final void setStubPropertyDate(Date stubPropertyDate) {
    $stubPropertyDate = stubPropertyDate;
  }

  private Date $stubPropertyDate = new Date();

  // --- property whose setter throws when handed an AnException instance ---

  public static class AnException extends Exception {
    // NOP
  }

  public final Object getExceptionProperty() {
    return $exceptionProperty;
  }

  /**
   * Stores {@code o}, unless it is an {@link AnException}, in which case that
   * exception is thrown instead of stored.
   */
  public final void setExceptionProperty(Object o) throws AnException {
    if (o instanceof AnException) {
      throw (AnException)o;
    }
    $exceptionProperty = o;
  }

  private Object $exceptionProperty;

  public final static String STUB_PROPERTY_STRING_VALUE = "String property stub";

}
apache-2.0
mztaylor/rice-git
rice-middleware/ksb/client-impl/src/main/java/org/kuali/rice/ksb/service/impl/BasicAuthenticationServiceImpl.java
3998
/**
 * Copyright 2005-2014 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.rice.ksb.service.impl;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.xml.namespace.QName;

import org.apache.commons.lang.StringUtils;
import org.kuali.rice.ksb.service.BasicAuthenticationConnectionCredentials;
import org.kuali.rice.ksb.service.BasicAuthenticationCredentials;
import org.kuali.rice.ksb.service.BasicAuthenticationService;

/**
 * Implements the BasicAuthenticationService
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 * @see org.kuali.rice.ksb.service.BasicAuthenticationService
 */
public class BasicAuthenticationServiceImpl implements BasicAuthenticationService {

    private Map<QName, List<BasicAuthenticationCredentials>> serviceCredentialsMap;
    private Map<QName, BasicAuthenticationConnectionCredentials> connectionCredentialsMap;

    /**
     * Constructs BasicAuthenticationServiceImpl with empty, synchronized
     * service- and connection-credential registries.
     */
    public BasicAuthenticationServiceImpl() {
        this.serviceCredentialsMap = Collections.synchronizedMap(
                new HashMap<QName, List<BasicAuthenticationCredentials>>());
        this.connectionCredentialsMap = Collections.synchronizedMap(
                new HashMap<QName, BasicAuthenticationConnectionCredentials>());
    }

    /**
     * Checks the supplied username/password against the credentials registered
     * for the given service. The first entry whose username and namespace match
     * decides the outcome; when no entry matches, authentication fails.
     */
    public boolean checkServiceAuthentication(String serviceNameSpaceURI, QName serviceName, String username,
            String password) {
        List<BasicAuthenticationCredentials> registered = serviceCredentialsMap.get(serviceName);
        if (registered == null) {
            return false;
        }
        // Iterating a Collections.synchronizedList still requires holding the
        // list's own monitor for the duration of the traversal.
        synchronized (registered) {
            for (BasicAuthenticationCredentials candidate : registered) {
                boolean sameUser = StringUtils.equals(username, candidate.getUsername());
                boolean sameNamespace = StringUtils.equals(serviceNameSpaceURI, candidate.getServiceNameSpaceURI());
                if (sameUser && sameNamespace) {
                    return StringUtils.equals(password, candidate.getPassword());
                }
            }
        }
        return false;
    }

    /** Looks up the connection credentials registered for the given service, or null. */
    public BasicAuthenticationConnectionCredentials getConnectionCredentials(String serviceNameSpaceURI,
            String serviceName) {
        return connectionCredentialsMap.get(new QName(serviceNameSpaceURI, serviceName));
    }

    /** Registers service credentials, creating the per-service list on first use. */
    public void registerServiceCredentials(BasicAuthenticationCredentials credentials) {
        QName key = new QName(credentials.getServiceNameSpaceURI(), credentials.getLocalServiceName());
        synchronized (serviceCredentialsMap) {
            List<BasicAuthenticationCredentials> forService = serviceCredentialsMap.get(key);
            if (forService == null) {
                forService = Collections.synchronizedList(new ArrayList<BasicAuthenticationCredentials>());
                serviceCredentialsMap.put(key, forService);
            }
            forService.add(credentials);
        }
    }

    /** Registers (or replaces) the connection credentials for a service. */
    public void registerConnectionCredentials(BasicAuthenticationConnectionCredentials credentials) {
        QName key = new QName(credentials.getServiceNameSpaceURI(), credentials.getLocalServiceName());
        synchronized (connectionCredentialsMap) {
            connectionCredentialsMap.put(key, credentials);
        }
    }

}
apache-2.0
miniway/presto
presto-kafka/src/test/java/io/prestosql/plugin/kafka/TestKafkaDistributed.java
1578
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.prestosql.plugin.kafka;

import io.airlift.tpch.TpchTable;
import io.prestosql.plugin.kafka.util.EmbeddedKafka;
import io.prestosql.tests.AbstractTestQueries;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;

import java.io.IOException;

import static io.prestosql.plugin.kafka.KafkaQueryRunner.createKafkaQueryRunner;
import static io.prestosql.plugin.kafka.util.EmbeddedKafka.createEmbeddedKafka;

/**
 * Runs the generic distributed query test suite ({@link AbstractTestQueries})
 * against a Kafka-backed query runner loaded with the TPC-H tables, using an
 * in-process embedded Kafka broker that is torn down after the class finishes.
 */
@Test
public class TestKafkaDistributed
        extends AbstractTestQueries
{
    // Owned by this test class; closed in destroy().
    private final EmbeddedKafka embeddedKafka;

    public TestKafkaDistributed()
            throws Exception
    {
        // Delegates to the broker-accepting constructor with a fresh embedded broker.
        this(createEmbeddedKafka());
    }

    /**
     * @param embeddedKafka the broker instance; this class takes ownership and
     *                      closes it when the test class is destroyed
     */
    public TestKafkaDistributed(EmbeddedKafka embeddedKafka)
    {
        // The supplier is invoked lazily by the superclass to build the query runner.
        super(() -> createKafkaQueryRunner(embeddedKafka, TpchTable.getTables()));
        this.embeddedKafka = embeddedKafka;
    }

    @AfterClass(alwaysRun = true)
    public void destroy()
            throws IOException
    {
        embeddedKafka.close();
    }
}
apache-2.0
jbottel/openstorefront
server/openstorefront/describe/src/main/java/edu/usu/sdl/describe/model/Edh.java
1373
/*
 * XML-bound model for an <Edh> element (Simple XML framework bindings).
 * NOTE(review): replaced the leftover IDE "To change this license header"
 * template comment with a descriptive one.
 */
package edu.usu.sdl.describe.model;

import org.simpleframework.xml.Element;
import org.simpleframework.xml.Root;

/**
 * Model for the {@code Edh} element of a describe record. Maps the
 * {@code Identifier}, {@code DataItemCreateDateTime}, {@code ResponsibleEntity}
 * and {@code Security} child elements; all children are optional, and unknown
 * elements are tolerated ({@code strict = false}).
 *
 * @author dshurtleff
 */
@Root(strict = false)
public class Edh
{
	// Maps <Identifier>; may be null when absent.
	@Element(name="Identifier", required = false)
	private String identifier;

	// Maps <DataItemCreateDateTime>; kept as the raw string — no date parsing here.
	@Element(name="DataItemCreateDateTime", required = false)
	private String createDts;

	// Maps the nested <ResponsibleEntity> element.
	@Element(name = "ResponsibleEntity", required = false)
	private ResponsibleEntity responsibleEntity;

	// Maps the nested <Security> element.
	@Element(name = "Security", required = false)
	private Security security;

	public Edh()
	{
	}

	public String getIdentifier()
	{
		return identifier;
	}

	public void setIdentifier(String identifier)
	{
		this.identifier = identifier;
	}

	public String getCreateDts()
	{
		return createDts;
	}

	public void setCreateDts(String createDts)
	{
		this.createDts = createDts;
	}

	public ResponsibleEntity getResponsibleEntity()
	{
		return responsibleEntity;
	}

	public void setResponsibleEntity(ResponsibleEntity responsibleEntity)
	{
		this.responsibleEntity = responsibleEntity;
	}

	public Security getSecurity()
	{
		return security;
	}

	public void setSecurity(Security security)
	{
		this.security = security;
	}

}
apache-2.0
gemmellr/qpid-jms
qpid-jms-interop-tests/qpid-jms-activemq-tests/src/test/java/org/apache/qpid/jms/joram/JoramMessageDefaultTest.java
1794
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.qpid.jms.joram; import junit.framework.Test; import junit.framework.TestSuite; import org.junit.After; import org.junit.Before; import org.objectweb.jtests.jms.conform.message.MessageDefaultTest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Runs the Joram MessageDefaultTest */ public class JoramMessageDefaultTest extends MessageDefaultTest { private final Logger LOG = LoggerFactory.getLogger(getClass()); public JoramMessageDefaultTest(String name) { super(name); } @Before @Override public void setUp() throws Exception { LOG.info("========== Starting test: " + getName() + " =========="); super.setUp(); } @After @Override public void tearDown() throws Exception { LOG.info("========== Finsished test: " + getName() + " =========="); super.tearDown(); } public static Test suite() { return new TestSuite(JoramMessageDefaultTest.class); } }
apache-2.0
geekboxzone/mmallow_external_jetty
src/java/org/eclipse/jetty/io/EndPoint.java
7116
//
//  ========================================================================
//  Copyright (c) 1995-2014 Mort Bay Consulting Pty. Ltd.
//  ------------------------------------------------------------------------
//  All rights reserved. This program and the accompanying materials
//  are made available under the terms of the Eclipse Public License v1.0
//  and Apache License v2.0 which accompanies this distribution.
//
//      The Eclipse Public License is available at
//      http://www.eclipse.org/legal/epl-v10.html
//
//      The Apache License v2.0 is available at
//      http://www.opensource.org/licenses/apache2.0.php
//
//  You may elect to redistribute this code under either of these licenses.
//  ========================================================================
//

package org.eclipse.jetty.io;

import java.io.IOException;

/**
 * A transport EndPoint.
 * <p>
 * Abstraction over the byte source/sink backing a connection (socket,
 * channel, etc.), expressed in terms of Jetty {@link Buffer} fill/flush
 * operations rather than raw streams.
 */
public interface EndPoint
{
    /**
     * Shutdown any backing output stream associated with the endpoint
     */
    void shutdownOutput() throws IOException;

    boolean isOutputShutdown();

    /**
     * Shutdown any backing input stream associated with the endpoint
     */
    void shutdownInput() throws IOException;

    boolean isInputShutdown();

    /**
     * Close any backing stream associated with the endpoint
     */
    void close() throws IOException;

    /**
     * Fill the buffer from the current putIndex to its capacity from whatever
     * byte source is backing the buffer. The putIndex is increased if bytes filled.
     * The buffer may choose to do a compact before filling.
     * @return an <code>int</code> value indicating the number of bytes
     * filled or -1 if EOF is reached.
     * @throws EofException If input is shutdown or the endpoint is closed.
     */
    int fill(Buffer buffer) throws IOException;

    /**
     * Flush the buffer from the current getIndex to its putIndex using whatever byte
     * sink is backing the buffer. The getIndex is updated with the number of bytes flushed.
     * Any mark set is cleared.
     * If the entire contents of the buffer are flushed, then an implicit empty() is done.
     *
     * @param buffer The buffer to flush. This buffers getIndex is updated.
     * @return the number of bytes written
     * @throws EofException If the endpoint is closed or output is shutdown.
     */
    int flush(Buffer buffer) throws IOException;

    /**
     * Flush the buffer from the current getIndex to its putIndex using whatever byte
     * sink is backing the buffer. The getIndex is updated with the number of bytes flushed.
     * Any mark set is cleared.
     * If the entire contents of the buffer are flushed, then an implicit empty() is done.
     * The passed header/trailer buffers are written before/after the contents of this buffer. This may be done
     * either as gather writes, as a poke into this buffer or as several writes. The implementation is free to
     * select the optimal mechanism.
     * @param header A buffer to write before flushing this buffer. This buffers getIndex is updated.
     * @param buffer The buffer to flush. This buffers getIndex is updated.
     * @param trailer A buffer to write after flushing this buffer. This buffers getIndex is updated.
     * @return the total number of bytes written.
     */
    int flush(Buffer header, Buffer buffer, Buffer trailer) throws IOException;

    /* ------------------------------------------------------------ */
    /**
     * @return The local IP address to which this <code>EndPoint</code> is bound, or <code>null</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public String getLocalAddr();

    /* ------------------------------------------------------------ */
    /**
     * @return The local host name to which this <code>EndPoint</code> is bound, or <code>null</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public String getLocalHost();

    /* ------------------------------------------------------------ */
    /**
     * @return The local port number on which this <code>EndPoint</code> is listening, or <code>0</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public int getLocalPort();

    /* ------------------------------------------------------------ */
    /**
     * @return The remote IP address to which this <code>EndPoint</code> is connected, or <code>null</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public String getRemoteAddr();

    /* ------------------------------------------------------------ */
    /**
     * @return The host name of the remote machine to which this <code>EndPoint</code> is connected, or <code>null</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public String getRemoteHost();

    /* ------------------------------------------------------------ */
    /**
     * @return The remote port number to which this <code>EndPoint</code> is connected, or <code>0</code>
     * if this <code>EndPoint</code> does not represent a network connection.
     */
    public int getRemotePort();

    /* ------------------------------------------------------------ */
    public boolean isBlocking();

    /* ------------------------------------------------------------ */
    public boolean blockReadable(long millisecs) throws IOException;

    /* ------------------------------------------------------------ */
    public boolean blockWritable(long millisecs) throws IOException;

    /* ------------------------------------------------------------ */
    public boolean isOpen();

    /* ------------------------------------------------------------ */
    /**
     * @return The underlying transport object (socket, channel, etc.)
     */
    public Object getTransport();

    /* ------------------------------------------------------------ */
    /** Flush any buffered output.
     * May fail to write all data if endpoint is non-blocking
     * @throws EofException If the endpoint is closed or output is shutdown.
     */
    public void flush() throws IOException;

    /* ------------------------------------------------------------ */
    /** Get the max idle time in ms.
     * <p>The max idle time is the time the endpoint can be idle before
     * extraordinary handling takes place. This loosely corresponds to
     * the {@link java.net.Socket#getSoTimeout()} for blocking connections,
     * but {@link AsyncEndPoint} implementations must use other mechanisms
     * to implement the max idle time.
     * @return the max idle time in ms or if ms <= 0 implies an infinite timeout
     */
    public int getMaxIdleTime();

    /* ------------------------------------------------------------ */
    /** Set the max idle time.
     * @param timeMs the max idle time in MS. Timeout <= 0 implies an infinite timeout
     * @throws IOException if the timeout cannot be set.
     */
    public void setMaxIdleTime(int timeMs) throws IOException;
}
apache-2.0
HackShare/Presto
presto-main/src/main/java/com/facebook/presto/ErrorCodes.java
1788
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto; import com.facebook.presto.spi.ErrorCode; import com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.StandardErrorCode; import com.facebook.presto.sql.analyzer.SemanticException; import com.facebook.presto.sql.parser.ParsingException; import com.facebook.presto.execution.Failure; import javax.annotation.Nullable; public final class ErrorCodes { private ErrorCodes() { } @Nullable public static ErrorCode toErrorCode(@Nullable Throwable throwable) { if (throwable == null) { return null; } if (throwable instanceof PrestoException) { return ((PrestoException) throwable).getErrorCode(); } if (throwable instanceof Failure && ((Failure) throwable).getErrorCode() != null) { return ((Failure) throwable).getErrorCode(); } if (throwable instanceof ParsingException || throwable instanceof SemanticException) { return StandardErrorCode.SYNTAX_ERROR.toErrorCode(); } if (throwable.getCause() != null) { return toErrorCode(throwable.getCause()); } return StandardErrorCode.INTERNAL.toErrorCode(); } }
apache-2.0
nazarewk/elasticsearch
modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
24120
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport.netty4;

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Future;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkService.TcpSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportServiceAdapter;
import org.elasticsearch.transport.TransportSettings;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;

/**
 * There are 4 types of connections per node, low/med/high/ping. Low if for batch oriented APIs (like recovery or
 * batch) with high payload that will cause regular request. (like search or single index) to take
 * longer. Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for
 * sending out ping requests to other nodes.
 */
public class Netty4Transport extends TcpTransport<Channel> {

    static {
        // One-time Netty environment setup (runs before any instance is created).
        Netty4Utils.setup();
    }

    // Number of worker threads; defaults to 2x available processors.
    public static final Setting<Integer> WORKER_COUNT =
        new Setting<>("transport.netty.worker_count",
            (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
            (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope,
            Property.Shared);

    public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY =
        Setting.byteSizeSetting(
                "transport.netty.max_cumulation_buffer_capacity",
                new ByteSizeValue(-1),
                Property.NodeScope,
                Property.Shared);
    public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS =
        Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, Property.NodeScope, Property.Shared);

    // Receive-predictor sizing: when min == max a fixed allocator is used, otherwise an adaptive one.
    public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
            "transport.netty.receive_predictor_size",
            new ByteSizeValue(64, ByteSizeUnit.KB),
            Property.NodeScope);
    public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN =
        byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);
    public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX =
        byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);
    public static final Setting<Integer> NETTY_BOSS_COUNT =
        intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope, Property.Shared);

    protected final ByteSizeValue maxCumulationBufferCapacity;
    protected final int maxCompositeBufferComponents;
    protected final RecvByteBufAllocator recvByteBufAllocator;
    protected final int workerCount;
    protected final ByteSizeValue receivePredictorMin;
    protected final ByteSizeValue receivePredictorMax;
    // package private for testing
    volatile Netty4OpenChannelsHandler serverOpenChannels;
    protected volatile Bootstrap bootstrap;
    // One server bootstrap per transport profile, keyed by profile name.
    protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();

    public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
                           NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
        super("netty", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
        this.workerCount = WORKER_COUNT.get(settings);
        this.maxCumulationBufferCapacity = NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings);
        this.maxCompositeBufferComponents = NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
        // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
        this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
        this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
        if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
            recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes());
        } else {
            recvByteBufAllocator = new AdaptiveRecvByteBufAllocator((int) receivePredictorMin.getBytes(),
                (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes());
        }
    }

    TransportServiceAdapter transportServiceAdapter() {
        return transportServiceAdapter;
    }

    @Override
    protected void doStart() {
        boolean success = false;
        try {
            bootstrap = createBootstrap();
            if (NetworkService.NETWORK_SERVER.get(settings)) {
                final Netty4OpenChannelsHandler openChannels = new Netty4OpenChannelsHandler(logger);
                this.serverOpenChannels = openChannels;
                // loop through all profiles and start them up, special handling for default one
                for (Map.Entry<String, Settings> entry : buildProfileSettings().entrySet()) {
                    // merge fallback settings with default settings with profile settings so we have complete settings with default values
                    final Settings settings = Settings.builder()
                        .put(createFallbackSettings())
                        .put(entry.getValue()).build();
                    createServerBootstrap(entry.getKey(), settings);
                    bindServer(entry.getKey(), settings);
                }
            }
            super.doStart();
            success = true;
        } finally {
            if (success == false) {
                // roll back any partially started resources
                doStop();
            }
        }
    }

    /** Builds the client-side (outbound connection) bootstrap from TCP settings. */
    private Bootstrap createBootstrap() {
        final Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)));
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.handler(getClientChannelInitializer());
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(defaultConnectionProfile.getConnectTimeout().millis()));
        bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));
        bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));
        final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
        if (tcpSendBufferSize.getBytes() > 0) {
            bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
        }
        final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
        if (tcpReceiveBufferSize.getBytes() > 0) {
            bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
        }
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
        bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
        bootstrap.validate();
        return bootstrap;
    }

    /**
     * Builds the settings that profile-specific settings fall back to when a
     * key is not set on the profile itself (legacy transport.netty.* keys included).
     */
    private Settings createFallbackSettings() {
        Settings.Builder fallbackSettingsBuilder = Settings.builder();
        List<String> fallbackBindHost = TransportSettings.BIND_HOST.get(settings);
        if (fallbackBindHost.isEmpty() == false) {
            fallbackSettingsBuilder.putArray("bind_host", fallbackBindHost);
        }
        List<String> fallbackPublishHost = TransportSettings.PUBLISH_HOST.get(settings);
        if (fallbackPublishHost.isEmpty() == false) {
            fallbackSettingsBuilder.putArray("publish_host", fallbackPublishHost);
        }
        boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TcpSettings.TCP_NO_DELAY.get(settings));
        fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay);
        boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TcpSettings.TCP_KEEP_ALIVE.get(settings));
        fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive);
        boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings));
        fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
        ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size",
            TCP_SEND_BUFFER_SIZE.get(settings));
        if (fallbackTcpSendBufferSize.getBytes() >= 0) {
            fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
        }
        ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size",
            TCP_RECEIVE_BUFFER_SIZE.get(settings));
        if (fallbackTcpBufferSize.getBytes() >= 0) {
            fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
        }
        return fallbackSettingsBuilder.build();
    }

    /** Creates and validates a server bootstrap for the named transport profile. */
    private void createServerBootstrap(String name, Settings settings) {
        if (logger.isDebugEnabled()) {
            logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], "
                    + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
                name, workerCount, settings.get("port"), settings.get("bind_host"), settings.get("publish_host"), compress,
                defaultConnectionProfile.getConnectTimeout(),
                defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY),
                defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK),
                defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.REG),
                defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE),
                defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.PING),
                receivePredictorMin, receivePredictorMax);
        }

        final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name);

        final ServerBootstrap serverBootstrap = new ServerBootstrap();

        serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory));
        serverBootstrap.channel(NioServerSocketChannel.class);

        serverBootstrap.childHandler(getServerChannelInitializer(name, settings));

        serverBootstrap.childOption(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));
        serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));

        final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
        if (tcpSendBufferSize != null && tcpSendBufferSize.getBytes() > 0) {
            serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
        }

        final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
        if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.getBytes() > 0) {
            // NOTE(review): uses bytesAsInt() here but getBytes() for the client bootstrap above —
            // presumably equivalent for in-range sizes; confirm intent.
            serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.bytesAsInt()));
        }

        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);

        final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
        serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
        serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
        serverBootstrap.validate();

        serverBootstraps.put(name, serverBootstrap);
    }

    protected ChannelHandler getServerChannelInitializer(String name, Settings settings) {
        return new ServerChannelInitializer(name, settings);
    }

    protected ChannelHandler getClientChannelInitializer() {
        return new ClientChannelInitializer();
    }

    /** Unwraps ElasticsearchException causes and forwards to the transport's onException handling. */
    protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
        final Throwable t = unwrapped != null ? unwrapped : cause;
        onException(ctx.channel(), t instanceof Exception ? (Exception) t : new ElasticsearchException(t));
    }

    @Override
    public long serverOpen() {
        Netty4OpenChannelsHandler channels = serverOpenChannels;
        return channels == null ? 0 : channels.numberOfOpenChannels();
    }

    @Override
    protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile) {
        final Channel[] channels = new Channel[profile.getNumConnections()];
        final NodeChannels nodeChannels = new NodeChannels(node, channels, profile);
        boolean success = false;
        try {
            final TimeValue connectTimeout;
            final Bootstrap bootstrap;
            final TimeValue defaultConnectTimeout = defaultConnectionProfile.getConnectTimeout();
            if (profile.getConnectTimeout() != null && profile.getConnectTimeout().equals(defaultConnectTimeout) == false) {
                // profile overrides the timeout: clone the bootstrap (sharing the event loop group) to apply it
                bootstrap = this.bootstrap.clone(this.bootstrap.config().group());
                bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(profile.getConnectTimeout().millis()));
                connectTimeout = profile.getConnectTimeout();
            } else {
                connectTimeout = defaultConnectTimeout;
                bootstrap = this.bootstrap;
            }
            final ArrayList<ChannelFuture> connections = new ArrayList<>(channels.length);
            final InetSocketAddress address = node.getAddress().address();
            for (int i = 0; i < channels.length; i++) {
                connections.add(bootstrap.connect(address));
            }
            final Iterator<ChannelFuture> iterator = connections.iterator();
            try {
                for (int i = 0; i < channels.length; i++) {
                    assert iterator.hasNext();
                    ChannelFuture future = iterator.next();
                    // wait a bit longer than the configured timeout to let Netty's own timeout fire first
                    future.awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
                    if (!future.isSuccess()) {
                        throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", future.cause());
                    }
                    channels[i] = future.channel();
                    channels[i].closeFuture().addListener(new ChannelCloseListener(node));
                }
                assert iterator.hasNext() == false : "not all created connection have been consumed";
            } catch (final RuntimeException e) {
                // best effort: cancel/close everything we opened so far, suppressing secondary failures
                for (final ChannelFuture future : Collections.unmodifiableList(connections)) {
                    FutureUtils.cancel(future);
                    if (future.channel() != null && future.channel().isOpen()) {
                        try {
                            future.channel().close();
                        } catch (Exception inner) {
                            e.addSuppressed(inner);
                        }
                    }
                }
                throw e;
            }
            success = true;
        } finally {
            if (success == false) {
                try {
                    nodeChannels.close();
                } catch (IOException e) {
                    logger.trace("exception while closing channels", e);
                }
            }
        }
        return nodeChannels;
    }

    /** Notifies the transport when a node's channel closes so the node can be disconnected. */
    private class ChannelCloseListener implements ChannelFutureListener {

        private final DiscoveryNode node;

        private ChannelCloseListener(DiscoveryNode node) {
            this.node = node;
        }

        @Override
        public void operationComplete(final ChannelFuture future) throws Exception {
            onChannelClosed(future.channel());
            NodeChannels nodeChannels = connectedNodes.get(node);
            if (nodeChannels != null && nodeChannels.hasChannel(future.channel())) {
                threadPool.generic().execute(() -> disconnectFromNode(node, future.channel(), "channel closed event"));
            }
        }
    }

    @Override
    protected void sendMessage(Channel channel, BytesReference reference, ActionListener<Channel> listener) {
        final ChannelFuture future = channel.writeAndFlush(Netty4Utils.toByteBuf(reference));
        future.addListener(f -> {
            if (f.isSuccess()) {
                listener.onResponse(channel);
            } else {
                Throwable cause = f.cause();
                // If the Throwable is an Error something has gone very wrong and Netty4MessageChannelHandler is
                // going to cause that to bubble up and kill the process.
                if (cause instanceof Exception) {
                    listener.onFailure((Exception) cause);
                }
            }
        });
    }

    @Override
    protected void closeChannels(final List<Channel> channels) throws IOException {
        Netty4Utils.closeChannels(channels);
    }

    @Override
    protected InetSocketAddress getLocalAddress(Channel channel) {
        return (InetSocketAddress) channel.localAddress();
    }

    @Override
    protected Channel bind(String name, InetSocketAddress address) {
        return serverBootstraps.get(name).bind(address).syncUninterruptibly().channel();
    }

    ScheduledPing getPing() {
        return scheduledPing;
    }

    @Override
    protected boolean isOpen(Channel channel) {
        return channel.isOpen();
    }

    @Override
    @SuppressForbidden(reason = "debug")
    protected void stopInternal() {
        // Close accepted channels first, then shut down all event loop groups gracefully.
        Releasables.close(serverOpenChannels, () -> {
            final List<Tuple<String, Future<?>>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size());
            for (final Map.Entry<String, ServerBootstrap> entry : serverBootstraps.entrySet()) {
                serverBootstrapCloseFutures.add(
                    Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS)));
            }
            for (final Tuple<String, Future<?>> future : serverBootstrapCloseFutures) {
                future.v2().awaitUninterruptibly();
                if (!future.v2().isSuccess()) {
                    logger.debug(
                        (Supplier<?>) () -> new ParameterizedMessage(
                            "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause());
                }
            }
            serverBootstraps.clear();

            if (bootstrap != null) {
                bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
                bootstrap = null;
            }
        });
    }

    /** Pipeline setup for outbound (client) connections. */
    protected class ClientChannelInitializer extends ChannelInitializer<Channel> {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder());
            // using a dot as a prefix means this cannot come from any settings parsed
            ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client"));
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
            Netty4Utils.maybeDie(cause);
            super.exceptionCaught(ctx, cause);
        }
    }

    /** Pipeline setup for inbound (server) connections of a given profile. */
    protected class ServerChannelInitializer extends ChannelInitializer<Channel> {

        protected final String name;
        protected final Settings settings;

        protected ServerChannelInitializer(String name, Settings settings) {
            this.name = name;
            this.settings = settings;
        }

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels);
            ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder());
            ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name));
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
            Netty4Utils.maybeDie(cause);
            super.exceptionCaught(ctx, cause);
        }
    }
}
apache-2.0
trentontrees/dawg
libraries/dawg-house/src/main/java/com/comcast/video/dawg/controller/house/filter/Condition.java
1259
/**
 * Copyright 2010 Comcast Cable Communications Management, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.comcast.video.dawg.controller.house.filter;

import org.springframework.data.mongodb.core.query.Criteria;

/**
 * Interface for a condition on searching for a device. This will provide a way
 * to convert a user query to a mongodb {@link Criteria}
 * @author Kevin Pearson
 *
 */
public interface Condition {

    /**
     * Converts to a mongo db Criteria to apply to a query
     * @return the {@link Criteria} equivalent of this condition
     */
    Criteria toCriteria();

    /**
     * Converts to a mongo db Criteria to apply to a query
     * @param negate negates the condition to do the opposite
     * @return the {@link Criteria} equivalent of this condition, negated when requested
     */
    Criteria toCriteria(boolean negate);
}
apache-2.0
zer0se7en/netty
handler/src/main/java/io/netty/handler/ssl/SslMasterKeyHandler.java
7963
/*
 * Copyright 2019 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.handler.ssl;

import io.netty.buffer.ByteBufUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.internal.ReflectionUtil;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLSession;
import java.lang.reflect.Field;

/**
 * The {@link SslMasterKeyHandler} is a channel-handler you can include in your pipeline to consume the master key
 * &amp; session identifier for a TLS session.
 * This can be very useful, for instance the {@link WiresharkSslMasterKeyHandler} implementation will
 * log the secret &amp; identifier in a format that is consumable by Wireshark -- allowing easy decryption of pcap/tcpdumps.
 */
public abstract class SslMasterKeyHandler extends ChannelInboundHandlerAdapter {

    private static final InternalLogger logger = InternalLoggerFactory.getInstance(SslMasterKeyHandler.class);

    /**
     * The JRE SSLSessionImpl cannot be imported
     */
    private static final Class<?> SSL_SESSIONIMPL_CLASS;

    /**
     * The master key field in the SSLSessionImpl
     */
    private static final Field SSL_SESSIONIMPL_MASTER_SECRET_FIELD;

    /**
     * A system property that can be used to turn on/off the {@link SslMasterKeyHandler} dynamically without having
     * to edit your pipeline.
     * <code>-Dio.netty.ssl.masterKeyHandler=true</code>
     */
    public static final String SYSTEM_PROP_KEY = "io.netty.ssl.masterKeyHandler";

    /**
     * The unavailability cause of whether the private Sun implementation of SSLSessionImpl is available.
     */
    private static final Throwable UNAVAILABILITY_CAUSE;

    static {
        // Probe for the Sun-internal session class and its masterSecret field once, at class load.
        // Any failure (class missing, field renamed, module access denied) is recorded as the cause.
        Throwable cause;
        Class<?> clazz = null;
        Field field = null;
        try {
            clazz = Class.forName("sun.security.ssl.SSLSessionImpl");
            field = clazz.getDeclaredField("masterSecret");
            cause = ReflectionUtil.trySetAccessible(field, true);
        } catch (Throwable e) {
            cause = e;
            logger.debug("sun.security.ssl.SSLSessionImpl is unavailable.", e);
        }
        UNAVAILABILITY_CAUSE = cause;
        SSL_SESSIONIMPL_CLASS = clazz;
        SSL_SESSIONIMPL_MASTER_SECRET_FIELD = field;
    }

    /**
     * Constructor.
     */
    protected SslMasterKeyHandler() {
    }

    /**
     * Ensure that SSLSessionImpl is available.
     * @throws IllegalStateException if unavailable
     */
    public static void ensureSunSslEngineAvailability() {
        if (UNAVAILABILITY_CAUSE != null) {
            throw new IllegalStateException(
                    "Failed to find SSLSessionImpl on classpath", UNAVAILABILITY_CAUSE);
        }
    }

    /**
     * Returns the cause of unavailability.
     *
     * @return the cause if unavailable. {@code null} if available.
     */
    public static Throwable sunSslEngineUnavailabilityCause() {
        return UNAVAILABILITY_CAUSE;
    }

    /**
     * Returns {@code true} if and only if sun.security.ssl.SSLSessionImpl exists in the runtime.
     */
    public static boolean isSunSslEngineAvailable() {
        return UNAVAILABILITY_CAUSE == null;
    }

    /**
     * Consume the master key for the session and the sessionId
     * @param masterKey A 48-byte secret shared between the client and server.
     * @param session The current TLS session
     */
    protected abstract void accept(SecretKey masterKey, SSLSession session);

    @Override
    public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
        //only try to log the session info if the ssl handshake has successfully completed.
        if (evt == SslHandshakeCompletionEvent.SUCCESS && masterKeyHandlerEnabled()) {
            final SslHandler handler = ctx.pipeline().get(SslHandler.class);
            final SSLEngine engine = handler.engine();
            final SSLSession sslSession = engine.getSession();

            //the OpenJDK does not expose a way to get the master secret, so try to use reflection to get it.
            if (isSunSslEngineAvailable() && sslSession.getClass().equals(SSL_SESSIONIMPL_CLASS)) {
                final SecretKey secretKey;
                try {
                    secretKey = (SecretKey) SSL_SESSIONIMPL_MASTER_SECRET_FIELD.get(sslSession);
                } catch (IllegalAccessException e) {
                    throw new IllegalArgumentException("Failed to access the field 'masterSecret' " +
                            "via reflection.", e);
                }
                accept(secretKey, sslSession);
            } else if (OpenSsl.isAvailable() && engine instanceof ReferenceCountedOpenSslEngine) {
                // OpenSSL-backed engines expose the master key directly, no reflection needed.
                SecretKeySpec secretKey = ((ReferenceCountedOpenSslEngine) engine).masterKey();
                accept(secretKey, sslSession);
            }
        }

        ctx.fireUserEventTriggered(evt);
    }

    /**
     * Checks if the handler is set up to actually handle/accept the event.
     * By default the {@link #SYSTEM_PROP_KEY} property is checked, but any implementations of this class are
     * free to override if they have different mechanisms of checking.
     *
     * @return true if it should handle, false otherwise.
     */
    protected boolean masterKeyHandlerEnabled() {
        return SystemPropertyUtil.getBoolean(SYSTEM_PROP_KEY, false);
    }

    /**
     * Create a {@link WiresharkSslMasterKeyHandler} instance.
     * This TLS master key handler logs the master key and session-id in a format
     * understood by Wireshark -- this can be especially useful if you need to ever
     * decrypt a TLS session and are using perfect forward secrecy (i.e. Diffie-Hellman)
     * The key and session identifier are forwarded to the log named 'io.netty.wireshark'.
     */
    public static SslMasterKeyHandler newWireSharkSslMasterKeyHandler() {
        return new WiresharkSslMasterKeyHandler();
    }

    /**
     * Record the session identifier and master key to the {@link InternalLogger} named <code>io.netty.wireshark</code>.
     * ex. <code>RSA Session-ID:XXX Master-Key:YYY</code>
     * This format is understood by Wireshark 1.6.0.
     * https://code.wireshark.org/review/gitweb?p=wireshark.git;a=commit;h=686d4cabb41185591c361f9ec6b709034317144b
     * The key and session identifier are forwarded to the log named 'io.netty.wireshark'.
     */
    private static final class WiresharkSslMasterKeyHandler extends SslMasterKeyHandler {

        private static final InternalLogger wireshark_logger =
                InternalLoggerFactory.getInstance("io.netty.wireshark");

        @Override
        protected void accept(SecretKey masterKey, SSLSession session) {
            // TLS master secrets are always 48 bytes; anything else indicates a broken extraction.
            if (masterKey.getEncoded().length != 48) {
                throw new IllegalArgumentException("An invalid length master key was provided.");
            }
            final byte[] sessionId = session.getId();
            wireshark_logger.warn("RSA Session-ID:{} Master-Key:{}",
                    ByteBufUtil.hexDump(sessionId).toLowerCase(),
                    ByteBufUtil.hexDump(masterKey.getEncoded()).toLowerCase());
        }
    }
}
apache-2.0
apache/jackrabbit-ocm
src/test/java/org/apache/jackrabbit/ocm/manager/query/AnnotationSimpleQueryTest.java
8546
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.ocm.manager.query;

import java.util.ArrayList;
import java.util.Collection;

import junit.framework.Test;
import junit.framework.TestSuite;

import org.apache.jackrabbit.ocm.AnnotationRepositoryTestBase;
import org.apache.jackrabbit.ocm.exception.JcrMappingException;
import org.apache.jackrabbit.ocm.manager.ObjectContentManager;
import org.apache.jackrabbit.ocm.query.Filter;
import org.apache.jackrabbit.ocm.query.Query;
import org.apache.jackrabbit.ocm.query.QueryManager;
import org.apache.jackrabbit.ocm.testmodel.Page;
import org.apache.jackrabbit.ocm.testmodel.Paragraph;

/**
 * Test QueryManagerImpl Query methods.
 *
 * Exercises equal-to, like, or, and order-by filters against a small fixture
 * page ("/test") with four paragraphs inserted by {@link #importData()}.
 *
 * @author <a href="mailto:christophe.lombart@sword-technologies.com">Christophe Lombart</a>
 */
public class AnnotationSimpleQueryTest extends AnnotationRepositoryTestBase {

    public static Test suite() {
        // All methods starting with "test" will be executed in the test suite.
        return new TestSuite(AnnotationSimpleQueryTest.class);
    }

    /**
     * @see junit.framework.TestCase#setUp()
     */
    protected void setUp() throws Exception {
        super.setUp();
        importData();
    }

    /**
     * Test equalTo: a single-object lookup must return exactly "Para 1".
     */
    public void testGetObjectEqualsTo() {
        try {
            // Build the Query Object
            QueryManager queryManager = getObjectContentManager().getQueryManager();
            Filter filter = queryManager.createFilter(Paragraph.class);
            filter.addEqualTo("text", "Para 1");
            Query query = queryManager.createQuery(filter);

            ObjectContentManager ocm = this.getObjectContentManager();
            Paragraph paragraph = (Paragraph) ocm.getObject(query);
            assertNotNull("Object is null", paragraph);
            assertTrue("Invalid paragraph found", paragraph.getText().equals("Para 1"));
        } catch (Exception e) {
            e.printStackTrace();
            fail("Exception occurs during the unit test : " + e);
        }
    }

    /**
     * Test equalTo with an explicit scope: the collection query must find exactly one match.
     */
    public void testGetObjectsEqualsTo() {
        try {
            // Build the Query Object
            QueryManager queryManager = getObjectContentManager().getQueryManager();
            Filter filter = queryManager.createFilter(Paragraph.class);
            filter.addEqualTo("text", "Para 1");
            filter.setScope("/test/");
            Query query = queryManager.createQuery(filter);

            ObjectContentManager ocm = this.getObjectContentManager();
            Collection result = ocm.getObjects(query);
            assertEquals("Invalid number of objects - should be = 1", 1, result.size());
            Paragraph paragraph = (Paragraph) result.iterator().next();
            assertTrue("Invalid paragraph found", paragraph.getText().equals("Para 1"));
        } catch (Exception e) {
            e.printStackTrace();
            fail("Exception occurs during the unit test : " + e);
        }
    }

    /**
     * Test the "like" expression: "Para%" must match the three "Para N" fixtures
     * but not "Another Para ".
     */
    public void testGetObjectsLike() {
        try {
            // Build the Query Object
            QueryManager queryManager = getObjectContentManager().getQueryManager();
            Filter filter = queryManager.createFilter(Paragraph.class);
            filter.addLike("text", "Para%");
            filter.setScope("/test/");
            Query query = queryManager.createQuery(filter);

            ObjectContentManager ocm = this.getObjectContentManager();
            Collection result = ocm.getObjects(query);
            assertEquals("Invalid number of objects - should be = 3", 3, result.size());

            Paragraph[] paragraphs = (Paragraph[]) result.toArray(new Paragraph[result.size()]);
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 1"));
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 2"));
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 3"));
        } catch (Exception e) {
            e.printStackTrace();
            fail("Exception occurs during the unit test : " + e);
        }
    }

    /**
     * Build an or expression between 2 filters: matches of either filter must be returned.
     */
    public void testGetObjectsOr() {
        try {
            // Build the Query Object
            QueryManager queryManager = getObjectContentManager().getQueryManager();
            Filter filter1 = queryManager.createFilter(Paragraph.class);
            filter1.addEqualTo("text", "Para 1");
            filter1.setScope("/test/");

            Filter filter2 = queryManager.createFilter(Paragraph.class);
            filter2.addEqualTo("text", "Para 2");

            filter1.addOrFilter(filter2);
            Query query = queryManager.createQuery(filter1);

            ObjectContentManager ocm = this.getObjectContentManager();
            Collection result = ocm.getObjects(query);
            assertEquals("Invalid number of objects - should be = 2", 2, result.size());

            Paragraph[] paragraphs = (Paragraph[]) result.toArray(new Paragraph[result.size()]);
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 1"));
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 2"));
        } catch (Exception e) {
            e.printStackTrace();
            fail("Exception occurs during the unit test : " + e);
        }
    }

    /**
     * Test order-by on a like query; only the result membership is asserted, not the order.
     */
    public void testGetObjectOrderBy() {
        try {
            // Build the Query Object
            QueryManager queryManager = getObjectContentManager().getQueryManager();
            Filter filter = queryManager.createFilter(Paragraph.class);
            filter.addLike("text", "Para%");
            filter.setScope("/test/");
            Query query = queryManager.createQuery(filter);
            query.addOrderByDescending("text");

            ObjectContentManager ocm = this.getObjectContentManager();
            Collection result = ocm.getObjects(query);
            assertEquals("Invalid number of objects - should be = 3", 3, result.size());

            Paragraph[] paragraphs = (Paragraph[]) result.toArray(new Paragraph[result.size()]);
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 1"));
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 2"));
            assertTrue("Invalid paragraph found", this.containsText(paragraphs, "Para 3"));
        } catch (Exception e) {
            e.printStackTrace();
            fail("Exception occurs during the unit test : " + e);
        }
    }

    /**
     * Insert the fixture page at "/test" with four paragraphs.
     */
    private void importData() throws JcrMappingException {
        ObjectContentManager ocm = getObjectContentManager();
        Page page = new Page();
        page.setPath("/test");
        page.setTitle("Page Title");

        // Use a parameterized list instead of a raw ArrayList (raw-type cleanup).
        ArrayList<Paragraph> paragraphs = new ArrayList<Paragraph>();
        paragraphs.add(new Paragraph("Para 1"));
        paragraphs.add(new Paragraph("Para 2"));
        paragraphs.add(new Paragraph("Para 3"));
        paragraphs.add(new Paragraph("Another Para "));
        page.setParagraphs(paragraphs);

        ocm.insert(page);
        ocm.save();
    }

    /**
     * @return true when one of the paragraphs carries exactly the given text.
     */
    private boolean containsText(Paragraph[] paragraphs, String text) {
        for (Paragraph paragraph : paragraphs) {
            if (paragraph.getText().equals(text)) {
                return true;
            }
        }
        return false;
    }
}
apache-2.0
signed/intellij-community
platform/lang-impl/src/com/intellij/codeInsight/editorActions/EndHandler.java
5358
/*
 * Copyright 2000-2014 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.codeInsight.editorActions;

import com.intellij.codeInsight.CodeInsightSettings;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.CommonDataKeys;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.WriteAction;
import com.intellij.openapi.editor.*;
import com.intellij.openapi.editor.actionSystem.EditorActionHandler;
import com.intellij.openapi.fileEditor.FileDocumentManager;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.psi.PsiFile;
import com.intellij.psi.codeStyle.CodeStyleManager;
import com.intellij.util.text.CharArrayUtil;

/**
 * "Smart End" editor action handler: when the caret is on a line consisting only of
 * whitespace, pressing End moves the caret to the language-computed indent column
 * (replacing the line's whitespace with the proper indent string where needed).
 * In every other situation it delegates to the original End handler.
 */
public class EndHandler extends EditorActionHandler {
  private final EditorActionHandler myOriginalHandler;

  public EndHandler(EditorActionHandler originalHandler) {
    super(true);
    myOriginalHandler = originalHandler;
  }

  @Override
  protected void doExecute(final Editor editor, Caret caret, DataContext dataContext) {
    CodeInsightSettings settings = CodeInsightSettings.getInstance();
    // Smart End disabled -> plain delegation to the original handler.
    if (!settings.SMART_END_ACTION) {
      if (myOriginalHandler != null) {
        myOriginalHandler.execute(editor, caret, dataContext);
      }
      return;
    }

    final Project project = CommonDataKeys.PROJECT.getData(DataManager.getInstance().getDataContext(editor.getComponent()));
    if (project == null) {
      if (myOriginalHandler != null) {
        myOriginalHandler.execute(editor, caret, dataContext);
      }
      return;
    }
    final Document document = editor.getDocument();
    final PsiFile file = PsiDocumentManager.getInstance(project).getPsiFile(document);
    if (file == null) {
      if (myOriginalHandler != null){
        myOriginalHandler.execute(editor, caret, dataContext);
      }
      return;
    }

    // Let navigation-delegate extensions take over entirely if any of them claims the event.
    final EditorNavigationDelegate[] extensions = EditorNavigationDelegate.EP_NAME.getExtensions();
    for (EditorNavigationDelegate delegate : extensions) {
      if (delegate.navigateToLineEnd(editor, dataContext) == EditorNavigationDelegate.Result.STOP) {
        return;
      }
    }

    final CaretModel caretModel = editor.getCaretModel();
    final int caretOffset = caretModel.getOffset();
    CharSequence chars = editor.getDocument().getCharsSequence();
    int length = editor.getDocument().getTextLength();

    if (caretOffset < length) {
      // offset1: last non-space/tab position at or before the caret;
      // landing on a line break (or -1) means everything before the caret is blank.
      final int offset1 = CharArrayUtil.shiftBackward(chars, caretOffset - 1, " \t");
      if (offset1 < 0 || chars.charAt(offset1) == '\n' || chars.charAt(offset1) == '\r') {
        // offset2: first non-space/tab position after the blank run; if that is EOF or a
        // line break, the whole line is whitespace-only.
        final int offset2 = CharArrayUtil.shiftForward(chars, offset1 + 1, " \t");
        boolean isEmptyLine = offset2 >= length || chars.charAt(offset2) == '\n' || chars.charAt(offset2) == '\r';
        if (isEmptyLine) {

          // There is a possible case that indent string is not calculated for particular document (that is true at least for plain text
          // documents). Hence, we check that and don't finish processing in case we have such a situation.
          boolean stopProcessing = true;
          PsiDocumentManager.getInstance(project).commitAllDocuments();
          CodeStyleManager styleManager = CodeStyleManager.getInstance(project);
          final String lineIndent = styleManager.getLineIndent(file, caretOffset);
          if (lineIndent != null) {
            int col = calcColumnNumber(lineIndent, editor.getSettings().getTabSize(project));
            int line = caretModel.getVisualPosition().line;
            caretModel.moveToVisualPosition(new VisualPosition(line, col));

            // Logical column differing from the target means the line's actual whitespace does not
            // produce the desired indent -> rewrite it with the computed indent string.
            if (caretModel.getLogicalPosition().column != col){
              if (!ApplicationManager.getApplication().isWriteAccessAllowed() &&
                  !FileDocumentManager.getInstance().requestWriting(editor.getDocument(), project)) {
                // document is read-only and write access was refused — bail out silently
                return;
              }
              editor.getSelectionModel().removeSelection();
              WriteAction.run(() -> document.replaceString(offset1 + 1, offset2, lineIndent));
            }
          }
          else {
            stopProcessing = false;
          }

          editor.getScrollingModel().scrollToCaret(ScrollType.RELATIVE);
          editor.getSelectionModel().removeSelection();

          if (stopProcessing) {
            return;
          }
        }
      }
    }

    // Fallback: not an empty line (or no indent available) -> original End behavior.
    if (myOriginalHandler != null){
      myOriginalHandler.execute(editor, caret, dataContext);
    }
  }

  /**
   * Width, in columns, of the given indent string: spaces count 1, tabs count tabSize.
   */
  private static int calcColumnNumber(final String lineIndent, final int tabSize) {
    int result = 0;
    for (char c : lineIndent.toCharArray()) {
      if (c == ' ') result++;
      if (c == '\t') result += tabSize;
    }
    return result;
  }
}
apache-2.0
adessaigne/camel
components/camel-infinispan/src/test/java/org/apache/camel/component/infinispan/util/UserUtils.java
2567
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.infinispan.util;

import java.util.List;
import java.util.Objects;

import org.infinispan.protostream.sampledomain.User;

/**
 * Test helpers for building and comparing {@link User} fixtures.
 */
public final class UserUtils {
    /** Fixture users exercising duplicate names and surnames. */
    public static final User[] USERS = new User[] {
        createUser("nameA", "surnameA"),
        createUser("nameA", "surnameB"),
        createUser("nameB", "surnameB")
    };

    /** Fixture users for continuous-query tests. */
    public static final User[] CQ_USERS = new User[] {
        createUser("CQ01", "surname01"),
        createUser("CQ02", "surname01"),
        createUser("NQ03", "surname03"),
        createUser("NQ04", "surname04")
    };

    private UserUtils() {
    }

    /**
     * Cache key for a user: {@code name+surname}.
     */
    public static String createKey(User user) {
        return String.format("%s+%s", user.getName(), user.getSurname());
    }

    /**
     * Build a {@link User} with the given name and surname.
     */
    public static User createUser(String name, String surname) {
        User user = new User();
        user.setName(name);
        user.setSurname(surname);
        return user;
    }

    /**
     * Null-safe string equality; delegates to {@link Objects#equals(Object, Object)}
     * instead of hand-rolling the null checks.
     */
    public static boolean eq(String str1, String str2) {
        return Objects.equals(str1, str2);
    }

    /**
     * @return true when {@code user} is non-null and has exactly the given name and surname
     *         (either side may be null; null matches null).
     */
    public static boolean eq(User user, String name, String surname) {
        return user != null
            && eq(user.getName(), name)
            && eq(user.getSurname(), surname);
    }

    /**
     * @return true when {@code users} contains a user with the given name and surname;
     *         false for a null list.
     */
    public static boolean hasUser(List<User> users, String name, String surname) {
        if (users == null) {
            return false;
        }
        for (User user : users) {
            if (eq(user, name, surname)) {
                return true;
            }
        }
        return false;
    }
}
apache-2.0
zwets/flowable-engine
modules/flowable-engine/src/main/java/org/flowable/engine/impl/NativeHistoricDetailQueryImpl.java
2036
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.flowable.engine.impl;

import java.util.List;
import java.util.Map;

import org.flowable.engine.common.impl.AbstractNativeQuery;
import org.flowable.engine.common.impl.interceptor.CommandContext;
import org.flowable.engine.common.impl.interceptor.CommandExecutor;
import org.flowable.engine.history.HistoricDetail;
import org.flowable.engine.history.NativeHistoricDetailQuery;
import org.flowable.engine.impl.util.CommandContextUtil;

/**
 * Native (hand-written SQL) query implementation for {@link HistoricDetail} instances.
 * Query building lives in {@link AbstractNativeQuery}; this class only wires the
 * list/count execution through to the historic-detail entity manager.
 */
public class NativeHistoricDetailQueryImpl extends AbstractNativeQuery<NativeHistoricDetailQuery, HistoricDetail> implements NativeHistoricDetailQuery {

    private static final long serialVersionUID = 1L;

    public NativeHistoricDetailQueryImpl(CommandContext commandContext) {
        super(commandContext);
    }

    public NativeHistoricDetailQueryImpl(CommandExecutor commandExecutor) {
        super(commandExecutor);
    }

    // results ////////////////////////////////////////////////////////////////

    /**
     * Executes the native query and returns the matching historic details.
     * {@code parameterMap} carries the SQL plus named parameters assembled by the base class.
     */
    @Override
    public List<HistoricDetail> executeList(CommandContext commandContext, Map<String, Object> parameterMap) {
        return CommandContextUtil.getHistoricDetailEntityManager(commandContext).findHistoricDetailsByNativeQuery(parameterMap);
    }

    /**
     * Executes the native query's count variant.
     */
    @Override
    public long executeCount(CommandContext commandContext, Map<String, Object> parameterMap) {
        return CommandContextUtil.getHistoricDetailEntityManager(commandContext).findHistoricDetailCountByNativeQuery(parameterMap);
    }

}
apache-2.0
psiinon/zaproxy
zap/src/main/java/org/zaproxy/zap/extension/api/ApiGeneratorUtils.java
3868
/*
 * Zed Attack Proxy (ZAP) and its related class files.
 *
 * ZAP is an HTTP/HTTPS proxy for assessing web application security.
 *
 * Copyright 2012 The ZAP Development Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.zaproxy.zap.extension.api;

import java.util.ArrayList;
import java.util.List;
import org.parosproxy.paros.core.scanner.ScannerParam;
import org.parosproxy.paros.network.ConnectionParam;
import org.zaproxy.zap.extension.alert.AlertAPI;
import org.zaproxy.zap.extension.anticsrf.AntiCsrfAPI;
import org.zaproxy.zap.extension.anticsrf.AntiCsrfParam;
import org.zaproxy.zap.extension.ascan.ActiveScanAPI;
import org.zaproxy.zap.extension.authentication.AuthenticationAPI;
import org.zaproxy.zap.extension.authorization.AuthorizationAPI;
import org.zaproxy.zap.extension.autoupdate.AutoUpdateAPI;
import org.zaproxy.zap.extension.autoupdate.OptionsParamCheckForUpdates;
import org.zaproxy.zap.extension.brk.BreakAPI;
import org.zaproxy.zap.extension.forceduser.ForcedUserAPI;
import org.zaproxy.zap.extension.httpsessions.HttpSessionsAPI;
import org.zaproxy.zap.extension.params.ParamsAPI;
import org.zaproxy.zap.extension.pscan.PassiveScanAPI;
import org.zaproxy.zap.extension.ruleconfig.RuleConfigAPI;
import org.zaproxy.zap.extension.script.ScriptAPI;
import org.zaproxy.zap.extension.search.SearchAPI;
import org.zaproxy.zap.extension.sessions.SessionManagementAPI;
import org.zaproxy.zap.extension.spider.SpiderAPI;
import org.zaproxy.zap.extension.stats.StatsAPI;
import org.zaproxy.zap.extension.stats.StatsParam;
import org.zaproxy.zap.extension.users.UsersAPI;
import org.zaproxy.zap.spider.SpiderParam;

/**
 * Utility class for the API generators
 *
 * @author simon
 */
public class ApiGeneratorUtils {

    // Static-only utility class: no instances.
    private ApiGeneratorUtils() {
    }

    /**
     * Return all of the available ApiImplementors. If you implement a new ApiImplementor then you
     * must add it to this class.
     *
     * @return all of the available ApiImplementors.
     */
    public static List<ApiImplementor> getAllImplementors() {
        List<ApiImplementor> imps = new ArrayList<>();
        ApiImplementor api;

        imps.add(new AlertAPI(null));

        // Implementors with configurable options register their *Param alongside the API.
        api = new AntiCsrfAPI(null);
        api.addApiOptions(new AntiCsrfParam());
        imps.add(api);

        imps.add(new PassiveScanAPI(null));
        imps.add(new SearchAPI(null));

        api = new AutoUpdateAPI(null);
        api.addApiOptions(new OptionsParamCheckForUpdates());
        imps.add(api);

        api = new SpiderAPI(null);
        api.addApiOptions(new SpiderParam());
        imps.add(api);

        api = new CoreAPI(new ConnectionParam());
        imps.add(api);

        imps.add(new ParamsAPI(null));

        api = new ActiveScanAPI(null);
        api.addApiOptions(new ScannerParam());
        imps.add(api);

        imps.add(new ContextAPI());
        imps.add(new HttpSessionsAPI(null));
        imps.add(new BreakAPI(null));
        imps.add(new AuthenticationAPI(null));
        imps.add(new AuthorizationAPI());
        imps.add(new RuleConfigAPI(null));
        imps.add(new SessionManagementAPI(null));
        imps.add(new UsersAPI(null));
        imps.add(new ForcedUserAPI(null));
        imps.add(new ScriptAPI(null));

        api = new StatsAPI(null);
        api.addApiOptions(new StatsParam());
        imps.add(api);

        return imps;
    }
}
apache-2.0
ravisund/Kundera
src/kundera-rdbms/src/test/java/com/impetus/client/crud/datatypes/StudentRdbmsBytePrimitiveTest.java
19344
package com.impetus.client.crud.datatypes;

import java.sql.SQLException;
import java.util.List;

import javax.persistence.EntityManager;
import javax.persistence.Query;

import junit.framework.Assert;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.impetus.client.crud.datatypes.entities.StudentBytePrimitive;

/**
 * CRUD and JPQL tests for the primitive {@code byte} id type against RDBMS.
 *
 * <p>Three rows are persisted per run: one at the max byte/short values
 * (later renamed to "Kuldeep"), one at the min values, and one at random
 * values (later renamed to "Vivek" and deleted by the named queries).
 * The value providers ({@code getMaxValue}, {@code getMinValue},
 * {@code getRandomValue}) come from {@code RdbmsBase}.
 */
public class StudentRdbmsBytePrimitiveTest extends RdbmsBase
{

    @Before
    public void setUp() throws Exception
    {
        super.setUp();
    }

    @After
    public void tearDown() throws Exception
    {
        super.tearDown();
    }

    /** Runs the full scenario reusing the same EntityManager between steps. */
    @Test
    public void testExecuteUseSameEm()
    {
        testPersist(true);
        testFindById(true);
        testMerge(true);
        testFindByQuery(true);
        testNamedQueryUseSameEm(true);
        testDelete(true);
    }

    /** Runs the full scenario with a fresh EntityManager between steps. */
    @Test
    public void testExecute()
    {
        testPersist(false);
        testFindById(false);
        testMerge(false);
        testFindByQuery(false);
        testNamedQuery(false);
        testDelete(false);
    }

    /** Persists the three fixture rows (max, min, random key values). */
    public void testPersist(boolean useSameEm)
    {
        EntityManager em = emf.createEntityManager();

        // Insert max value of byte
        StudentBytePrimitive studentMax = new StudentBytePrimitive();
        studentMax.setAge((Short) getMaxValue(short.class));
        studentMax.setId((Byte) getMaxValue(byte.class));
        studentMax.setName((String) getMaxValue(String.class));
        em.persist(studentMax);

        // Insert min value of byte
        StudentBytePrimitive studentMin = new StudentBytePrimitive();
        studentMin.setAge((Short) getMinValue(short.class));
        studentMin.setId((Byte) getMinValue(byte.class));
        studentMin.setName((String) getMinValue(String.class));
        em.persist(studentMin);

        // Insert random value of byte
        StudentBytePrimitive student = new StudentBytePrimitive();
        student.setAge((Short) getRandomValue(short.class));
        student.setId((Byte) getRandomValue(byte.class));
        student.setName((String) getRandomValue(String.class));
        em.persist(student);
        em.close();
    }

    /** Looks each fixture row up by primary key and checks all fields. */
    public void testFindById(boolean useSameEm)
    {
        EntityManager em = emf.createEntityManager();

        StudentBytePrimitive studentMax = em.find(StudentBytePrimitive.class, getMaxValue(byte.class));
        Assert.assertNotNull(studentMax);
        Assert.assertEquals(getMaxValue(short.class), studentMax.getAge());
        Assert.assertEquals(getMaxValue(String.class), studentMax.getName());
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        StudentBytePrimitive studentMin = em.find(StudentBytePrimitive.class, getMinValue(byte.class));
        Assert.assertNotNull(studentMin);
        Assert.assertEquals(getMinValue(short.class), studentMin.getAge());
        Assert.assertEquals(getMinValue(String.class), studentMin.getName());
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        StudentBytePrimitive student = em.find(StudentBytePrimitive.class, getRandomValue(byte.class));
        Assert.assertNotNull(student);
        Assert.assertEquals(getRandomValue(short.class), student.getAge());
        Assert.assertEquals(getRandomValue(String.class), student.getName());
        em.close();
    }

    /** Renames the max-key row to "Kuldeep" via merge and re-reads it. */
    public void testMerge(boolean useSameEm)
    {
        EntityManager em = emf.createEntityManager();
        StudentBytePrimitive student = em.find(StudentBytePrimitive.class, getMaxValue(byte.class));
        Assert.assertNotNull(student);
        Assert.assertEquals(getMaxValue(short.class), student.getAge());
        Assert.assertEquals(getMaxValue(String.class), student.getName());

        student.setName("Kuldeep");
        em.merge(student);
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        StudentBytePrimitive newStudent = em.find(StudentBytePrimitive.class, getMaxValue(byte.class));
        Assert.assertNotNull(newStudent);
        Assert.assertEquals(getMaxValue(short.class), newStudent.getAge());
        Assert.assertEquals("Kuldeep", newStudent.getName());
        // Fix: the EntityManager was previously leaked here (never closed).
        em.close();
    }

    /** Runs the JPQL query variants. */
    public void testFindByQuery(boolean useSameEm)
    {
        findAllQuery();
        findByName();
        findByAge();
        findByNameAndAgeGTAndLT();
        findByNameAndAgeGTEQAndLTEQ();
        findByNameAndAgeGTAndLTEQ();
        findByNameAndAgeWithOrClause();
        findByAgeAndNameGTAndLT();
        findByNameAndAGEBetween();
        // findByRange();
    }

    private void findByAgeAndNameGTAndLT()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.age = " + getMinValue(short.class)
                + " and s.name > 'Amresh' and s.name <= '" + getMaxValue(String.class) + "'";
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getMinValue(byte.class), student.getId());
            Assert.assertEquals(getMinValue(short.class), student.getAge());
            Assert.assertEquals(getMinValue(String.class), student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    // NOTE(review): currently unused — its call in testFindByQuery is
    // commented out; kept for when range queries are supported.
    private void findByRange()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.id between ?1 and ?2";
        q = em.createQuery(query);
        q.setParameter(1, getMinValue(byte.class));
        q.setParameter(2, getMaxValue(byte.class));
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(3, students.size());
        int count = 0;
        for (StudentBytePrimitive student : students)
        {
            if (student.getId() == ((Byte) getMaxValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMaxValue(short.class), student.getAge());
                Assert.assertEquals("Kuldeep", student.getName());
                count++;
            }
            else if (student.getId() == ((Byte) getMinValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMinValue(short.class), student.getAge());
                Assert.assertEquals(getMinValue(String.class), student.getName());
                count++;
            }
            else
            {
                Assert.assertEquals(getRandomValue(byte.class), student.getId());
                Assert.assertEquals(getRandomValue(short.class), student.getAge());
                Assert.assertEquals(getRandomValue(String.class), student.getName());
                count++;
            }
        }
        Assert.assertEquals(3, count);
        em.close();
    }

    // NOTE(review): despite the name, the query uses AND, not OR.
    private void findByNameAndAgeWithOrClause()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Kuldeep' and s.age > "
                + getMinValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getMaxValue(byte.class), student.getId());
            Assert.assertEquals(getMaxValue(short.class), student.getAge());
            Assert.assertEquals("Kuldeep", student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    private void findByNameAndAgeGTAndLTEQ()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Kuldeep' and s.age > "
                + getMinValue(short.class) + " and s.age <= " + getMaxValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getMaxValue(byte.class), student.getId());
            Assert.assertEquals(getMaxValue(short.class), student.getAge());
            Assert.assertEquals("Kuldeep", student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    public void testNamedQueryUseSameEm(boolean useSameEm)
    {
        updateNamed(true);
        deleteNamed(true);
    }

    public void testNamedQuery(boolean useSameEm)
    {
        updateNamed(false);
        deleteNamed(false);
    }

    /** Removes the max-key row (renamed to "Kuldeep" by testMerge). */
    public void testDelete(boolean useSameEm)
    {
        EntityManager em = emf.createEntityManager();

        StudentBytePrimitive studentMax = em.find(StudentBytePrimitive.class, getMaxValue(byte.class));
        Assert.assertNotNull(studentMax);
        Assert.assertEquals(getMaxValue(short.class), studentMax.getAge());
        Assert.assertEquals("Kuldeep", studentMax.getName());
        em.remove(studentMax);
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        studentMax = em.find(StudentBytePrimitive.class, getMaxValue(byte.class));
        Assert.assertNull(studentMax);
        em.close();
    }

    /**
     * Bulk-deletes the row previously renamed to "Vivek" by updateNamed.
     */
    private void deleteNamed(boolean useSameEm)
    {
        String deleteQuery = "Delete From StudentBytePrimitive s where s.name='Vivek'";
        EntityManager em = emf.createEntityManager();
        Query q = em.createQuery(deleteQuery);
        q.executeUpdate();
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        StudentBytePrimitive newStudent = em.find(StudentBytePrimitive.class, getRandomValue(byte.class));
        Assert.assertNull(newStudent);
        em.close();
    }

    /**
     * Bulk-renames the random-key row from "Amresh" to "Vivek".
     */
    private void updateNamed(boolean useSameEm)
    {
        EntityManager em = emf.createEntityManager();

        String updateQuery = "Update StudentBytePrimitive s SET s.name='Vivek' where s.name='Amresh'";
        Query q = em.createQuery(updateQuery);
        q.executeUpdate();
        if (!useSameEm)
        {
            em.close();
            em = emf.createEntityManager();
        }
        StudentBytePrimitive newStudent = em.find(StudentBytePrimitive.class, getRandomValue(byte.class));
        Assert.assertNotNull(newStudent);
        Assert.assertEquals(getRandomValue(short.class), newStudent.getAge());
        Assert.assertEquals("Vivek", newStudent.getName());
        em.close();
    }

    private void findByNameAndAGEBetween()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Amresh' and s.age between "
                + getMinValue(short.class) + " and " + getMaxValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getRandomValue(byte.class), student.getId());
            Assert.assertEquals(getRandomValue(short.class), student.getAge());
            Assert.assertEquals(getRandomValue(String.class), student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    private void findByNameAndAgeGTAndLT()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Amresh' and s.age > "
                + getMinValue(short.class) + " and s.age < " + getMaxValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getRandomValue(byte.class), student.getId());
            Assert.assertEquals(getRandomValue(short.class), student.getAge());
            Assert.assertEquals(getRandomValue(String.class), student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    private void findByNameAndAgeGTEQAndLTEQ()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Kuldeep' and s.age >= "
                + getMinValue(short.class) + " and s.age <= " + getMaxValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(2, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            if (student.getId() == ((Byte) getMaxValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMaxValue(short.class), student.getAge());
                Assert.assertEquals("Kuldeep", student.getName());
                count++;
            }
            else
            {
                Assert.assertEquals(getMinValue(byte.class), student.getId());
                Assert.assertEquals(getMinValue(short.class), student.getAge());
                Assert.assertEquals(getMinValue(String.class), student.getName());
                count++;
            }
        }
        Assert.assertEquals(2, count);
        em.close();
    }

    private void findByAge()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.age = " + getRandomValue(short.class);
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(1, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            Assert.assertEquals(getRandomValue(byte.class), student.getId());
            Assert.assertEquals(getRandomValue(short.class), student.getAge());
            Assert.assertEquals(getRandomValue(String.class), student.getName());
            count++;
        }
        Assert.assertEquals(1, count);
        em.close();
    }

    /**
     * Finds the two rows named "Kuldeep" (max- and min-key rows).
     */
    private void findByName()
    {
        EntityManager em;
        String query;
        Query q;
        List<StudentBytePrimitive> students;
        int count;
        em = emf.createEntityManager();
        query = "Select s From StudentBytePrimitive s where s.name = 'Kuldeep'";
        q = em.createQuery(query);
        students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(2, students.size());
        count = 0;
        for (StudentBytePrimitive student : students)
        {
            if (student.getId() == ((Byte) getMaxValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMaxValue(short.class), student.getAge());
                Assert.assertEquals("Kuldeep", student.getName());
                count++;
            }
            else
            {
                Assert.assertEquals(getMinValue(byte.class), student.getId());
                Assert.assertEquals(getMinValue(short.class), student.getAge());
                Assert.assertEquals(getMinValue(String.class), student.getName());
                count++;
            }
        }
        Assert.assertEquals(2, count);
        em.close();
    }

    /**
     * Selects all rows and verifies the three fixture rows.
     */
    private void findAllQuery()
    {
        EntityManager em = emf.createEntityManager();
        // Select all query.
        String query = "Select s From StudentBytePrimitive s ";
        Query q = em.createQuery(query);
        List<StudentBytePrimitive> students = q.getResultList();
        Assert.assertNotNull(students);
        Assert.assertEquals(3, students.size());
        int count = 0;
        for (StudentBytePrimitive student : students)
        {
            if (student.getId() == ((Byte) getMaxValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMaxValue(short.class), student.getAge());
                Assert.assertEquals("Kuldeep", student.getName());
                count++;
            }
            else if (student.getId() == ((Byte) getMinValue(byte.class)).byteValue())
            {
                Assert.assertEquals(getMinValue(short.class), student.getAge());
                Assert.assertEquals(getMinValue(String.class), student.getName());
                count++;
            }
            else
            {
                Assert.assertEquals(getRandomValue(byte.class), student.getId());
                Assert.assertEquals(getRandomValue(short.class), student.getAge());
                Assert.assertEquals(getRandomValue(String.class), student.getName());
                count++;
            }
        }
        Assert.assertEquals(3, count);
        em.close();
    }

    public void startCluster()
    {
    }

    public void stopCluster()
    {
        // TODO Auto-generated method stub
    }

    /** Creates the backing table, recreating schema on failure. */
    public void createSchema() throws SQLException
    {
        try
        {
            cli.createSchema("testdb");
            cli.update("CREATE TABLE TESTDB.StudentBytePrimitive (id TINYINT PRIMARY KEY, NAME VARCHAR(256), AGE SMALLINT)");
        }
        catch (Exception e)
        {
            cli.update("DELETE FROM TESTDB.StudentBytePrimitive");
            cli.update("DROP TABLE TESTDB.StudentBytePrimitive");
            cli.update("DROP SCHEMA TESTDB");
            cli.update("CREATE TABLE TESTDB.StudentBytePrimitive (id TINYINT PRIMARY KEY, NAME VARCHAR(256), AGE SMALLINT)");
        }
    }

    /** Drops the backing table; failures are ignored (best-effort cleanup). */
    public void dropSchema()
    {
        try
        {
            cli.update("DELETE FROM TESTDB.StudentBytePrimitive");
            cli.update("DROP TABLE TESTDB.StudentBytePrimitive");
            cli.update("DROP SCHEMA TESTDB");
            cli.closeConnection();
        }
        catch (Exception e)
        {
            // Nothing to do
        }
    }
}
apache-2.0
bgroenks96/nifty-gui
nifty-examples/src/main/java/de/lessvoid/nifty/examples/multiplayer/MultiplayerPanelControl.java
2010
package de.lessvoid.nifty.examples.multiplayer; import de.lessvoid.nifty.Nifty; import de.lessvoid.nifty.controls.Controller; import de.lessvoid.nifty.controls.Parameters; import de.lessvoid.nifty.elements.Element; import de.lessvoid.nifty.input.NiftyInputEvent; import de.lessvoid.nifty.screen.Screen; import javax.annotation.Nonnull; /** * @author void */ public class MultiplayerPanelControl implements Controller { private Nifty nifty; private Screen screen; private Element element; @Override public void bind( @Nonnull final Nifty niftyParam, @Nonnull final Screen screenParam, @Nonnull final Element newElement, @Nonnull final Parameters properties) { nifty = niftyParam; screen = screenParam; element = newElement; } @Override public void init(@Nonnull final Parameters parameter) { } @Override public void onStartScreen() { setDifficulty("easy"); } @Override public void onFocus(final boolean getFocus) { } @Override public boolean inputEvent(@Nonnull final NiftyInputEvent inputEvent) { return false; } @Override public void onEndScreen() { } public void removePanel() { nifty.removeElement(screen, element); } public void setDifficulty(final String mode) { element.findElementById("#easy").setStyle("unselected"); element.findElementById("#medium").setStyle("unselected"); element.findElementById("#hard").setStyle("unselected"); element.findElementById("#expert").setStyle("unselected"); if ("easy".equals(mode)) { element.findElementById("#easy").setStyle("selected"); } else if ("medium".equals(mode)) { element.findElementById("#medium").setStyle("selected"); } else if ("hard".equals(mode)) { element.findElementById("#hard").setStyle("selected"); } else if ("expert".equals(mode)) { element.findElementById("#expert").setStyle("selected"); } } }
bsd-2-clause
UniquePassive/runelite
runelite-client/src/main/java/net/runelite/client/plugins/combatlevel/CombatLevelPlugin.java
3191
/*
 * Copyright (c) 2017, Devin French <https://github.com/devinfrench>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package net.runelite.client.plugins.combatlevel;

import com.google.common.eventbus.Subscribe;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
import javax.inject.Inject;
import net.runelite.api.Client;
import net.runelite.api.Experience;
import net.runelite.api.GameState;
import net.runelite.api.Skill;
import net.runelite.api.events.GameTick;
import net.runelite.api.widgets.Widget;
import net.runelite.api.widgets.WidgetInfo;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;

/**
 * Replaces the combat level widget text with a precise (3 decimal place)
 * combat level, and restores the integer display on shutdown.
 */
@PluginDescriptor(
	name = "Combat Level"
)
public class CombatLevelPlugin extends Plugin
{
	// Fix: pin the symbols to an English locale so the decimal separator is
	// always '.', matching the indexOf(".") strip in shutDown(). With the
	// default locale, comma-decimal locales would render "126,123" and the
	// restore logic would never find the '.' to strip.
	private final DecimalFormat decimalFormat =
		new DecimalFormat("#.###", DecimalFormatSymbols.getInstance(Locale.ENGLISH));

	@Inject
	Client client;

	@Override
	protected void shutDown() throws Exception
	{
		// Restore the vanilla integer display by stripping our decimals.
		Widget combatLevelWidget = client.getWidget(WidgetInfo.COMBAT_LEVEL);
		if (combatLevelWidget != null)
		{
			String widgetText = combatLevelWidget.getText();

			if (widgetText.contains("."))
			{
				combatLevelWidget.setText(widgetText.substring(0, widgetText.indexOf(".")));
			}
		}
	}

	@Subscribe
	public void updateCombatLevel(GameTick event)
	{
		if (client.getGameState() != GameState.LOGGED_IN)
		{
			return;
		}

		Widget combatLevelWidget = client.getWidget(WidgetInfo.COMBAT_LEVEL);
		if (combatLevelWidget == null)
		{
			return;
		}

		double combatLevelPrecise = Experience.getCombatLevelPrecise(
			client.getRealSkillLevel(Skill.ATTACK),
			client.getRealSkillLevel(Skill.STRENGTH),
			client.getRealSkillLevel(Skill.DEFENCE),
			client.getRealSkillLevel(Skill.HITPOINTS),
			client.getRealSkillLevel(Skill.MAGIC),
			client.getRealSkillLevel(Skill.RANGED),
			client.getRealSkillLevel(Skill.PRAYER)
		);

		combatLevelWidget.setText("Combat Lvl: " + decimalFormat.format(combatLevelPrecise));
	}
}
bsd-2-clause
drmacro/basex
basex-core/src/test/java/org/basex/build/CollectionPathTest.java
1652
package org.basex.build; import static org.junit.Assert.*; import org.basex.*; import org.basex.core.cmd.*; import org.junit.*; import org.junit.Test; /** * Tests queries on collections. * * @author BaseX Team 2005-16, BSD License * @author Michael Seiferle */ public final class CollectionPathTest extends SandboxTest { /** Test files directory. */ private static final String DIR = "src/test/resources/"; /** Test files. */ private static final String[] FILES = { DIR + "input.xml", DIR + "xmark.xml", DIR + "test.xml" }; /** Test ZIP. */ private static final String ZIP = DIR + "xml.zip"; /** * Creates an initial database. */ @BeforeClass public static void before() { execute(new CreateDB(NAME)); for(final String file : FILES) execute(new Add(DIR, file)); execute(new Add("test/zipped", ZIP)); } /** * Drops the initial collection. */ @AfterClass public static void after() { execute(new DropDB(NAME)); } /** * Finds single doc. */ @Test public void findDoc() { assertEquals("1", query( "count(for $x in collection('" + NAME + '/' + DIR + "xmark.xml') " + "where $x//location contains text 'uzbekistan' " + "return $x)")); } /** * Finds documents in path. */ @Test public void findDocs() { assertEquals("4", query("count(collection('" + NAME + "/test/zipped'))")); } /** * Checks if the constructed base-uri matches the base-uri of added documents. */ @Test public void baseUri() { assertEquals(NAME + '/' + FILES[1], query("base-uri(collection('" + NAME + '/' + DIR + "xmark.xml'))")); } }
bsd-3-clause
StexX/KiWi-OSE
src/util/kiwi/util/izpack/DatabaseParser.java
9577
package kiwi.util.izpack;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

/**
 * Used to manage and manipulate the information stored in the
 * <code>persitence.xml</code> file. <br>
 * From design reasons this class can not be extend.
 *
 * @author mradules
 */
final class DatabaseParser {

    /**
     * The property node name.
     */
    private static final String PROPERTY_NODE_NAME = "property"; //$NON-NLS-1$

    /**
     * The name attribute (for the property node).
     */
    private static final String NAME_PROPERTY = "name"; //$NON-NLS-1$

    /**
     * The value attribute (for the property node).
     */
    private static final String VALUE_PROPERTY = "value"; //$NON-NLS-1$

    /**
     * The "Hibernate dialect" value attribute's value.
     */
    private static final String HIBERNATE_DIALECT_PROPERTY =
            "hibernate.dialect"; //$NON-NLS-1$

    /**
     * The "KiWi triple store" value attribute's value.
     */
    private static final String KIWI_TRIPLESTORE_DIR_PROPERTY =
            "kiwi.triplestore.dir"; //$NON-NLS-1$

    /**
     * The "KiWi SOLR home" value attribute's value.
     */
    private static final String KIWI_SOLR_HOME_PROPERTY =
            "kiwi.solr.home"; //$NON-NLS-1$

    /**
     * The "KiWi work directory" value attribute's value.
     */
    private static final String KIWI_WORK_DIR_PROPERTY =
            "kiwi.work.dir"; //$NON-NLS-1$

    /**
     * The "KiWi semantic vectors directory" value attribute's value.
     */
    private static final String KIWI_SEMVECTOR_DIR_PROPERTY =
            "kiwi.semanticvectors"; //$NON-NLS-1$

    /**
     * The DOM which contains the application.xml.
     */
    private final Document document;

    /**
     * The actual database system.
     */
    private DatabaseSystem database;

    /**
     * The element which contains the KiWi work directory
     * property ("kiwi.work.dir").
     */
    private final Element workDirNode;

    /**
     * The element which contains the KiWi work directory
     * property ("kiwi.solr.home").
     */
    private final Element solrHomeNode;

    /**
     * The element which contains the KiWi work directory
     * property ("kiwi.triplestore.dir").
     */
    private final Element triplestoreDirNode;

    /**
     * The element which contains the KiWi semantic vectors
     * directory property ("kiwi.semanticvectors").
     */
    private final Element semanticVectorsDirNode;

    /**
     * The element which contains the hybernate dialect property
     * ("hibernate.dialect").
     */
    private final Element hibernateDialectNode;

    /** Optional default destination for {@link #persist()}. */
    private File destinationFile;

    /**
     * Used to manage and manipulate the information stored in
     * the <code>persistence.xml</code> file.
     *
     * @param document the DOM correspond to the application.xml
     *            file, can not be null.
     * @throws NullPointerException if the <code>document</code>
     *             is null.
     */
    DatabaseParser(Document document) {
        if (document == null) {
            throw new NullPointerException("The document can not be null.");
        }

        this.document = document;
        // NOTE(review): getProperty returns null when a property node is
        // absent; a persistence.xml missing any of these would NPE below —
        // assumed to always be present. TODO confirm.
        workDirNode = getProperty(KIWI_WORK_DIR_PROPERTY);
        solrHomeNode = getProperty(KIWI_SOLR_HOME_PROPERTY);
        triplestoreDirNode = getProperty(KIWI_TRIPLESTORE_DIR_PROPERTY);
        hibernateDialectNode = getProperty(HIBERNATE_DIALECT_PROPERTY);
        semanticVectorsDirNode = getProperty(KIWI_SEMVECTOR_DIR_PROPERTY);

        final String attribute =
                hibernateDialectNode.getAttribute(VALUE_PROPERTY);
        if (attribute != null) {
            for (final DatabaseSystem databaseSystem : DatabaseSystem.values()) {
                if (attribute.contains(databaseSystem.getName())) {
                    // the dialect name contains the db system name
                    database = databaseSystem;
                }
            }
        }
    }

    /**
     * Returns the DOM element that has a certain property, the
     * properties is identified after its name. E.G. the method
     * call :
     *
     * <pre>
     * Element e = getProperty(&quot;kiwi.work.dir&quot;);
     * </pre>
     *
     * will return the element tat looks like :
     *
     * <pre>
     * <property name="kiwi.work.dir" value="/tmp/kiwi"/>
     * </pre>
     *
     * @param propName the property name.
     * @return the property with the given name, or null if no such property
     *         node exists.
     */
    private Element getProperty(String propName) {
        final NodeList elements =
                document.getElementsByTagName(PROPERTY_NODE_NAME);
        for (int index = 0; index < elements.getLength(); index++) {
            final Element item = (Element) elements.item(index);
            final String name = item.getAttribute(NAME_PROPERTY);
            if (propName.equals(name)) {
                return item;
            }
        }

        return null;
    }

    /**
     * Returns an array which contains the name for all the
     * supported database systems.
     *
     * @return an array which contains the name for all the
     *         supported database systems.
     */
    DatabaseSystem[] getSupportedDatabase() {
        return DatabaseSystem.values();
    }

    /**
     * Returns the actual database base system.
     *
     * @return the actual database base system.
     */
    DatabaseSystem getActualDatabase() {
        return database;
    }

    /**
     * Set the actual database system.
     *
     * @param database the actual database system.
     */
    void setActualDatabase(DatabaseSystem database) {
        this.database = database;
        hibernateDialectNode.setAttribute(VALUE_PROPERTY,
                this.database.getDialect());
    }

    /**
     * Registers a new value for the actual database system. The
     * string must correspond one <code>toString</code> element
     * representation from the <code>DatabaseSystem</code> type
     * safe enum.
     *
     * @param database the name for the database system.
     * @see DatabaseSystem
     */
    void setActualDatabaseAsString(String database) {
        for (final DatabaseSystem ds : DatabaseSystem.values()) {
            if (database.equals(ds.getName())) {
                this.database = ds;
                hibernateDialectNode.setAttribute(VALUE_PROPERTY,
                        this.database.getDialect());
                break;
            }
        }
    }

    /**
     * Returns the actual value for the KiWi work directory property.
     *
     * @return the actual value for the KiWi work directory property.
     */
    String getWorkDir() {
        return workDirNode.getAttribute(VALUE_PROPERTY);
    }

    /**
     * Register a new value for the KiWi work directory property.
     *
     * @param workDir the new value for the workDir.
     */
    void setWorkDir(String workDir) {
        workDirNode.setAttribute(VALUE_PROPERTY, workDir);
    }

    /**
     * Returns the actual value for the SOLR home property.
     *
     * @return the solrHome value.
     */
    String getSolrHome() {
        return solrHomeNode.getAttribute(VALUE_PROPERTY);
    }

    /**
     * @param solrHome the solrHome to set
     */
    void setSolrHome(String solrHome) {
        solrHomeNode.setAttribute(VALUE_PROPERTY, solrHome);
    }

    /**
     * @return the triplestoreDir
     */
    String getTriplestoreDir() {
        return triplestoreDirNode.getAttribute(VALUE_PROPERTY);
    }

    /**
     * @param triplestoreDir the triplestoreDir to set
     */
    void setTriplestoreDir(String triplestoreDir) {
        triplestoreDirNode.setAttribute(VALUE_PROPERTY, triplestoreDir);
    }

    /** @return the semantic vectors directory property value. */
    String getSemanticVectorDir() {
        return semanticVectorsDirNode.getAttribute(VALUE_PROPERTY);
    }

    /** @param semanticVectorDir the semantic vectors directory to set. */
    void setSemanticVectorDir(String semanticVectorDir) {
        semanticVectorsDirNode.setAttribute(VALUE_PROPERTY, semanticVectorDir);
    }

    /**
     * Creates the parent directories for the given file if they do not
     * already exist. A file with no parent component is left alone
     * (previously this case caused a NullPointerException).
     *
     * @param file the file whose parent directories are required.
     */
    private static void ensureParentExists(File file) {
        final File parent = file.getParentFile();
        if (parent != null && !parent.exists()) {
            parent.mkdirs();
        }
    }

    /**
     * Persists the actual state in in an XML file placed on a given path.
     *
     * @param path the path to the xml file.
     * @throws IOException by any IO related exceptions.
     * @throws NullPointerException if the path is null.
     */
    void persist(String path) throws IOException {
        if (path == null) {
            throw new NullPointerException("The path can not be null.");
        }

        final File dest = new File(path);
        ensureParentExists(dest);
        // try-with-resources: the stream was previously never closed (leak).
        try (FileOutputStream outputStream = new FileOutputStream(dest)) {
            XMLUtil.persist(outputStream, document);
        }
    }

    File getDestinationFile() {
        return destinationFile;
    }

    void setDestinationFile(File destinationFile) {
        this.destinationFile = destinationFile;
    }

    /**
     * Persists the actual state to the previously registered
     * destination file.
     *
     * @throws IOException by any IO related exceptions.
     * @throws NullPointerException if no destination file was set.
     */
    void persist() throws IOException {
        if (destinationFile == null) {
            throw new NullPointerException(
                    "The destination file can not be null.");
        }

        ensureParentExists(destinationFile);
        // try-with-resources: the stream was previously never closed (leak).
        try (FileOutputStream outputStream =
                new FileOutputStream(destinationFile)) {
            XMLUtil.persist(outputStream, document);
        }
    }
}
bsd-3-clause
bhav0904/eclipse-collections
eclipse-collections/src/main/java/org/eclipse/collections/impl/parallel/FastListCollectProcedureCombiner.java
1390
/* * Copyright (c) 2016 Goldman Sachs. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * and Eclipse Distribution License v. 1.0 which accompany this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. */ package org.eclipse.collections.impl.parallel; import java.util.Collection; import org.eclipse.collections.impl.block.procedure.FastListCollectProcedure; /** * Combines the results of a Collection of CollectBlocks which each hold onto a transformed (collect) * collection of results. */ public final class FastListCollectProcedureCombiner<T, V> extends AbstractTransformerBasedCombiner<V, T, FastListCollectProcedure<T, V>> { private static final long serialVersionUID = 1L; public FastListCollectProcedureCombiner( Iterable<T> sourceIterable, Collection<V> targetCollection, int initialCapacity, boolean combineOne) { super(combineOne, targetCollection, sourceIterable, initialCapacity); } @Override public void combineOne(FastListCollectProcedure<T, V> procedure) { this.result.addAll(procedure.getFastList()); } }
bsd-3-clause
ChernyshovYuriy/openxc-android
library/src/main/java/com/openxc/sinks/AbstractQueuedCallbackSink.java
4223
package com.openxc.sinks;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import android.util.Log;

import com.openxc.messages.VehicleMessage;

/**
 * Functionality to notify multiple clients asynchronously of new measurements.
 *
 * This class encapsulates the functionality to keep a thread-safe list of
 * listeners that want to be notified of updates asynchronously. Subclasses need
 * only to implement the {@link #propagateMessage(VehicleMessage)}
 * to add the actual logic for looping over the list of receivers and send them
 * new values.
 *
 * New measurements are queued up and propagated to receivers in a separate
 * thread, to avoid blocking the original sender of the data.
 */
public abstract class AbstractQueuedCallbackSink implements VehicleDataSink {
    private final static String TAG = "AbstractQueuedCallbackSink";

    private NotificationThread mNotificationThread = new NotificationThread();
    private Lock mNotificationsLock = new ReentrantLock();
    private Condition mNotificationsChanged =
            mNotificationsLock.newCondition();
    private CopyOnWriteArrayList<VehicleMessage> mNotifications =
            new CopyOnWriteArrayList<>();

    public AbstractQueuedCallbackSink() {
        mNotificationThread.start();
    }

    @Override
    public synchronized void stop() {
        mNotificationThread.done();
    }

    /** Queues a message for asynchronous propagation and wakes the notifier. */
    @Override
    public void receive(VehicleMessage message) throws DataSinkException {
        try {
            mNotificationsLock.lock();
            mNotifications.add(message);
            mNotificationsChanged.signal();
        } finally {
            mNotificationsLock.unlock();
        }
    }

    /* Block until the notifications queue is cleared. */
    public void clearQueue() {
        try {
            mNotificationsLock.lock();
            while(!mNotifications.isEmpty()) {
                mNotificationsChanged.await();
            }
        } catch(InterruptedException e) {
            // Restore interrupt status instead of swallowing it so callers can
            // still observe the interruption.
            Thread.currentThread().interrupt();
        } finally {
            mNotificationsLock.unlock();
        }
    }

    /** Deliver a single queued message to all interested receivers. */
    abstract protected void propagateMessage(VehicleMessage message);

    private class NotificationThread extends Thread {
        private boolean mRunning = true;

        private synchronized boolean isRunning() {
            return mRunning;
        }

        /**
         * Stops the notification loop. The interrupt wakes the thread if it is
         * blocked in await() waiting for new messages.
         */
        public synchronized void done() {
            Log.d(TAG, "Stopping message notifier");
            mRunning = false;
            interrupt();
        }

        @Override
        public void run() {
            Log.d(TAG, "Starting notification thread");
            while(isRunning()) {
                mNotificationsLock.lock();
                try {
                    if(mNotifications.isEmpty()) {
                        mNotificationsChanged.await();
                    }

                    // Iterate over a snapshot and remove only the messages we
                    // actually propagated. The previous implementation seeded
                    // the removal list with a full copy of mNotifications,
                    // which could delete messages enqueued between the
                    // iterator snapshot and the copy without ever propagating
                    // them (lost notifications).
                    Iterator<VehicleMessage> it = mNotifications.iterator();
                    List<VehicleMessage> propagated = new ArrayList<>();
                    while(it.hasNext()) {
                        VehicleMessage message = it.next();
                        propagateMessage(message);
                        propagated.add(message);
                    }
                    mNotifications.removeAll(propagated);
                } catch(InterruptedException e) {
                    Log.d(TAG, "Interrupted while waiting for a new " +
                            "item for notification -- likely shutting down");
                    break;
                } finally {
                    // Wake any clearQueue() waiters so they can re-check
                    // whether the queue is empty.
                    mNotificationsChanged.signal();
                    mNotificationsLock.unlock();
                }
            }
            Log.d(TAG, "Stopped notification thread");
        }
    }
}
bsd-3-clause
zachylimwl/WhatNow
src/main/java/seedu/whatnow/model/freetime/Period.java
2338
//@@author A0139772U package seedu.whatnow.model.freetime; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Comparator; import java.util.Date; import java.util.logging.Logger; import seedu.whatnow.commons.core.LogsCenter; /** * A time period with start and end time */ public class Period implements Comparator { private static final Logger logger = LogsCenter.getLogger(Period.class); public String start; public String end; private static final int SMALLER = -1; private static final int EQUAL = 0; private static final int BIGGER = 1; private static final String TWELVE_HOUR_WITH_MINUTES_COLON_FORMAT = "h:mma"; public Period() { } public Period(String start, String end) { this.start = start; this.end = end; } public void setStart(String start) { this.start = start; } public void setEnd(String end) { this.end = end; } public String getStart() { return this.start; } public String getEnd() { return this.end; } @Override public String toString() { return "[" + start + ", " + end + "]"; } @Override public int compare(Object o1, Object o2) { if (o1 instanceof Period && o2 instanceof Period) { Period period1 = (Period) o1; Period period2 = (Period) o2; DateFormat df = new SimpleDateFormat(TWELVE_HOUR_WITH_MINUTES_COLON_FORMAT); df.setLenient(false); try { Date p1start = df.parse(period1.getStart()); Date p1end = df.parse(period1.getEnd()); Date p2start = df.parse(period2.getStart()); Date p2end = df.parse(period2.getEnd()); if (p1start.compareTo(p2start) < 0 && p1end.compareTo(p2start) <= 0) { return SMALLER; } else if (p1start.compareTo(p2start) == 0 && p1end.compareTo(p2end) == 0) { return EQUAL; } else { return BIGGER; } } catch (ParseException e) { logger.warning("ParseException at Period: \n" + e.getMessage()); return EQUAL; } } return 0; } }
mit
sk89q/CommandHelper
src/main/java/com/laytonsmith/abstraction/bukkit/entities/BukkitMCZombieVillager.java
826
package com.laytonsmith.abstraction.bukkit.entities; import com.laytonsmith.abstraction.entities.MCZombieVillager; import com.laytonsmith.abstraction.enums.MCProfession; import com.laytonsmith.abstraction.enums.bukkit.BukkitMCProfession; import org.bukkit.entity.Entity; import org.bukkit.entity.Villager; import org.bukkit.entity.ZombieVillager; public class BukkitMCZombieVillager extends BukkitMCZombie implements MCZombieVillager { ZombieVillager zv; public BukkitMCZombieVillager(Entity ent) { super(ent); zv = (ZombieVillager) ent; } @Override public MCProfession getProfession() { return BukkitMCProfession.valueOfConcrete(zv.getVillagerProfession()); } @Override public void setProfession(MCProfession profession) { zv.setVillagerProfession((Villager.Profession) profession.getConcrete()); } }
mit
godotgildor/igv
src/org/broad/igv/ui/action/ClearRegionsMenuAction.java
2524
/* * The MIT License (MIT) * * Copyright (c) 2007-2015 Broad Institute * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /* * To change this template, choose Tools | Templates * and open the template in the editor. 
*/

package org.broad.igv.ui.action;

import org.apache.log4j.Logger;
import org.broad.igv.ui.IGV;
import org.broad.igv.ui.UIConstants;
import org.broad.igv.ui.util.UIUtilities;

import javax.swing.*;
import java.awt.event.ActionEvent;

/**
 * Menu action that, after user confirmation, clears all regions of interest
 * from the current session and repaints the main frame.
 *
 * @author jrobinso
 */
public class ClearRegionsMenuAction extends MenuAction {

    // Narrowed from package-visible mutable to the conventional logger shape.
    private static final Logger log = Logger.getLogger(ClearRegionsMenuAction.class);

    IGV mainFrame;

    public ClearRegionsMenuAction(String label, IGV mainFrame) {
        super(label, null);
        this.mainFrame = mainFrame;
        // NOTE(review): reuses the *export* region tooltip for a clear
        // action — looks like a copy/paste slip; confirm the intended
        // constant in UIConstants.
        setToolTipText(UIConstants.EXPORT_REGION_TOOLTIP);
    }

    @Override
    public void actionPerformed(ActionEvent e) {
        // All Swing interaction must happen on the event-dispatch thread.
        UIUtilities.invokeOnEventThread(new Runnable() {
            public void run() {
                int choice = JOptionPane.showConfirmDialog(
                        mainFrame.getMainFrame(),
                        "This action will clear all regions of interest. Continue?",
                        "Clear Regions",
                        JOptionPane.YES_NO_OPTION);

                if (choice == JOptionPane.YES_OPTION) {
                    mainFrame.getSession().clearRegionsOfInterest();
                    mainFrame.repaint();
                }
            }
        });
    }
}
mit
iseki-masaya/spongycastle
core/src/main/java/org/spongycastle/asn1/ocsp/ServiceLocator.java
845
package org.spongycastle.asn1.ocsp;

import org.spongycastle.asn1.ASN1EncodableVector;
import org.spongycastle.asn1.ASN1Object;
import org.spongycastle.asn1.ASN1Primitive;
import org.spongycastle.asn1.DERSequence;
import org.spongycastle.asn1.x500.X500Name;

/**
 * ASN.1 ServiceLocator structure from the OCSP specification (RFC 6960).
 */
public class ServiceLocator
    extends ASN1Object
{
    X500Name issuer;
    ASN1Primitive locator;

    /**
     * Produce an object suitable for an ASN1OutputStream.
     * <pre>
     * ServiceLocator ::= SEQUENCE {
     *     issuer    Name,
     *     locator   AuthorityInfoAccessSyntax OPTIONAL }
     * </pre>
     */
    public ASN1Primitive toASN1Primitive()
    {
        ASN1EncodableVector elements = new ASN1EncodableVector();

        elements.add(issuer);
        // locator is OPTIONAL in the grammar, so it is only encoded when set.
        if (locator != null)
        {
            elements.add(locator);
        }

        return new DERSequence(elements);
    }
}
mit
Ori-Libhaber/che-core
commons/che-core-commons-xml/src/test/java/org/eclipse/che/commons/xml/XMLTreeUtilTest.java
7348
/*******************************************************************************
 * Copyright (c) 2012-2015 Codenvy, S.A.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Codenvy, S.A. - initial API and implementation
 *******************************************************************************/
package org.eclipse.che.commons.xml;

import org.testng.annotations.Test;

import static org.eclipse.che.commons.xml.XMLTreeUtil.closeTagLength;
import static org.eclipse.che.commons.xml.XMLTreeUtil.indexOf;
import static org.eclipse.che.commons.xml.XMLTreeUtil.indexOfAttributeName;
import static org.eclipse.che.commons.xml.XMLTreeUtil.replaceAll;
import static org.eclipse.che.commons.xml.XMLTreeUtil.rootStart;
import static org.eclipse.che.commons.xml.XMLTreeUtil.single;
import static org.eclipse.che.commons.xml.XMLTreeUtil.insertBetween;
import static org.eclipse.che.commons.xml.XMLTreeUtil.insertInto;
import static org.eclipse.che.commons.xml.XMLTreeUtil.lastIndexOf;
import static org.eclipse.che.commons.xml.XMLTreeUtil.openTagLength;
import static org.eclipse.che.commons.xml.XMLTreeUtil.tabulate;
import static java.util.Arrays.asList;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNotEquals;

/**
 * Tests for {@link XMLTreeUtil}
 *
 * NOTE(review): the expected byte offsets and padded string literals below
 * are tightly coupled to the exact test input strings — do not reformat the
 * literals when editing.
 *
 * @author Eugene Voevodin
 */
public class XMLTreeUtilTest {

    @Test
    public void shouldTabulateOneLineString() {
        final String src = "text here";
        assertEquals(tabulate(src, 2), " " + src);
    }

    @Test
    public void shouldTabulateMultilineString() {
        final String src = "first line\nsecond line";
        assertEquals(tabulate(src, 1), " first line\n second line");
    }

    @Test
    public void shouldReturnFirstElement() {
        assertEquals(single(asList("first")), "first");
    }

    // single() must reject lists with more than one element
    @Test(expectedExceptions = XMLTreeException.class)
    public void shouldThrowExceptionWhenListContainsNotOnlyElement() {
        single(asList("first", "second"));
    }

    @Test
    public void shouldInsertContentBetweenTwoAnchors() {
        // anchor offsets into the source below:
        //      6      12
        final byte[] src = "<name>content</name>".getBytes();

        final byte[] newSrc = insertBetween(src, 6, 12, "new content");

        assertEquals(newSrc, "<name>new content</name>".getBytes());
    }

    @Test
    public void shouldInsertContentToCharArrayInSelectedPlace() {
        // insertion offset into the source below:
        //      6
        final byte[] src = "<name></name>".getBytes();

        final byte[] newSrc = insertInto(src, 6, "new content");

        assertEquals(new String(newSrc).intern(), "<name>new content</name>");
    }

    @Test
    public void shouldBeAbleToFindLastIndexOf() {
        // '>' positions in the source below:
        //          11    20      28
        final byte[] src = "...</before>\n    <current>...".getBytes();

        assertEquals(lastIndexOf(src, '>', 20), 11);
        assertEquals(lastIndexOf(src, '>', src.length - 1), 28);
    }

    @Test
    public void shouldBeAbleToGetElementOpenTagLength() {
        //<test>test</test>
        final NewElement newElement = NewElement.createElement("test", "test");

        assertEquals(openTagLength(newElement), 6);
    }

    @Test
    public void shouldBeAbleToGetElementCloseTagLength() {
        //<test>test</test>
        final NewElement newElement = NewElement.createElement("test", "test");

        assertEquals(closeTagLength(newElement), 7);
    }

    @Test
    public void shouldBeAbleToGetIndexOf() {
        final String src = "<element attribute1=\"value1\" attribute2=\"value2\" attribute3=\"value3\">text</element>";
        final byte[] byteSrc = src.getBytes();

        assertEquals(indexOf(byteSrc, "attribute1".getBytes(), 0), src.indexOf("attribute1"));
        assertEquals(indexOf(byteSrc, "attribute2".getBytes(), 0), src.indexOf("attribute2"));
        assertEquals(indexOf(byteSrc, "attribute3".getBytes(), 0), src.indexOf("attribute3"));
    }

    @Test
    public void shouldReturnMinusOneIfTargetBytesWereNotFound() {
        final String src = "source string";
        final byte[] byteSrc = src.getBytes();

        // "string" is present, "strings" is not
        assertNotEquals(indexOf(byteSrc, "string".getBytes(), 0), -1);
        assertEquals(indexOf(byteSrc, "strings".getBytes(), 0), -1);
    }

    @Test
    public void shouldBeAbleToFindIndexOfAttributeNameBytes() {
        final String src = "<element attribute1=\"value1\" attribute2=\"value2\" attribute3=\"value3\">text</element>";
        final byte[] byteSrc = src.getBytes();

        assertEquals(indexOfAttributeName(byteSrc, "attribute1".getBytes(), 0), src.indexOf("attribute1"));
        assertEquals(indexOfAttributeName(byteSrc, "attribute2".getBytes(), 0), src.indexOf("attribute2"));
        assertEquals(indexOfAttributeName(byteSrc, "attribute3".getBytes(), 0), src.indexOf("attribute3"));
    }

    @Test
    public void shouldReturnMinusOneIfAttributeNameBytesWereNotFound() {
        // "attribute1" is only a prefix of "attribute12", not a full name
        final String src = "<element attribute12=\"value1\"/>";
        final byte[] byteSrc = src.getBytes();

        assertEquals(indexOfAttributeName(byteSrc, "attribute1".getBytes(), 0), -1);
    }

    @Test
    public void shouldBeAbleToReplaceMoreBytesWithLessBytes() {
        final byte[] src = "\r\n\r\n text \r\n\r\n".getBytes();

        final byte[] newSrc = replaceAll(src, "\r\n".getBytes(), "\n".getBytes());

        assertEquals(newSrc, "\n\n text \n\n".getBytes());
    }

    @Test
    public void shouldBeAbleToReplaceLessBytesWithMoreBytes() {
        final byte[] src = "\n\n text \n\n text".getBytes();

        final byte[] newSrc = replaceAll(src, "\n".getBytes(), "\r\n".getBytes());

        assertEquals(newSrc, "\r\n\r\n text \r\n\r\n text".getBytes());
    }

    @Test
    public void shouldBeAbleToReplaceBytes() {
        final byte[] src = "\r\r text \r\r text \r\r text \r\r".getBytes();

        final byte[] newSrc = replaceAll(src, "\r".getBytes(), "\n".getBytes());

        assertEquals(newSrc, "\n\n text \n\n text \n\n text \n\n".getBytes());
    }

    @Test
    public void shouldNotReplaceBytesIfTargetBytesWereNotFound() {
        final byte[] src = "\n\n text \n\n text".getBytes();

        final byte[] newSrc = replaceAll(src, "\r\n".getBytes(), "\n".getBytes());

        assertEquals(newSrc, "\n\n text \n\n text".getBytes());
    }

    @Test
    public void shouldFindRootStart() {
        final String src = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
                           "<!-- Comment -->" +
                           "<!-- Another comment -->" +
                           "<root></root>";

        int start = rootStart(src.getBytes());

        assertEquals(start, src.indexOf("<root>"));
    }

    @Test
    public void shouldFindRootStartEvenIfCommentContainsStartCharacter() {
        // comments contain '<' characters that must not be mistaken for the root
        final String src = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
                           "<!-- Comment < < -->" +
                           "<!-- Another < < comment -->" +
                           "<root></root>";

        int start = rootStart(src.getBytes());

        assertEquals(start, src.indexOf("<root>"));
    }
}
epl-1.0
idserda/openhab
bundles/binding/org.openhab.binding.enocean.test/src/test/java/org/openhab/binding/enocean/internal/bus/RockerSwitchInRollershutterProfileTest.java
9107
/** * Copyright (c) 2010-2019 by the respective copyright holders. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.openhab.binding.enocean.internal.bus; import static org.junit.Assert.assertEquals; import org.junit.Before; import org.junit.Test; import org.opencean.core.address.EnoceanId; import org.opencean.core.address.EnoceanParameterAddress; import org.opencean.core.common.EEPId; import org.opencean.core.common.Parameter; import org.opencean.core.common.values.ButtonState; import org.openhab.core.library.items.RollershutterItem; import org.openhab.core.library.types.StopMoveType; import org.openhab.core.library.types.UpDownType; public class RockerSwitchInRollershutterProfileTest extends BasicBindingTest { private static final int LONG_PRESS_DELAY = 300 + 10; @Before public void setUpDefaultDevice() { parameterAddress = new EnoceanParameterAddress(EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID)); provider.setParameterAddress(parameterAddress); provider.setItem(new RollershutterItem("dummie")); provider.setEep(EEPId.EEP_F6_02_01); binding.addBindingProvider(provider); } @Test public void openShutterOnShortButtonPressUp() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); assertEquals("Update State", UpDownType.UP, publisher.popLastCommand()); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void closeShutterOnShortButtonPressDown() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), 
Parameter.I); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); assertEquals("Update State", UpDownType.DOWN, publisher.popLastCommand()); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void openShutterDuringLongButtonPressUp() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); assertEquals("Update State", UpDownType.UP, publisher.popLastCommand()); waitFor(LONG_PRESS_DELAY); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void closeShutterDuringLongButtonPressDown() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.I); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); assertEquals("Update State", UpDownType.DOWN, publisher.popLastCommand()); waitFor(LONG_PRESS_DELAY); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void stopShutterMovingUpOnShortPressUp() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); publisher.popLastCommand(); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, 
ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void stopShutterMovingDownOnShortPressDown() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.I); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); publisher.popLastCommand(); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void stopShutterMovingDownOnShortPressUp() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.I); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); publisher.popLastCommand(); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("No new state expected", null, publisher.popLastCommand()); waitFor(100); valueParameterAddress = new EnoceanParameterAddress(EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void stopShutterMovingUpOnShortPressDown() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); 
binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); publisher.popLastCommand(); waitFor(100); valueParameterAddress = new EnoceanParameterAddress(EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.I); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } @Test public void stopShutterMovingAndStartAgain() { EnoceanParameterAddress valueParameterAddress = new EnoceanParameterAddress( EnoceanId.fromString(EnoceanBindingProviderMock.DEVICE_ID), Parameter.O); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", UpDownType.UP, publisher.popLastCommand()); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", UpDownType.UP, publisher.popLastCommand()); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", StopMoveType.STOP, publisher.popLastCommand()); waitFor(100); binding.valueChanged(valueParameterAddress, ButtonState.PRESSED); binding.valueChanged(valueParameterAddress, ButtonState.RELEASED); assertEquals("Update State", UpDownType.UP, publisher.popLastCommand()); assertEquals("No new state expected", null, publisher.popLastCommand()); } private void waitFor(int i) { try { Thread.sleep(i); } catch (InterruptedException e) { // TODO 
Auto-generated catch block e.printStackTrace(); } } }
epl-1.0