repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
arshadalisoomro/pebble
src/test/java/net/sourceforge/pebble/dao/file/FileStaticPageDAOTest.java
3267
/* * Copyright (c) 2003-2011, Simon Brown * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * - Neither the name of Pebble nor the names of its contributors may * be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package net.sourceforge.pebble.dao.file; import net.sourceforge.pebble.dao.StaticPageDAO; import net.sourceforge.pebble.domain.SingleBlogTestCase; import net.sourceforge.pebble.domain.StaticPage; import net.sourceforge.pebble.util.FileUtils; import java.io.File; import java.util.Locale; /** * Tests for the FileStaticPageDAO class. 
* * @author Simon Brown */ public class FileStaticPageDAOTest extends SingleBlogTestCase { private StaticPageDAO dao= new FileStaticPageDAO(); private Locale defaultLocale; protected void setUp() throws Exception { super.setUp(); defaultLocale = Locale.getDefault(); Locale.setDefault(Locale.ENGLISH); } public void tearDown() throws Exception { super.tearDown(); Locale.setDefault(defaultLocale); } public void testLoadStaticPageFomFile() throws Exception { File source = new File(TEST_RESOURCE_LOCATION, "1152083300843.xml"); File destination = new File(blog.getRoot(), "pages/1152083300843"); destination.mkdirs(); FileUtils.copyFile(source, new File(destination, "1152083300843.xml")); StaticPage page = dao.loadStaticPage(blog, "1152083300843"); // test that the static page properties were loaded okay assertEquals("Static page title", page.getTitle()); assertEquals("Static page subtitle", page.getSubtitle()); assertEquals("<p>Static page body.</p>", page.getBody()); assertEquals("some tags", page.getTags()); assertEquals(1152083300843L, page.getDate().getTime()); assertEquals("http://pebble.sourceforge.net", page.getOriginalPermalink()); } }
bsd-3-clause
Crossy147/java-design-patterns
prototype/src/main/java/com/iluwatar/prototype/HeroFactoryImpl.java
2130
/** * The MIT License * Copyright (c) 2014-2016 Ilkka Seppälä * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package com.iluwatar.prototype; /** * * Concrete factory class. * */ public class HeroFactoryImpl implements HeroFactory { private Mage mage; private Warlord warlord; private Beast beast; /** * Constructor */ public HeroFactoryImpl(Mage mage, Warlord warlord, Beast beast) { this.mage = mage; this.warlord = warlord; this.beast = beast; } /** * Create mage */ public Mage createMage() { try { return mage.clone(); } catch (CloneNotSupportedException e) { return null; } } /** * Create warlord */ public Warlord createWarlord() { try { return warlord.clone(); } catch (CloneNotSupportedException e) { return null; } } /** * Create beast */ public Beast createBeast() { try { return beast.clone(); } catch (CloneNotSupportedException e) { return null; } } }
mit
keceli/RMG-Java
source/RMG/jing/param/Global.java
3742
// ////////////////////////////////////////////////////////////////////////////// // // RMG - Reaction Mechanism Generator // // Copyright (c) 2002-2011 Prof. William H. Green (whgreen@mit.edu) and the // RMG Team (rmg_dev@mit.edu) // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// // ////////////////////////////////////////////////////////////////////////////// package jing.param; public class Global { public static StringBuilder diagnosticInfo = new StringBuilder(); public static StringBuilder enlargerInfo = new StringBuilder( "Species \t singleReaction \t doubleReaction \t longestTime \t longestTemplate \t H_Abstractiontimes \n"); public static double RT_identifyReactedSites = 0; public static double RT_reactChemGraph = 0; public static double RT_findRateConstant = 0; public static long tAtInitialization; public static double makeSpecies = 0; public static double checkReactionReverse = 0; public static double makeTR = 0; // 10/25/07 gmagoon: commenting out global temp/pressure parameters, which should not be used if code is applied to // systems with multiple temperatures or pressures // public static Temperature temperature = new Temperature(); // public static Pressure pressure = new Pressure(); // 10/29/07 gmagoon: adding new global temperature variables for the high and low temperatures of the input range public static Temperature lowTemperature; public static Temperature highTemperature; public static Pressure lowPressure; public static Pressure highPressure; public static double solvertime = 0; // the time taken by just daspk. 
public static double writeSolverFile = 0; public static double readSolverFile = 0; public static double solverPrepossesor = 0; public static double transferReaction = 0; public static double speciesStatusGenerator = 0; public static int solverIterations = 0; public static double moveUnreactedToReacted = 0; public static double getReacFromStruc = 0; public static double generateReverse = 0; public static double chemkinThermo = 0; public static double chemkinReaction = 0; public static int maxRadNumForQM; // 5/13/08 gmagoon: added variables temporarily for automatic time-stepping timing; 6/25/08: commented out (along // with timing in JDASSL) // public static double JDASSLtime= 0; // public static double fortrantime= 0; // public static double timesteppingtime= 0; // 18-Jun-2009 MRH // Used these for profiling purposes for the functions // reactTwoReactants and identifyReactiveSites // public static int identifyReactiveSitesCount; // public static double RT_reactTwoReactants; }
mit
Snickermicker/smarthome
bundles/core/org.eclipse.smarthome.core.test/src/test/java/org/eclipse/smarthome/core/library/types/StringListTypeTest.java
3380
/** * Copyright (c) 2014,2019 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 */ package org.eclipse.smarthome.core.library.types; import static org.junit.Assert.*; import org.junit.Test; /** * @author Gaël L'hopital * @author Kai Kreuzer - added tests for valueOf and toFullString */ public class StringListTypeTest { @Test public void testSerializedEquals_simple() { final int DEST_IDX = 0; final int ORIG_IDX = 1; StringListType call1 = new StringListType("0179999998", "0699222222"); StringListType call2 = new StringListType("0699222222,0179999998"); assertEquals(call1.getValue(ORIG_IDX), call2.getValue(DEST_IDX)); assertEquals(call2.toString(), "0699222222,0179999998"); } @Test public void testSerializedEquals_withEscapedEntries() { String serialized = "value1,value2,value=with=foo,value\\,with\\,foo,,\\,\\,foo"; StringListType call4 = new StringListType(serialized); assertEquals("value2", call4.getValue(1)); assertTrue(call4.getValue(4).isEmpty()); assertEquals("value=with=foo", call4.getValue(2)); assertEquals("value,with,foo", call4.getValue(3)); assertEquals(",,foo", call4.getValue(5)); assertEquals(serialized, call4.toString()); } @Test public void testWithEmptyConstituents() { StringListType call1 = new StringListType(",0699222222"); assertEquals("", call1.getValue(0)); assertEquals("0699222222", call1.getValue(1)); StringListType call2 = new StringListType("0699222222,"); assertEquals("0699222222", call2.getValue(0)); assertEquals("", call2.getValue(1)); } @Test public void testError() { StringListType type = new StringListType("foo=bar", "electric", "chair"); try { // Index is between 0 and number of elements -1 @SuppressWarnings("unused") 
String value = type.getValue(-1); fail("-1 is an invalid index"); } catch (IllegalArgumentException e) { // That's what we expect. } try { @SuppressWarnings("unused") String value = type.getValue(3); fail("3 is an invalid index"); } catch (IllegalArgumentException e) { // That's what we expect. } } @Test public void testToFullString() { StringListType abc = new StringListType("a", "b", "c"); String fullString = abc.toFullString(); assertEquals("a,b,c", fullString); } @Test public void testValueOf_simple() { StringListType abc = StringListType.valueOf("a,b,c"); assertEquals("a", abc.getValue(0)); assertEquals("b", abc.getValue(1)); assertEquals("c", abc.getValue(2)); } @Test public void testValueOf_withEscapedEntries() { StringListType abC = StringListType.valueOf("a\\,b,c"); assertEquals("a,b", abC.getValue(0)); assertEquals("c", abC.getValue(1)); } }
epl-1.0
google-code-export/google-plugin-for-eclipse
plugins/com.google.gdt.eclipse.suite.test/src/com/google/gdt/eclipse/suite/launch/processors/WarArgumentProcessorTest.java
3612
/******************************************************************************* * Copyright 2011 Google Inc. All Rights Reserved. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package com.google.gdt.eclipse.suite.launch.processors; import com.google.appengine.eclipse.core.projects.GaeEnablingProjectCreationParticipant; import com.google.appengine.eclipse.core.sdk.GaeSdkTestUtilities; import com.google.gdt.eclipse.core.TestUtilities; import com.google.gdt.eclipse.core.WebAppUtilities; import com.google.gdt.eclipse.core.launch.LaunchConfigurationProcessorTestingHelper; import com.google.gdt.eclipse.core.launch.LaunchConfigurationProcessorUtilities; import com.google.gdt.eclipse.core.properties.WebAppProjectProperties; import com.google.gwt.eclipse.core.launch.processors.GwtLaunchConfigurationProcessorUtilities; import com.google.gwt.eclipse.core.projects.GwtEnablingProjectCreationParticipant; import com.google.gwt.eclipse.core.runtime.GwtRuntimeTestUtilities; import junit.framework.TestCase; import org.eclipse.core.runtime.Path; import org.eclipse.jdt.core.JavaCore; import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants; import java.util.List; /** * Tests the {@link WarArgumentProcessor}. 
*/ public class WarArgumentProcessorTest extends TestCase { private final LaunchConfigurationProcessorTestingHelper helper = new LaunchConfigurationProcessorTestingHelper(); @Override public void setUp() throws Exception { TestUtilities.setUp(); GaeSdkTestUtilities.addDefaultSdk(); GwtRuntimeTestUtilities.addDefaultRuntime(); helper.setUp(WarArgumentProcessorTest.class.getSimpleName(), new GwtEnablingProjectCreationParticipant(), new GaeEnablingProjectCreationParticipant()); } public void testWarArgPresenceForNonWebAppProject() throws Exception { // Get rid of the "web app"-ness WebAppProjectProperties.setWarSrcDir(helper.getProject(), new Path("")); assertFalse(WebAppUtilities.isWebApp(helper.getProject())); // Ensure the WarArgumentProcessor keeps the "-war", since the main // type is still one that uses it List<String> args = LaunchConfigurationProcessorUtilities.parseProgramArgs(helper.getLaunchConfig()); assertTrue(args.indexOf("-war") >= 0); new WarArgumentProcessor().update(helper.getLaunchConfig(), JavaCore.create(helper.getProject()), args, null); assertTrue(args.indexOf("-war") >= 0); // Drop to GWTShell main type, which does not use it, ensure the arg is // removed helper.getLaunchConfig().setAttribute( IJavaLaunchConfigurationConstants.ATTR_MAIN_TYPE_NAME, GwtLaunchConfigurationProcessorUtilities.GWT_SHELL_MAIN_TYPE); assertTrue(args.indexOf("-war") >= 0); new WarArgumentProcessor().update(helper.getLaunchConfig(), JavaCore.create(helper.getProject()), args, null); assertFalse(args.indexOf("-war") >= 0); } @Override protected void tearDown() throws Exception { helper.tearDown(); } }
epl-1.0
rex-xxx/mt6572_x201
frameworks/base/tools/layoutlib/create/src/com/android/tools/layoutlib/create/AsmGenerator.java
14700
/* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.layoutlib.create; import org.objectweb.asm.ClassReader; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.TreeMap; import java.util.jar.JarEntry; import java.util.jar.JarOutputStream; /** * Class that generates a new JAR from a list of classes, some of which are to be kept as-is * and some of which are to be stubbed partially or totally. */ public class AsmGenerator { /** Output logger. */ private final Log mLog; /** The path of the destination JAR to create. */ private final String mOsDestJar; /** List of classes to inject in the final JAR from _this_ archive. */ private final Class<?>[] mInjectClasses; /** The set of methods to stub out. */ private final Set<String> mStubMethods; /** All classes to output as-is, except if they have native methods. */ private Map<String, ClassReader> mKeep; /** All dependencies that must be completely stubbed. */ private Map<String, ClassReader> mDeps; /** Counter of number of classes renamed during transform. 
*/ private int mRenameCount; /** FQCN Names of the classes to rename: map old-FQCN => new-FQCN */ private final HashMap<String, String> mRenameClasses; /** FQCN Names of "old" classes that were NOT renamed. This starts with the full list of * old-FQCN to rename and they get erased as they get renamed. At the end, classes still * left here are not in the code base anymore and thus were not renamed. */ private HashSet<String> mClassesNotRenamed; /** A map { FQCN => set { list of return types to delete from the FQCN } }. */ private HashMap<String, Set<String>> mDeleteReturns; /** A map { FQCN => set { method names } } of methods to rewrite as delegates. * The special name {@link DelegateClassAdapter#ALL_NATIVES} can be used as in internal set. */ private final HashMap<String, Set<String>> mDelegateMethods; /** * Creates a new generator that can generate the output JAR with the stubbed classes. * * @param log Output logger. * @param osDestJar The path of the destination JAR to create. * @param createInfo Creation parameters. Must not be null. 
*/ public AsmGenerator(Log log, String osDestJar, ICreateInfo createInfo) { mLog = log; mOsDestJar = osDestJar; mInjectClasses = createInfo.getInjectedClasses(); mStubMethods = new HashSet<String>(Arrays.asList(createInfo.getOverriddenMethods())); // Create the map/set of methods to change to delegates mDelegateMethods = new HashMap<String, Set<String>>(); for (String signature : createInfo.getDelegateMethods()) { int pos = signature.indexOf('#'); if (pos <= 0 || pos >= signature.length() - 1) { continue; } String className = binaryToInternalClassName(signature.substring(0, pos)); String methodName = signature.substring(pos + 1); Set<String> methods = mDelegateMethods.get(className); if (methods == null) { methods = new HashSet<String>(); mDelegateMethods.put(className, methods); } methods.add(methodName); } for (String className : createInfo.getDelegateClassNatives()) { className = binaryToInternalClassName(className); Set<String> methods = mDelegateMethods.get(className); if (methods == null) { methods = new HashSet<String>(); mDelegateMethods.put(className, methods); } methods.add(DelegateClassAdapter.ALL_NATIVES); } // Create the map of classes to rename. mRenameClasses = new HashMap<String, String>(); mClassesNotRenamed = new HashSet<String>(); String[] renameClasses = createInfo.getRenamedClasses(); int n = renameClasses.length; for (int i = 0; i < n; i += 2) { assert i + 1 < n; // The ASM class names uses "/" separators, whereas regular FQCN use "." String oldFqcn = binaryToInternalClassName(renameClasses[i]); String newFqcn = binaryToInternalClassName(renameClasses[i + 1]); mRenameClasses.put(oldFqcn, newFqcn); mClassesNotRenamed.add(oldFqcn); } // create the map of renamed class -> return type of method to delete. 
mDeleteReturns = new HashMap<String, Set<String>>(); String[] deleteReturns = createInfo.getDeleteReturns(); Set<String> returnTypes = null; String renamedClass = null; for (String className : deleteReturns) { // if we reach the end of a section, add it to the main map if (className == null) { if (returnTypes != null) { mDeleteReturns.put(renamedClass, returnTypes); } renamedClass = null; continue; } // if the renamed class is null, this is the beginning of a section if (renamedClass == null) { renamedClass = binaryToInternalClassName(className); continue; } // just a standard return type, we add it to the list. if (returnTypes == null) { returnTypes = new HashSet<String>(); } returnTypes.add(binaryToInternalClassName(className)); } } /** * Returns the list of classes that have not been renamed yet. * <p/> * The names are "internal class names" rather than FQCN, i.e. they use "/" instead "." * as package separators. */ public Set<String> getClassesNotRenamed() { return mClassesNotRenamed; } /** * Utility that returns the internal ASM class name from a fully qualified binary class * name. E.g. it returns android/view/View from android.view.View. 
*/ String binaryToInternalClassName(String className) { if (className == null) { return null; } else { return className.replace('.', '/'); } } /** Sets the map of classes to output as-is, except if they have native methods */ public void setKeep(Map<String, ClassReader> keep) { mKeep = keep; } /** Sets the map of dependencies that must be completely stubbed */ public void setDeps(Map<String, ClassReader> deps) { mDeps = deps; } /** Gets the map of classes to output as-is, except if they have native methods */ public Map<String, ClassReader> getKeep() { return mKeep; } /** Gets the map of dependencies that must be completely stubbed */ public Map<String, ClassReader> getDeps() { return mDeps; } /** Generates the final JAR */ public void generate() throws FileNotFoundException, IOException { TreeMap<String, byte[]> all = new TreeMap<String, byte[]>(); for (Class<?> clazz : mInjectClasses) { String name = classToEntryPath(clazz); InputStream is = ClassLoader.getSystemResourceAsStream(name); ClassReader cr = new ClassReader(is); byte[] b = transform(cr, true /* stubNativesOnly */); name = classNameToEntryPath(transformName(cr.getClassName())); all.put(name, b); } for (Entry<String, ClassReader> entry : mDeps.entrySet()) { ClassReader cr = entry.getValue(); byte[] b = transform(cr, true /* stubNativesOnly */); String name = classNameToEntryPath(transformName(cr.getClassName())); all.put(name, b); } for (Entry<String, ClassReader> entry : mKeep.entrySet()) { ClassReader cr = entry.getValue(); byte[] b = transform(cr, true /* stubNativesOnly */); String name = classNameToEntryPath(transformName(cr.getClassName())); all.put(name, b); } mLog.info("# deps classes: %d", mDeps.size()); mLog.info("# keep classes: %d", mKeep.size()); mLog.info("# renamed : %d", mRenameCount); createJar(new FileOutputStream(mOsDestJar), all); mLog.info("Created JAR file %s", mOsDestJar); } /** * Writes the JAR file. * * @param outStream The file output stream were to write the JAR. 
* @param all The map of all classes to output. * @throws IOException if an I/O error has occurred */ void createJar(FileOutputStream outStream, Map<String,byte[]> all) throws IOException { JarOutputStream jar = new JarOutputStream(outStream); for (Entry<String, byte[]> entry : all.entrySet()) { String name = entry.getKey(); JarEntry jar_entry = new JarEntry(name); jar.putNextEntry(jar_entry); jar.write(entry.getValue()); jar.closeEntry(); } jar.flush(); jar.close(); } /** * Utility method that converts a fully qualified java name into a JAR entry path * e.g. for the input "android.view.View" it returns "android/view/View.class" */ String classNameToEntryPath(String className) { return className.replaceAll("\\.", "/").concat(".class"); } /** * Utility method to get the JAR entry path from a Class name. * e.g. it returns someting like "com/foo/OuterClass$InnerClass1$InnerClass2.class" */ private String classToEntryPath(Class<?> clazz) { String name = ""; Class<?> parent; while ((parent = clazz.getEnclosingClass()) != null) { name = "$" + clazz.getSimpleName() + name; clazz = parent; } return classNameToEntryPath(clazz.getCanonicalName() + name); } /** * Transforms a class. * <p/> * There are 3 kind of transformations: * * 1- For "mock" dependencies classes, we want to remove all code from methods and replace * by a stub. Native methods must be implemented with this stub too. Abstract methods are * left intact. Modified classes must be overridable (non-private, non-final). * Native methods must be made non-final, non-private. * * 2- For "keep" classes, we want to rewrite all native methods as indicated above. * If a class has native methods, it must also be made non-private, non-final. * * Note that unfortunately static methods cannot be changed to non-static (since static and * non-static are invoked differently.) 
*/ byte[] transform(ClassReader cr, boolean stubNativesOnly) { boolean hasNativeMethods = hasNativeMethods(cr); // Get the class name, as an internal name (e.g. com/android/SomeClass$InnerClass) String className = cr.getClassName(); String newName = transformName(className); // transformName returns its input argument if there's no need to rename the class if (newName != className) { mRenameCount++; // This class is being renamed, so remove it from the list of classes not renamed. mClassesNotRenamed.remove(className); } mLog.debug("Transform %s%s%s%s", className, newName == className ? "" : " (renamed to " + newName + ")", hasNativeMethods ? " -- has natives" : "", stubNativesOnly ? " -- stub natives only" : ""); // Rewrite the new class from scratch, without reusing the constant pool from the // original class reader. ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS); ClassVisitor rv = cw; if (newName != className) { rv = new RenameClassAdapter(cw, className, newName); } ClassVisitor cv = new TransformClassAdapter(mLog, mStubMethods, mDeleteReturns.get(className), newName, rv, stubNativesOnly, stubNativesOnly || hasNativeMethods); Set<String> delegateMethods = mDelegateMethods.get(className); if (delegateMethods != null && !delegateMethods.isEmpty()) { // If delegateMethods only contains one entry ALL_NATIVES and the class is // known to have no native methods, just skip this step. if (hasNativeMethods || !(delegateMethods.size() == 1 && delegateMethods.contains(DelegateClassAdapter.ALL_NATIVES))) { cv = new DelegateClassAdapter(mLog, cv, className, delegateMethods); } } cr.accept(cv, 0 /* flags */); return cw.toByteArray(); } /** * Should this class be renamed, this returns the new name. Otherwise it returns the * original name. * * @param className The internal ASM name of the class that may have to be renamed * @return A new transformed name or the original input argument. 
*/ String transformName(String className) { String newName = mRenameClasses.get(className); if (newName != null) { return newName; } int pos = className.indexOf('$'); if (pos > 0) { // Is this an inner class of a renamed class? String base = className.substring(0, pos); newName = mRenameClasses.get(base); if (newName != null) { return newName + className.substring(pos); } } return className; } /** * Returns true if a class has any native methods. */ boolean hasNativeMethods(ClassReader cr) { ClassHasNativeVisitor cv = new ClassHasNativeVisitor(); cr.accept(cv, 0 /* flags */); return cv.hasNativeMethods(); } }
gpl-2.0
jaechoon2/droidar
droidar/src/actions/ActionDoAlongAxis.java
2098
package actions; import gl.GLCamera; import util.Vec; import android.view.MotionEvent; /** * This uses the virtual camera rotation to map input from the touchscreen or * the trackball etc and do something along the virtual camera axes (eg camera * movement or object movement or anything else). without mapping it to the * current camera rotation, a x+10 movement would always be along the virtual x * axis and not along the current camera x axis. * * @author Spobo * */ public abstract class ActionDoAlongAxis extends Action { protected GLCamera myTargetCamera; private float myTrackballFactor; private final float myTouchscreenReductionFactor; private Vec movementVec = new Vec(); /** * @param camera * @param trackballFactor * should be around 2-15 * @param touchscreenFactor * 25 would be good value to start.The higher the value the * slower the movement */ public ActionDoAlongAxis(GLCamera camera, float trackballFactor, float touchscreenFactor) { myTargetCamera = camera; myTrackballFactor = trackballFactor; myTouchscreenReductionFactor = touchscreenFactor; } @Override public boolean onTrackballEvent(float x, float y, MotionEvent event) { AlignAcordingToViewAxes(x * myTrackballFactor, -y * myTrackballFactor); return true; } @Override public boolean onTouchMove(MotionEvent e1, MotionEvent e2, float screenDeltaX, float screenDeltaY) { AlignAcordingToViewAxes(screenDeltaX / myTouchscreenReductionFactor, -screenDeltaY / myTouchscreenReductionFactor); return true; } /** * This is where the magic happens. The input movement is mapped according * to the virtual camera rotation around the z axis to do the movement * "along the axes" * * @param x * @param y */ private void AlignAcordingToViewAxes(float x, float y) { movementVec.x = x; movementVec.y = y; movementVec.rotateAroundZAxis(360 - (myTargetCamera .getCameraAnglesInDegree()[0])); doAlongViewAxis(movementVec.x, movementVec.y); } public abstract void doAlongViewAxis(float x, float y); }
gpl-3.0
sbbic/core
qadevOOo/tests/java/mod/_sch/ChartLegend.java
4464
/* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ package mod._sch; import java.io.PrintWriter; import lib.TestCase; import lib.TestEnvironment; import lib.TestParameters; import util.SOfficeFactory; import util.utils; import com.sun.star.chart.XChartDocument; import com.sun.star.drawing.XShape; import com.sun.star.lang.XComponent; import com.sun.star.uno.UnoRuntime; /** * Test for object which is represented by service * <code>com.sun.star.chart.ChartLegend</code>. 
<p> * Object implements the following interfaces : * <ul> * <li> <code>com::sun::star::drawing::FillProperties</code></li> * <li> <code>com::sun::star::drawing::XShape</code></li> * <li> <code>com::sun::star::drawing::Shape</code></li> * <li> <code>com::sun::star::chart::ChartLegend</code></li> * <li> <code>com::sun::star::drawing::LineProperties</code></li> * <li> <code>com::sun::star::beans::XPropertySet</code></li> * <li> <code>com::sun::star::style::CharacterProperties</code></li> * <li> <code>com::sun::star::drawing::XShapeDescriptor</code></li> * <li> <code>com::sun::star::lang::XComponent</code></li> * </ul> * The following files used by this test : * <ul> * <li><b> TransparencyChart.sxs </b> : to load predefined chart * document where two 'automatic' transparency styles exists : * 'Transparency 1' and 'Transparency 2'.</li> * </ul> <p> * @see com.sun.star.drawing.FillProperties * @see com.sun.star.drawing.XShape * @see com.sun.star.drawing.Shape * @see com.sun.star.chart.ChartLegend * @see com.sun.star.drawing.LineProperties * @see com.sun.star.beans.XPropertySet * @see com.sun.star.style.CharacterProperties * @see com.sun.star.drawing.XShapeDescriptor * @see com.sun.star.lang.XComponent * @see ifc.drawing._FillProperties * @see ifc.drawing._XShape * @see ifc.drawing._Shape * @see ifc.chart._ChartLegend * @see ifc.drawing._LineProperties * @see ifc.beans._XPropertySet * @see ifc.style._CharacterProperties * @see ifc.drawing._XShapeDescriptor * @see ifc.lang._XComponent */ public class ChartLegend extends TestCase { XChartDocument xChartDoc = null; /** * Creates Chart document. 
*/ @Override protected void initialize( TestParameters tParam, PrintWriter log ) throws Exception { // get a soffice factory object SOfficeFactory SOF = SOfficeFactory.getFactory( tParam.getMSF()); log.println( "creating a chartdocument" ); XComponent xComp = SOF.loadDocument( utils.getFullTestURL("TransparencyChart.sxs")); xChartDoc = UnoRuntime.queryInterface(XChartDocument.class,xComp); } /** * Disposes Chart document. */ @Override protected void cleanup( TestParameters tParam, PrintWriter log ) { if( xChartDoc!=null ) { log.println( " closing xChartDoc" ); util.DesktopTools.closeDoc(xChartDoc); xChartDoc = null; } } /** * Creating a TestEnvironment for the interfaces to be tested. * Retrieves the diagram of the chart document. The retrieved * diagram is the instance of the service * <code>com.sun.star.chart.ChartLegend</code>. */ @Override protected TestEnvironment createTestEnvironment(TestParameters Param, PrintWriter log) { XShape oObj = null; // get the Legend log.println( "getting Legend" ); oObj = xChartDoc.getLegend(); log.println( "creating a new environment for chartdocument object" ); TestEnvironment tEnv = new TestEnvironment( oObj ); tEnv.addObjRelation("NoSetSize", "sch.ChartLegend"); return tEnv; } // finish method getTestEnvironment } // finish class ChartLegend
gpl-3.0
lei-cao/zoj
judge_server/src/main/cn/edu/zju/acm/onlinejudge/persistence/sql/SubmissionPersistenceImpl.java
53349
/* * Copyright 2007 Zhang, Zheng <oldbig@gmail.com> Chen, Zhengguang <cerrorism@gmail.com> Xu, Chuan <xuchuan@gmail.com> * * This file is part of ZOJ. * * ZOJ is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either revision 3 of the License, or (at your option) any later revision. * * ZOJ is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with ZOJ. if not, see * <http://www.gnu.org/licenses/>. */ package cn.edu.zju.acm.onlinejudge.persistence.sql; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Timestamp; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; import cn.edu.zju.acm.onlinejudge.bean.Problem; import cn.edu.zju.acm.onlinejudge.bean.QQ; import cn.edu.zju.acm.onlinejudge.bean.Submission; import cn.edu.zju.acm.onlinejudge.bean.UserProfile; import cn.edu.zju.acm.onlinejudge.bean.enumeration.JudgeReply; import cn.edu.zju.acm.onlinejudge.bean.enumeration.Language; import cn.edu.zju.acm.onlinejudge.bean.request.SubmissionCriteria; import cn.edu.zju.acm.onlinejudge.persistence.PersistenceException; import cn.edu.zju.acm.onlinejudge.persistence.SubmissionPersistence; import cn.edu.zju.acm.onlinejudge.util.ContestStatistics; import cn.edu.zju.acm.onlinejudge.util.PersistenceManager; import cn.edu.zju.acm.onlinejudge.util.ProblemStatistics; import cn.edu.zju.acm.onlinejudge.util.ProblemsetRankList; import 
cn.edu.zju.acm.onlinejudge.util.RankListEntry; import cn.edu.zju.acm.onlinejudge.util.UserStatistics; /** * <p> * SubmissionPersistenceImpl implements SubmissionPersistence interface. * </p> * <p> * SubmissionPersistence interface defines the API used to manager the submission related affairs in persistence layer. * </p> * * @version 2.0 * @author Zhang, Zheng * @author Xu, Chuan * @author Chen, Zhengguang */ public class SubmissionPersistenceImpl implements SubmissionPersistence { /** * The statement to create a Submission. */ private static final String INSERT_SUBMISSION = MessageFormat.format("INSERT INTO {0} ({1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}," + " {12}, {13}, {14}, {15}, {16}, {17}) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)", new Object[] {DatabaseConstants.SUBMISSION_TABLE, DatabaseConstants.SUBMISSION_PROBLEM_ID, DatabaseConstants.SUBMISSION_LANGUAGE_ID, DatabaseConstants.SUBMISSION_JUDGE_REPLY_ID, DatabaseConstants.SUBMISSION_USER_PROFILE_ID, DatabaseConstants.SUBMISSION_CONTENT, DatabaseConstants.SUBMISSION_TIME_CONSUMPTION, DatabaseConstants.SUBMISSION_MEMORY_CONSUMPTION, DatabaseConstants.SUBMISSION_SUBMISSION_DATE, DatabaseConstants.SUBMISSION_JUDGE_DATE, DatabaseConstants.SUBMISSION_JUDGE_COMMENT, DatabaseConstants.CREATE_USER, DatabaseConstants.CREATE_DATE, DatabaseConstants.LAST_UPDATE_USER, DatabaseConstants.LAST_UPDATE_DATE, "contest_id", "contest_order", DatabaseConstants.SUBMISSION_ACTIVE}); /** * The statement to update a Submission. */ private static final String UPDATE_SUBMISSION = MessageFormat.format("UPDATE {0} SET {1}=?, {2}=?, {3}=?, {4}=?, {5}=?, {6}=?, {7}=?, {8}=?, " + "{9}=?, {10}=?, {11}=?, {12}=? 
WHERE {13}=?", new Object[] {DatabaseConstants.SUBMISSION_TABLE, DatabaseConstants.SUBMISSION_PROBLEM_ID, DatabaseConstants.SUBMISSION_LANGUAGE_ID, DatabaseConstants.SUBMISSION_JUDGE_REPLY_ID, DatabaseConstants.SUBMISSION_USER_PROFILE_ID, DatabaseConstants.SUBMISSION_TIME_CONSUMPTION, DatabaseConstants.SUBMISSION_MEMORY_CONSUMPTION, DatabaseConstants.SUBMISSION_SUBMISSION_DATE, DatabaseConstants.SUBMISSION_JUDGE_DATE, DatabaseConstants.SUBMISSION_JUDGE_COMMENT, DatabaseConstants.LAST_UPDATE_USER, DatabaseConstants.LAST_UPDATE_DATE, DatabaseConstants.SUBMISSION_CONTENT, DatabaseConstants.SUBMISSION_SUBMISSION_ID}); /** * The statement to update a Submission. */ private static final String UPDATE_SUBMISSION_WITHOUT_CONTENT = MessageFormat.format("UPDATE {0} SET {1}=?, {2}=?, {3}=?, {4}=?, {5}=?, {6}=?, {7}=?, {8}=?, " + "{9}=?, {10}=?, {11}=? WHERE {12}=?", new Object[] {DatabaseConstants.SUBMISSION_TABLE, DatabaseConstants.SUBMISSION_PROBLEM_ID, DatabaseConstants.SUBMISSION_LANGUAGE_ID, DatabaseConstants.SUBMISSION_JUDGE_REPLY_ID, DatabaseConstants.SUBMISSION_USER_PROFILE_ID, DatabaseConstants.SUBMISSION_TIME_CONSUMPTION, DatabaseConstants.SUBMISSION_MEMORY_CONSUMPTION, DatabaseConstants.SUBMISSION_SUBMISSION_DATE, DatabaseConstants.SUBMISSION_JUDGE_DATE, DatabaseConstants.SUBMISSION_JUDGE_COMMENT, DatabaseConstants.LAST_UPDATE_USER, DatabaseConstants.LAST_UPDATE_DATE, DatabaseConstants.SUBMISSION_SUBMISSION_ID}); /** * The statement to delete a submission. */ private static final String INACTIVE_SUBMISSION = MessageFormat.format("UPDATE {0} SET {1}=0, {2}=?, {3}=? 
WHERE {4}=?", new Object[] {DatabaseConstants.SUBMISSION_TABLE, DatabaseConstants.SUBMISSION_ACTIVE, DatabaseConstants.LAST_UPDATE_USER, DatabaseConstants.LAST_UPDATE_DATE, DatabaseConstants.SUBMISSION_SUBMISSION_ID}); private static final String GET_SUBMISSION_PREFIX = "SELECT s.submission_id,s.problem_id,s.language_id,s.judge_reply_id,s.user_profile_id,s.time_consumption," + "s.memory_consumption,s.submission_date,s.judge_date,s.judge_comment,s.contest_id,s.contest_order,u.handle,u.nickname,p.code"; private static final String GET_SUBMISSION_WITH_CONTENT_PREFIX = SubmissionPersistenceImpl.GET_SUBMISSION_PREFIX + ",s.content"; private static final String GET_SUBMISSION_FROM_PART = " FROM submission s FORCE_INDEX " + "LEFT JOIN user_profile u ON s.user_profile_id = u.user_profile_id " + "LEFT JOIN problem p ON s.problem_id = p.problem_id " + "WHERE s.active=1 AND u.active=1 AND p.active=1 "; private static final String GET_SUBMISSION = SubmissionPersistenceImpl.GET_SUBMISSION_WITH_CONTENT_PREFIX + SubmissionPersistenceImpl.GET_SUBMISSION_FROM_PART + " AND s.submission_id=?"; private static final String GET_SUBMISSIONS = SubmissionPersistenceImpl.GET_SUBMISSION_PREFIX + SubmissionPersistenceImpl.GET_SUBMISSION_FROM_PART; /** * The query to get submissions. */ private static final String GET_SUBMISSIONS_WITH_CONTENT = SubmissionPersistenceImpl.GET_SUBMISSION_WITH_CONTENT_PREFIX + SubmissionPersistenceImpl.GET_SUBMISSION_FROM_PART; /** * <p> * Creates the specified submission in persistence layer. 
     * </p>
     *
     * @param submission
     *            the Submission instance to create
     * @param user
     *            the id of the user who made this modification
     * @throws PersistenceException
     *             wrapping a persistence implementation specific exception
     */
    public void createSubmission(Submission submission, long user) throws PersistenceException {
        Connection conn = null;
        try {
            conn = Database.createConnection();
            conn.setAutoCommit(false);
            PreparedStatement ps = null;
            String maxOrder = null;
            try {
                // Next contest_order = current max + 1 within the contest.
                // NOTE(review): this read-then-insert is not atomic; two
                // concurrent submissions to the same contest could compute the
                // same contest_order -- confirm the isolation level / locking.
                // (contest_id is a long from the bean, so the concatenation
                // below is not an injection risk.)
                ps = conn.prepareStatement("select max(contest_order) from submission where contest_id=" +
                        submission.getContestId());
                ResultSet rs = ps.executeQuery();
                if (rs.next()) {
                    maxOrder = rs.getString(1);
                }
            } finally {
                Database.dispose(ps);
            }
            // No rows yet for this contest -> start the order at 0.
            long count = maxOrder == null ? 0 : Long.parseLong(maxOrder) + 1;
            submission.setContestOrder(count);
            try {
                // create the submission
                ps = conn.prepareStatement(SubmissionPersistenceImpl.INSERT_SUBMISSION);
                ps.setLong(1, submission.getProblemId());
                ps.setLong(2, submission.getLanguage().getId());
                ps.setLong(3, submission.getJudgeReply().getId());
                ps.setLong(4, submission.getUserProfileId());
                ps.setString(5, submission.getContent());
                // Parameter 10 (judge comment) is set out of numeric order;
                // the indexes still line up with the INSERT placeholders.
                ps.setString(10, submission.getJudgeComment());
                ps.setInt(6, submission.getTimeConsumption());
                ps.setInt(7, submission.getMemoryConsumption());
                ps.setTimestamp(8, Database.toTimestamp(submission.getSubmitDate()));
                ps.setTimestamp(9, Database.toTimestamp(submission.getJudgeDate()));
                ps.setLong(11, user);
                ps.setTimestamp(12, new Timestamp(new Date().getTime()));
                ps.setLong(13, user);
                ps.setTimestamp(14, new Timestamp(new Date().getTime()));
                ps.setLong(15, submission.getContestId());
                ps.setLong(16, submission.getContestOrder());
                ps.executeUpdate();
            } finally {
                Database.dispose(ps);
            }
            // Propagate the generated key back into the bean before committing.
            submission.setId(Database.getLastId(conn));
            conn.commit();
        } catch (Exception e) {
            Database.rollback(conn);
            throw new PersistenceException("Failed to insert submission.", e);
        } finally {
            Database.dispose(conn);
        }
    }

    /**
     * <p>
     * Updates the specified submission in persistence
layer.
     * </p>
     *
     * @param submission
     *            the Submission instance to update
     * @param user
     *            the id of the user who made this modification
     * @throws PersistenceException
     *             wrapping a persistence implementation specific exception
     */
    public void updateSubmission(Submission submission, long user) throws PersistenceException {
        Connection conn = null;
        try {
            conn = Database.createConnection();
            conn.setAutoCommit(false);
            // update the submission
            PreparedStatement ps = null;
            try {
                // A null content means "leave the stored source untouched":
                // a different UPDATE statement (without the content column) is
                // used, and the parameter indexes below shift accordingly.
                ps = conn
                        .prepareStatement(submission.getContent() == null ? SubmissionPersistenceImpl.UPDATE_SUBMISSION_WITHOUT_CONTENT
                                : SubmissionPersistenceImpl.UPDATE_SUBMISSION);
                ps.setLong(1, submission.getProblemId());
                ps.setLong(2, submission.getLanguage().getId());
                ps.setLong(3, submission.getJudgeReply().getId());
                ps.setLong(4, submission.getUserProfileId());
                ps.setInt(5, submission.getTimeConsumption());
                ps.setInt(6, submission.getMemoryConsumption());
                ps.setTimestamp(7, Database.toTimestamp(submission.getSubmitDate()));
                ps.setTimestamp(8, Database.toTimestamp(submission.getJudgeDate()));
                ps.setString(9, submission.getJudgeComment());
                ps.setLong(10, user);
                ps.setTimestamp(11, new Timestamp(new Date().getTime()));
                if (submission.getContent() == null) {
                    // Without content: parameter 12 is the WHERE id.
                    ps.setLong(12, submission.getId());
                } else {
                    // With content: 12 is the content column, 13 the WHERE id.
                    ps.setString(12, submission.getContent());
                    ps.setLong(13, submission.getId());
                }
                ps.executeUpdate();
            } finally {
                Database.dispose(ps);
            }
            // TODO(ob): update the user statistics if no tiger?
            conn.commit();
        } catch (Exception e) {
            Database.rollback(conn);
            throw new PersistenceException("Failed to update submission.", e);
        } finally {
            Database.dispose(conn);
        }
    }

    /**
     * <p>
     * Deletes the specified submission in persistence layer.
     * </p>
     *
     * @param id
     *            the id of the submission to delete
     * @param user
     *            the id of the user who made this modification
     * @throws PersistenceException
     *             wrapping a persistence implementation specific exception
     */
    public void deleteSubmission(long id, long user) throws PersistenceException {
        Connection conn = null;
        try {
            conn = Database.createConnection();
            PreparedStatement ps = null;
            try {
                // Soft delete: flips the row's active flag to 0 instead of
                // physically removing it (see INACTIVE_SUBMISSION).
                ps = conn.prepareStatement(SubmissionPersistenceImpl.INACTIVE_SUBMISSION);
                ps.setLong(1, user);
                ps.setTimestamp(2, new Timestamp(new Date().getTime()));
                ps.setLong(3, id);
                if (ps.executeUpdate() == 0) {
                    throw new PersistenceException("no such submission");
                }
            } finally {
                Database.dispose(ps);
            }
        } catch (Exception e) {
            throw new PersistenceException("Failed to delete submission.", e);
        } finally {
            Database.dispose(conn);
        }
    }

    /**
     * <p>
     * Gets the submission with given id in persistence layer.
     * </p>
     *
     * @param id
     *            the id of the submission
     * @return the submission with given id in persistence layer, or null if
     *         there is no active submission with that id
     * @throws PersistenceException
     *             wrapping a persistence implementation specific exception
     */
    public Submission getSubmission(long id) throws PersistenceException {
        Connection conn = null;
        try {
            conn = Database.createConnection();
            PreparedStatement ps = null;
            try {
                // The FORCE_INDEX placeholder is dropped for this single-row
                // lookup; searches substitute a real index hint instead.
                ps = conn.prepareStatement(SubmissionPersistenceImpl.GET_SUBMISSION.replace("FORCE_INDEX", ""));
                ps.setLong(1, id);
                ResultSet rs = ps.executeQuery();
                if (!rs.next()) {
                    return null;
                }
                Map<Long, Language> languageMap =
                    PersistenceManager.getInstance().getLanguagePersistence().getLanguageMap();
                Submission submission = this.populateSubmission(rs, true, languageMap);
                return submission;
            } finally {
                Database.dispose(ps);
            }
        } catch (SQLException e) {
            throw new PersistenceException("Failed to get the submission with id " + id, e);
        } finally {
            Database.dispose(conn);
        }
    }

    // Returns the stored source code of the submission with the given id, or
    // "" when the content column is NULL. Unlike getSubmission, a missing row
    // raises a PersistenceException rather than returning null.
    public String getSubmissionSource(long id) throws PersistenceException {
        Connection conn = null;
        try {
            conn = Database.createConnection();
            PreparedStatement ps = null;
            try {
                ps = conn.prepareStatement("SELECT content FROM submission WHERE submission_id=?");
                ps.setLong(1, id);
                ResultSet rs = ps.executeQuery();
                if (!rs.next()) {
                    throw new PersistenceException("Submission id " + id + " not found");
                }
                String content = rs.getString("content");
                if (content == null) {
                    return "";
                } else {
                    return content;
                }
            } finally {
                Database.dispose(ps);
            }
        } catch (SQLException e) {
            throw new PersistenceException("Failed to get the submission with id " + id, e);
        } finally {
            Database.dispose(conn);
        }
    }

    /**
     * Populates an ExtendedSubmission with given ResultSet.
     *
     * @param rs the result set positioned on the row to read
     * @param withContent whether the "content" column is present in the result set
     * @param languageMap id-to-Language lookup used to resolve the language column
     * @return an ExtendedSubmission instance
     * @throws SQLException
     */
    private Submission populateSubmission(ResultSet rs, boolean withContent, Map<Long, Language> languageMap) throws SQLException {
        Submission submission = new Submission();
        submission.setId(rs.getLong(DatabaseConstants.SUBMISSION_SUBMISSION_ID));
        submission.setProblemId(rs.getLong(DatabaseConstants.SUBMISSION_PROBLEM_ID));
        submission.setUserProfileId(rs.getLong(DatabaseConstants.SUBMISSION_USER_PROFILE_ID));
        submission.setJudgeComment(rs.getString(DatabaseConstants.SUBMISSION_JUDGE_COMMENT));
        submission.setJudgeDate(Database.getDate(rs, DatabaseConstants.SUBMISSION_JUDGE_DATE));
        submission.setSubmitDate(Database.getDate(rs, DatabaseConstants.SUBMISSION_SUBMISSION_DATE));
        submission.setMemoryConsumption(rs.getInt(DatabaseConstants.SUBMISSION_MEMORY_CONSUMPTION));
        submission.setTimeConsumption(rs.getInt(DatabaseConstants.SUBMISSION_TIME_CONSUMPTION));
        // Prefer the nickname; fall back to the handle when the nickname is
        // empty. NOTE(review): a NULL nickname column would make getUserName()
        // return null and the equals("") call throw NPE -- confirm the column
        // is NOT NULL or defaulted to "".
        submission.setUserName(rs.getString(DatabaseConstants.USER_PROFILE_NICKNAME));
        if(submission.getUserName().equals("")) {
            submission.setUserName(rs.getString(DatabaseConstants.USER_PROFILE_HANDLE));
        }
        submission.setProblemCode(rs.getString(DatabaseConstants.PROBLEM_CODE));
        submission.setContestId(rs.getLong("contest_id"));
        submission.setContestOrder(rs.getLong("contest_order"));
        if (withContent) {
            submission.setContent(rs.getString("content"));
        }
        // set language
        long languageId = rs.getLong(DatabaseConstants.SUBMISSION_LANGUAGE_ID);
        Language language = languageMap.get(languageId);
        submission.setLanguage(language);
        // set judge reply
        long judgeReplyId = rs.getLong(DatabaseConstants.SUBMISSION_JUDGE_REPLY_ID);
        JudgeReply judgeReply = JudgeReply.findById(judgeReplyId);
        submission.setJudgeReply(judgeReply);
        return submission;
    }

    /**
     * <p>
     * Searches all submissions according with the given criteria in persistence layer.
     * </p>
     * Convenience overload: delegates to the 5-argument variant with
     * withContent=false (submission source is not fetched).
     *
     * @return a list of submissions according with the given criteria
     * @param criteria
     *            the submission search criteria
     * @param firstId
     *            the first id (exclusive lower bound of contest_order)
     * @param lastId
     *            the last id
     * @param count
     *            the maximum number of submissions in returned list
     * @throws PersistenceException
     *             wrapping a persistence implementation specific exception
     */
    public List<Submission> searchSubmissions(SubmissionCriteria criteria, long firstId, long lastId, int count)
        throws PersistenceException {
        return this.searchSubmissions(criteria, firstId, lastId, count, false);
    }

    /**
     * <p>
     * Searches all submissions according with the given criteria in persistence layer.
* </p> * * @return a list of submissions according with the given criteria * @param criteria * the submission search criteria * @param lastId * the last id * @param count * the maximum number of submissions in returned list * @throws PersistenceException * wrapping a persistence implementation specific exception */ public List<Submission> searchSubmissions(SubmissionCriteria criteria, long firstId, long lastId, int count, boolean withContent) throws PersistenceException { if (lastId < 0) { throw new IllegalArgumentException("offset is negative"); } if (count < 0) { throw new IllegalArgumentException("count is negative"); } Connection conn = null; Map<Long, Language> languageMap = PersistenceManager.getInstance().getLanguagePersistence().getLanguageMap(); try { conn = Database.createConnection(); PreparedStatement ps = null; if (criteria.getUserId() == null && criteria.getHandle() != null) { try { ps = conn.prepareStatement("select user_profile_id from user_profile where handle=? AND active=1"); ps.setString(1, criteria.getHandle()); ResultSet rs = ps.executeQuery(); if (!rs.next()) { return new ArrayList<Submission>(); } long userId = rs.getLong(1); criteria.setUserId(userId); } finally { Database.dispose(ps); } } if (criteria.getProblemId() == null && criteria.getProblemCode() != null) { try { ps = conn .prepareStatement("select problem_id from problem where code=? AND contest_id=? AND active=1"); ps.setString(1, criteria.getProblemCode()); ps.setLong(2, criteria.getContestId()); ResultSet rs = ps.executeQuery(); if (!rs.next()) { return new ArrayList<Submission>(); } long problemId = rs.getLong(1); criteria.setProblemId(problemId); } finally { Database.dispose(ps); } } try { ps = this.buildQuery(withContent ? 
SubmissionPersistenceImpl.GET_SUBMISSIONS_WITH_CONTENT : SubmissionPersistenceImpl.GET_SUBMISSIONS, criteria, firstId, lastId, count, conn); if (ps == null) { return new ArrayList<Submission>(); } ResultSet rs = ps.executeQuery(); List<Submission> submissions = new ArrayList<Submission>(); while (rs.next()) { Submission submission = this.populateSubmission(rs, withContent, languageMap); submissions.add(submission); } return submissions; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the submissions", e); } finally { Database.dispose(conn); } } /** * Build search query. * * @param criteria * @param lastId * @param count * @param conn * @param ps * @param rs * @return search query. * @throws SQLException */ private PreparedStatement buildQuery(String perfix, SubmissionCriteria criteria, long firstId, long lastId, int count, Connection conn) throws SQLException { // String userIndex = "index_submission_user"; // String problemIndex = "index_submission_problem"; String userIndex = "index_submission_user_reply_contest"; String problemIndex = "index_submission_problem_reply"; String judgeReplyIndex = "fk_submission_reply"; String defaultIndex = "index_submission_contest_order"; Set<String> easyProblems = new HashSet<String>(Arrays.asList(new String[] {"2060", "1180", "1067", "1292", "1295", "1951", "1025", "2095", "2105", "1008", "1005", "1152", "1240", "2107", "1037", "1205", "1113", "1045", "1489", "1241", "1101", "1049", "1057", "1003", "1151", "1048", "1002", "1115", "1001"})); Set<JudgeReply> easyJudgeReply = new HashSet<JudgeReply>(Arrays.asList(new JudgeReply[] {JudgeReply.ACCEPTED, JudgeReply.WRONG_ANSWER, JudgeReply.TIME_LIMIT_EXCEEDED, JudgeReply.MEMORY_LIMIT_EXCEEDED, JudgeReply.SEGMENTATION_FAULT, JudgeReply.COMPILATION_ERROR, JudgeReply.PRESENTATION_ERROR})); /* * INDEX optimization If user id presents, use fk_submission_user If problem id presents and submission number < * 5000, use 
fk_submission_problem; If judge_reply_id presents and none of id is 4,5,6,7,12,13 or 16, use * fk_submission_reply when otherwise use index_submission_contest_order; */ String order = firstId == -1 ? "DESC" : "ASC"; if (criteria.getIdStart() != null && firstId < criteria.getIdStart() - 1) { firstId = criteria.getIdStart() - 1; } if (criteria.getIdEnd() != null && lastId > criteria.getIdEnd() + 1) { lastId = criteria.getIdEnd() + 1; } StringBuilder query = new StringBuilder(); query.append(perfix); query.append(" AND s.contest_id=" + criteria.getContestId()); query.append(" AND contest_order BETWEEN " + (firstId + 1) + " and " + (lastId - 1)); String index = null; if (criteria.getUserId() != null) { query.append(" AND s.user_profile_id=" + criteria.getUserId()); index = userIndex; } if (criteria.getProblemId() != null) { query.append(" AND s.problem_id=" + criteria.getProblemId()); if (index == null && !easyProblems.contains(criteria.getProblemCode())) { index = problemIndex; } } String inCondition = null; if (criteria.getJudgeReplies() != null) { if (criteria.getJudgeReplies().size() == 0) { return null; } List<Long> judgeRepliesIds = new ArrayList<Long>(); boolean easy = false; for (JudgeReply judgeReply : criteria.getJudgeReplies()) { judgeRepliesIds.add(judgeReply.getId()); if (easyJudgeReply.contains(judgeReply)) { easy = true; } } inCondition = " AND s.judge_reply_id IN " + Database.createNumberValues(judgeRepliesIds); query.append(inCondition); if (index == null && !easy) { if (criteria.getProblemId() != null) { index = problemIndex; } else { index = judgeReplyIndex; } } } PreparedStatement ps = null; if (index == null && criteria.getJudgeReplies() != null && criteria.getProblemId() != null) { try { ps = conn.prepareStatement("SELECT count(*) from submission s where problem_id=" + criteria.getProblemId() + inCondition); ResultSet rs = ps.executeQuery(); rs.next(); long cnt = rs.getLong(1); if (cnt < 10000) { index = problemIndex; } } finally { 
Database.dispose(ps); } } if (criteria.getLanguages() != null) { if (criteria.getLanguages().size() == 0) { return null; } List<Long> languageIds = new ArrayList<Long>(); for (Language language : criteria.getLanguages()) { languageIds.add(language.getId()); } query.append(" AND s.language_id IN " + Database.createNumberValues(languageIds)); } query.append(" ORDER BY contest_order " + order); query.append(" LIMIT " + count); if (index == null) { index = defaultIndex; } String queryString = query.toString().replace("FORCE_INDEX", "USE INDEX (" + index + ")"); return conn.prepareStatement(queryString); } public ContestStatistics getContestStatistics(List<Problem> problems) throws PersistenceException { Connection conn = null; ContestStatistics statistics = new ContestStatistics(problems); if (problems.size() == 0) { return statistics; } try { conn = Database.createConnection(); List<Long> problemIds = new ArrayList<Long>(); for (Problem problem : problems) { problemIds.add(new Long(((Problem) problem).getId())); } String inProblemIds = Database.createNumberValues(problemIds); String query = "SELECT problem_id, judge_reply_id, count(*) FROM submission " + "WHERE problem_id IN " + inProblemIds + " GROUP BY problem_id, judge_reply_id"; /* * String query = "SELECT problem_id, judge_reply_id, count FROM problem_statistics " + * "WHERE problem_id IN " + inProblemIds; */ PreparedStatement ps = null; try { ps = conn.prepareStatement(query); ResultSet rs = ps.executeQuery(); while (rs.next()) { long problemId = rs.getLong(1); long judgeReplyId = rs.getLong(2); int value = rs.getInt(3); statistics.setCount(problemId, judgeReplyId, value); } return statistics; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the statistics", e); } finally { Database.dispose(conn); } } public List<RankListEntry> getRankList(List<Problem> problems, long contestStartDate) throws PersistenceException { return this.getRankList(problems, 
contestStartDate, -1); } public List<RankListEntry> getRankList(List<Problem> problems, long contestStartDate, long roleId) throws PersistenceException { Connection conn = null; if (problems.size() == 0) { return new ArrayList<RankListEntry>(); } try { conn = Database.createConnection(); PreparedStatement ps = null; List<Long> problemIds = new ArrayList<Long>(); Map<Long, Integer> problemIndexes = new HashMap<Long, Integer>(); int index = 0; for (Problem problem2 : problems) { Problem problem = (Problem) problem2; problemIds.add(new Long(problem.getId())); problemIndexes.put(new Long(problem.getId()), new Integer(index)); index++; } String userIdsCon = ""; if (roleId >= 0) { // TODO performance issue!! List<Long> ids = new ArrayList<Long>(); try { ps = conn.prepareStatement("SELECT user_profile_id FROM user_role WHERE role_id=?"); ps.setLong(1, roleId); ResultSet rs = ps.executeQuery(); while (rs.next()) { ids.add(rs.getLong(1)); } if (ids.size() == 0) { return new ArrayList<RankListEntry>(); } } finally { Database.dispose(ps); } userIdsCon = " AND user_profile_id IN " + Database.createNumberValues(ids); } String inProblemIds = Database.createNumberValues(problemIds); Map<Long, RankListEntry> entries = new HashMap<Long, RankListEntry>(); try { ps = conn .prepareStatement("SELECT user_profile_id, problem_id, judge_reply_id, submission_date FROM submission " + "WHERE problem_id IN " + inProblemIds + userIdsCon + " ORDER BY submission_date"); ResultSet rs = ps.executeQuery(); while (rs.next()) { long userId = rs.getLong(1); RankListEntry entry = (RankListEntry) entries.get(new Long(userId)); if (entry == null) { entry = new RankListEntry(problems.size()); entries.put(new Long(userId), entry); UserProfile profile = new UserProfile(); profile.setId(userId); entry.setUserProfile(profile); } long problemId = rs.getLong(2); long judgeReplyId = rs.getLong(3); int time = (int) ((rs.getTimestamp(4).getTime() - contestStartDate) / 1000 / 60); entry.update(((Integer) 
problemIndexes.get(new Long(problemId))).intValue(), time, judgeReplyId == JudgeReply.ACCEPTED.getId()); } } finally { Database.dispose(ps); } List<RankListEntry> entryList = new ArrayList<RankListEntry>(entries.values()); Collections.sort(entryList); return entryList; } catch (SQLException e) { throw new PersistenceException("Failed to get the rank list", e); } finally { Database.dispose(conn); } } public RankListEntry getRankListEntry(long contestId, long userId) throws PersistenceException { Connection conn = null; try { conn = Database.createConnection(); PreparedStatement ps = null; try { ps = conn.prepareStatement("SELECT ac_number, submission_number FROM user_stat " + "WHERE contest_id=? AND user_id=?"); ps.setLong(1, contestId); ps.setLong(2, userId); ResultSet rs = ps.executeQuery(); RankListEntry re = null; if (rs.next()) { re = new RankListEntry(1); re.setSolved(rs.getLong(1)); re.setSubmitted(rs.getLong(2)); } return re; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the rank list", e); } finally { Database.dispose(conn); } } public ProblemsetRankList getProblemsetRankList(long contestId, int offset, int count, String sort) throws PersistenceException { Connection conn = null; try { conn = Database.createConnection(); PreparedStatement ps = null; String sql=null; if(sort.equalsIgnoreCase("submit")){ sql = "SELECT u.user_profile_id, u.handle, u.nickname, up.plan, ua.solved, ua.tiebreak " + "FROM user_ac ua " + "LEFT JOIN user_profile u ON ua.user_profile_id = u.user_profile_id " + "LEFT JOIN user_preference up ON ua.user_profile_id = up.user_profile_id " + "WHERE contest_id=? 
ORDER BY ua.tiebreak DESC, ua.solved DESC " + "LIMIT " + offset + "," + count; } else { sql = "SELECT u.user_profile_id, u.handle, u.nickname, up.plan, ua.solved, ua.tiebreak " + "FROM user_ac ua " + "LEFT JOIN user_profile u ON ua.user_profile_id = u.user_profile_id " + "LEFT JOIN user_preference up ON ua.user_profile_id = up.user_profile_id " + "WHERE contest_id=? ORDER BY ua.solved DESC, ua.tiebreak ASC " + "LIMIT " + offset + "," + count; } List<UserProfile> users = new ArrayList<UserProfile>(); List<Integer> solved = new ArrayList<Integer>(); List<Integer> total = new ArrayList<Integer>(); try { ps = conn.prepareStatement(sql); ps.setLong(1, contestId); ResultSet rs = ps.executeQuery(); while (rs.next()) { UserProfile user = new UserProfile(); user.setId(rs.getLong(1)); user.setHandle(rs.getString(2)); user.setNickName(rs.getString(3)); user.setDeclaration(rs.getString(4)); users.add(user); solved.add(rs.getInt(5)); total.add(rs.getInt(6)); } } finally { Database.dispose(ps); } int[] solvedArray = new int[solved.size()]; int[] totalArray = new int[solved.size()]; for (int i = 0; i < solvedArray.length; ++i) { solvedArray[i] = solved.get(i); totalArray[i] = total.get(i); } ProblemsetRankList r = new ProblemsetRankList(offset, count); r.setUsers(users.toArray(new UserProfile[0])); r.setSolved(solvedArray); r.setTotal(totalArray); return r; } catch (SQLException e) { throw new PersistenceException("Failed to get the rank list", e); } finally { Database.dispose(conn); } } public UserStatistics getUserStatistics(long contestId, long userId) throws PersistenceException { Connection conn = null; try { UserStatistics statistics = new UserStatistics(userId, contestId); conn = Database.createConnection(); PreparedStatement ps = null; /*String sql = "SELECT DISTINCT p.problem_id, p.code, p.title, s.judge_comment " + SubmissionPersistenceImpl.GET_SUBMISSION_FROM_PART + " AND s.user_profile_id=? AND s.judge_reply_id=? 
AND s.contest_id=?";*/ String sql = "SELECT p.problem_id, p.code, p.title, s.judge_comment " + SubmissionPersistenceImpl.GET_SUBMISSION_FROM_PART + " AND s.user_profile_id=? AND s.judge_reply_id=? AND s.contest_id=?"; sql = sql.replace("FORCE_INDEX", "USE INDEX (index_submission_user_reply_contest)"); HashSet<Long> solvedid=new HashSet<Long>(); HashSet<Long> confirmedid=new HashSet<Long>(); List<Problem> solved = new ArrayList<Problem>(); List<Problem> confirmed = new ArrayList<Problem>(); try { ps = conn.prepareStatement(sql); ps.setLong(1, userId); ps.setLong(2, JudgeReply.ACCEPTED.getId()); ps.setLong(3, contestId); ResultSet rs = ps.executeQuery(); while (rs.next()) { Long probemid=new Long(rs.getLong("problem_id")); if(!solvedid.contains(probemid)) { Problem p = new Problem(); p.setContestId(contestId); p.setId(rs.getLong("problem_id")); p.setCode(rs.getString("code")); p.setTitle(rs.getString("title")); solved.add(p); solvedid.add(probemid); } String comment=rs.getString("judge_comment"); if(!confirmed.contains(probemid) && "Yes".equals(comment)) { Problem p = new Problem(); p.setContestId(contestId); p.setId(rs.getLong("problem_id")); p.setCode(rs.getString("code")); p.setTitle(rs.getString("title")); confirmed.add(p); confirmedid.add(probemid); } } } finally { Database.dispose(ps); } statistics.setSolved(new TreeSet<Problem>(solved)); statistics.setConfirmed(new TreeSet<Problem>(confirmed)); try { ps = conn .prepareStatement("SELECT judge_reply_id, count(*) FROM submission WHERE contest_id=? AND user_profile_id=? 
GROUP BY judge_reply_id"); ps.setLong(1, contestId); ps.setLong(2, userId); ResultSet rs = ps.executeQuery(); while (rs.next()) { long jid = rs.getLong(1); int count = rs.getInt(2); statistics.setCount(jid, count); } return statistics; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the user statistics", e); } finally { Database.dispose(conn); } } public void changeQQStatus(long pid, long uid, String status) throws PersistenceException { Connection conn = null; try { conn = Database.createConnection(); PreparedStatement ps = null; try { ps = conn .prepareStatement("UPDATE submission_status SET status=? WHERE problem_id=? AND user_profile_id=?"); ps.setString(1, status); ps.setLong(2, pid); ps.setLong(3, uid); int changes = ps.executeUpdate(); if (changes == 0) { ps = conn .prepareStatement("INSERT INTO submission_status (problem_id, user_profile_id, status) VALUES (?,?,?)"); ps.setLong(1, pid); ps.setLong(2, uid); ps.setString(3, status); ps.executeUpdate(); } } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to update the QQs", e); } finally { Database.dispose(conn); } } public List<QQ> searchQQs(long contestId) throws PersistenceException { Connection conn = null; try { conn = Database.createConnection(); PreparedStatement ps = null; try { ps = conn .prepareStatement("SELECT s.submission_id, s.submission_date, " + "u.user_profile_id, u.handle, u.nickname, " + "p.problem_id, p.code, p.color, ss.status " + "FROM submission s " + "LEFT JOIN user_profile u ON s.user_profile_id=u.user_profile_id " + "LEFT JOIN problem p ON s.problem_id=p.problem_id " + "LEFT JOIN submission_status ss ON u.user_profile_id=ss.user_profile_id AND p.problem_id=ss.problem_id " + "WHERE p.contest_id=? AND s.judge_reply_id=? AND p.active=1 AND (ss.status IS NULL OR ss.status<>?) 
" + "ORDER BY s.submission_date"); ps.setLong(1, contestId); ps.setLong(2, JudgeReply.ACCEPTED.getId()); ps.setString(3, QQ.QQ_FINISHED); ResultSet rs = ps.executeQuery(); List<QQ> qqs = new ArrayList<QQ>(); while (rs.next()) { QQ qq = new QQ(); qq.setCode(rs.getString("code")); qq.setColor(rs.getString("color")); qq.setNickName(rs.getString("nickname")); qq.setHandle(rs.getString("handle")); qq.setProblemId(rs.getLong("problem_id")); qq.setUserProfileId(rs.getLong("user_profile_id")); qq.setSubmissionId(rs.getLong("submission_id")); qq.setSubmissionDate(Database.getDate(rs, "submission_date")); qq.setStatus(rs.getString("status")); if (qq.getStatus() == null) { qq.setStatus(QQ.QQ_NEW); } qqs.add(qq); } return qqs; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the QQs", e); } finally { Database.dispose(conn); } } public ProblemStatistics getProblemStatistics(long problemId, String orderBy, int count) throws PersistenceException { Connection conn = null; String ob = null; ProblemStatistics ret = null; if ("time".equals(orderBy)) { ob = "s.time_consumption ASC,memory_consumption ASC,s.submission_date ASC"; ret = new ProblemStatistics(problemId, "time"); } else if ("memory".equals(orderBy)) { ob = "s.memory_consumption ASC,s.time_consumption ASC,submission_date ASC"; ret = new ProblemStatistics(problemId, "memory"); } else { ob = "s.submission_date ASC,s.time_consumption ASC,memory_consumption ASC"; ret = new ProblemStatistics(problemId, "date"); } Map<Long, Language> languageMap = PersistenceManager.getInstance().getLanguagePersistence().getLanguageMap(); try { conn = Database.createConnection(); PreparedStatement ps = null; try { ps = conn .prepareStatement("SELECT judge_reply_id, count(*) FROM submission WHERE problem_id=? 
GROUP BY judge_reply_id"); ps.setLong(1, problemId); ResultSet rs = ps.executeQuery(); while (rs.next()) { long jid = rs.getLong(1); int c = rs.getInt(2); ret.setCount(jid, c); } } finally { Database.dispose(ps); } String sql = SubmissionPersistenceImpl.GET_SUBMISSIONS + " AND s.problem_id=? AND s.judge_reply_id=? ORDER BY " + ob + " LIMIT " + count; sql = sql.replace("FORCE_INDEX", "USE INDEX (index_submission_problem_reply)"); try { ps = conn.prepareStatement(sql); ps.setLong(1, problemId); ps.setLong(2, JudgeReply.ACCEPTED.getId()); ResultSet rs = ps.executeQuery(); List<Submission> submissions = new ArrayList<Submission>(); while (rs.next()) { Submission submission = this.populateSubmission(rs, false, languageMap); submissions.add(submission); } ret.setBestRuns(submissions); return ret; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get the QQs", e); } finally { Database.dispose(conn); } } public List<Submission> getQueueingSubmissions(long maxSubmissionId, int count) throws PersistenceException { Connection conn = null; try { conn = Database.createConnection(); PreparedStatement ps = null; if (maxSubmissionId < 0) { ps = conn.prepareStatement("SELECT MAX(submission_id) FROM submission;"); try { ResultSet rs = ps.executeQuery(); rs.next(); maxSubmissionId = rs.getLong(1); } finally { Database.dispose(ps); } } StringBuilder query = new StringBuilder(SubmissionPersistenceImpl.GET_SUBMISSIONS_WITH_CONTENT.replace("FORCE_INDEX", "")); query.append(" AND s.judge_reply_id="); query.append(JudgeReply.QUEUING.getId()); query.append(" AND s.submission_id<="); query.append(maxSubmissionId); query.append(" ORDER BY s.submission_id DESC LIMIT "); query.append(count); System.out.println(query.toString()); ps = conn.prepareStatement(query.toString()); try { ResultSet rs = ps.executeQuery(); List<Submission> submissions = new ArrayList<Submission>(); Map<Long, Language> languageMap = 
PersistenceManager.getInstance().getLanguagePersistence().getLanguageMap(); while (rs.next()) { Submission submission = this.populateSubmission(rs, true, languageMap); submissions.add(submission); } return submissions; } finally { Database.dispose(ps); } } catch (SQLException e) { throw new PersistenceException("Failed to get queueing submissions", e); } finally { Database.dispose(conn); } } }
gpl-3.0
emmanuelbernard/hibernate-ogm
core/src/test/java/org/hibernate/ogm/backendtck/associations/collection/types/GrandChild.java
587
/* * Hibernate OGM, Domain model persistence for NoSQL datastores * * License: GNU Lesser General Public License (LGPL), version 2.1 or later * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>. */ package org.hibernate.ogm.backendtck.associations.collection.types; import javax.persistence.Embeddable; /** * @author Emmanuel Bernard &lt;emmanuel@hibernate.org&gt; */ @Embeddable public class GrandChild { private String name; public String getName() { return name; } public void setName(String name) { this.name = name; } }
lgpl-2.1
lucee/unoffical-Lucee-no-jre
source/java/core/src/lucee/runtime/tag/Report.java
3114
/** * * Copyright (c) 2014, the Railo Company Ltd. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see <http://www.gnu.org/licenses/>. * **/ package lucee.runtime.tag; import lucee.runtime.exp.TagNotSupported; import lucee.runtime.ext.tag.BodyTagImpl; /** * Runs a predefined Crystal Reports report. * * * **/ public final class Report extends BodyTagImpl { private String template; private String format; private String name; private String filename; private String query; private boolean overwrite; private String encryption; private String ownerpassword; private String userpassword; private String permissions; private String datasource; private String type; private double timeout; private String password; private String orderby; private String report; private String username; private String formula; /** * constructor for the tag class * @throws TagNotSupported **/ public Report() throws TagNotSupported { // TODO implement tag throw new TagNotSupported("report"); } /** set the value password * @param password value to set **/ public void setPassword(String password) { this.password=password; } /** set the value orderby * Orders results according to your specifications. 
* @param orderby value to set **/ public void setOrderby(String orderby) { this.orderby=orderby; } /** set the value report * @param report value to set **/ public void setReport(String report) { this.report=report; } /** set the value username * @param username value to set **/ public void setUsername(String username) { this.username=username; } /** set the value formula * Specifies one or more named formulas. Terminate each formula specification with a semicolon. * @param formula value to set **/ public void setFormula(String formula) { this.formula=formula; } @Override public int doStartTag() { return SKIP_BODY; } @Override public int doEndTag() { return EVAL_PAGE; } @Override public void doInitBody() { } @Override public int doAfterBody() { return SKIP_BODY; } @Override public void release() { super.release(); password=""; orderby=""; report=""; username=""; formula=""; template=""; format=""; name=""; filename=""; query=""; overwrite=false; encryption=""; ownerpassword=""; userpassword=""; permissions=""; datasource=""; type=""; timeout=0; } public void addReportParam(ReportParamBean param) { // TODO Auto-generated method stub } }
lgpl-2.1
mcgilman/nifi
nifi-system-tests/nifi-system-test-suite/src/test/java/org/apache/nifi/tests/system/clustering/JoinClusterWithDifferentFlow.java
12977
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.tests.system.clustering; import org.apache.nifi.controller.serialization.FlowEncodingVersion; import org.apache.nifi.controller.serialization.FlowFromDOMFactory; import org.apache.nifi.encrypt.StringEncryptor; import org.apache.nifi.tests.system.InstanceConfiguration; import org.apache.nifi.tests.system.NiFiInstance; import org.apache.nifi.tests.system.NiFiInstanceFactory; import org.apache.nifi.tests.system.NiFiSystemIT; import org.apache.nifi.tests.system.SpawnedClusterNiFiInstanceFactory; import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; import org.apache.nifi.util.NiFiProperties; import org.apache.nifi.web.api.dto.AffectedComponentDTO; import org.apache.nifi.web.api.dto.FlowSnippetDTO; import org.apache.nifi.web.api.dto.NodeDTO; import org.apache.nifi.web.api.dto.ParameterContextDTO; import org.apache.nifi.web.api.dto.ParameterContextReferenceDTO; import org.apache.nifi.web.api.dto.ProcessGroupDTO; import org.apache.nifi.web.api.dto.ProcessorDTO; import org.apache.nifi.web.api.dto.flow.FlowDTO; import org.apache.nifi.web.api.dto.flow.ProcessGroupFlowDTO; import 
org.apache.nifi.web.api.entity.AffectedComponentEntity; import org.apache.nifi.web.api.entity.ClusterEntity; import org.apache.nifi.web.api.entity.ControllerServiceEntity; import org.apache.nifi.web.api.entity.ControllerServicesEntity; import org.apache.nifi.web.api.entity.NodeEntity; import org.apache.nifi.web.api.entity.ParameterEntity; import org.apache.nifi.web.api.entity.ProcessorEntity; import org.junit.Test; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.xml.sax.InputSource; import org.xml.sax.SAXException; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.zip.GZIPInputStream; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; public class JoinClusterWithDifferentFlow extends NiFiSystemIT { @Override protected NiFiInstanceFactory getInstanceFactory() { return new SpawnedClusterNiFiInstanceFactory( new InstanceConfiguration.Builder() .bootstrapConfig("src/test/resources/conf/clustered/node1/bootstrap.conf") .instanceDirectory("target/node1") .flowXml(new File("src/test/resources/flows/mismatched-flows/flow1.xml.gz")) .build(), new InstanceConfiguration.Builder() .bootstrapConfig("src/test/resources/conf/clustered/node2/bootstrap.conf") .instanceDirectory("target/node2") .flowXml(new File("src/test/resources/flows/mismatched-flows/flow2.xml.gz")) .build() ); } @Test public void testStartupWithDifferentFlow() throws IOException, SAXException, ParserConfigurationException, NiFiClientException, InterruptedException { final 
NiFiInstance node2 = getNiFiInstance().getNodeInstance(2); final File node2ConfDir = new File(node2.getInstanceDirectory(), "conf"); final File backupFile = getBackupFile(node2ConfDir); final NodeDTO node2Dto = getNodeDTO(5672); verifyFlowContentsOnDisk(backupFile); disconnectNode(node2Dto); verifyInMemoryFlowContents(); // Reconnect the node so that we can properly shutdown reconnectNode(node2Dto); } private File getBackupFile(final File confDir) { final File[] flowXmlFileArray = confDir.listFiles(file -> file.getName().startsWith("flow") && file.getName().endsWith(".xml.gz")); final List<File> flowXmlFiles = new ArrayList<>(Arrays.asList(flowXmlFileArray)); assertEquals(2, flowXmlFiles.size()); flowXmlFiles.removeIf(file -> file.getName().equals("flow.xml.gz")); assertEquals(1, flowXmlFiles.size()); final File backupFile = flowXmlFiles.get(0); return backupFile; } private void verifyFlowContentsOnDisk(final File backupFile) throws IOException, SAXException, ParserConfigurationException { // Read the flow and make sure that the backup looks the same as the original. We don't just do a byte comparison because the compression may result in different // gzipped bytes and because if the two flows do differ, we want to have the String representation so that we can compare to see how they are different. 
final String flowXml = readFlow(backupFile); final String expectedFlow = readFlow(new File("src/test/resources/flows/mismatched-flows/flow2.xml.gz")); assertEquals(expectedFlow, flowXml); // Verify some of the values that were persisted to disk final File confDir = backupFile.getParentFile(); final String loadedFlow = readFlow(new File(confDir, "flow.xml.gz")); final DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder(); final Document document = documentBuilder.parse(new InputSource(new StringReader(loadedFlow))); final Element rootElement = (Element) document.getElementsByTagName("flowController").item(0); final FlowEncodingVersion encodingVersion = FlowEncodingVersion.parse(rootElement); final NiFiInstance node2 = getNiFiInstance().getNodeInstance(2); final StringEncryptor encryptor = createEncryptorFromProperties(node2.getProperties()); final Element rootGroupElement = (Element) rootElement.getElementsByTagName("rootGroup").item(0); final ProcessGroupDTO groupDto = FlowFromDOMFactory.getProcessGroup(null, rootGroupElement, encryptor, encodingVersion); final Set<ProcessGroupDTO> childGroupDtos = groupDto.getContents().getProcessGroups(); assertEquals(1, childGroupDtos.size()); final ProcessGroupDTO childGroup = childGroupDtos.iterator().next(); assertFalse(childGroup.getId().endsWith("00")); final FlowSnippetDTO childContents = childGroup.getContents(); final Set<ProcessorDTO> childProcessors = childContents.getProcessors(); assertEquals(1, childProcessors.size()); final ProcessorDTO procDto = childProcessors.iterator().next(); assertFalse(procDto.getId().endsWith("00")); assertFalse(procDto.getName().endsWith("00")); } private NodeDTO getNodeDTO(final int apiPort) throws NiFiClientException, IOException { final ClusterEntity clusterEntity = getNifiClient().getControllerClient().getNodes(); final NodeDTO node2Dto = clusterEntity.getCluster().getNodes().stream() .filter(nodeDto -> nodeDto.getApiPort() == apiPort) .findAny() 
.orElseThrow(() -> new RuntimeException("Could not locate Node 2")); return node2Dto; } private void disconnectNode(final NodeDTO nodeDto) throws NiFiClientException, IOException, InterruptedException { // Disconnect Node 2 so that we can go to the node directly via the REST API and ensure that the flow is correct. final NodeEntity nodeEntity = new NodeEntity(); nodeEntity.setNode(nodeDto); getNifiClient().getControllerClient().disconnectNode(nodeDto.getNodeId(), nodeEntity); // Give the node a second to disconnect Thread.sleep(1000L); } private void reconnectNode(final NodeDTO nodeDto) throws NiFiClientException, IOException { final NodeEntity nodeEntity = new NodeEntity(); nodeEntity.setNode(nodeDto); getNifiClient().getControllerClient().connectNode(nodeDto.getNodeId(), nodeEntity); waitForAllNodesConnected(); } private void verifyInMemoryFlowContents() throws NiFiClientException, IOException, InterruptedException { final NiFiClient node2Client = createClient(5672); final ProcessGroupFlowDTO rootGroupFlow = node2Client.getFlowClient().getProcessGroup("root").getProcessGroupFlow(); final FlowDTO flowDto = rootGroupFlow.getFlow(); assertEquals(1, flowDto.getProcessGroups().size()); final ParameterContextReferenceDTO paramContextReference = flowDto.getProcessGroups().iterator().next().getParameterContext().getComponent(); assertEquals("65b6403c-016e-1000-900b-357b13fcc7c4", paramContextReference.getId()); assertEquals("Context 1", paramContextReference.getName()); ProcessorEntity generateFlowFileEntity = node2Client.getProcessorClient().getProcessor("65b8f293-016e-1000-7b8f-6c6752fa921b"); final Map<String, String> generateProperties = generateFlowFileEntity.getComponent().getConfig().getProperties(); assertEquals("01 B", generateProperties.get("File Size")); assertEquals("1", generateProperties.get("Batch Size")); assertEquals("1 hour", generateFlowFileEntity.getComponent().getConfig().getSchedulingPeriod()); String currentState = null; while 
("RUNNING".equals(currentState)) { Thread.sleep(50L); generateFlowFileEntity = node2Client.getProcessorClient().getProcessor("65b8f293-016e-1000-7b8f-6c6752fa921b"); currentState = generateFlowFileEntity.getComponent().getState(); } final ParameterContextDTO contextDto = node2Client.getParamContextClient().getParamContext(paramContextReference.getId()).getComponent(); assertEquals(2, contextDto.getBoundProcessGroups().size()); assertEquals(1, contextDto.getParameters().size()); final ParameterEntity parameterEntity = contextDto.getParameters().iterator().next(); assertEquals("ABC", parameterEntity.getParameter().getName()); assertEquals("XYZ", parameterEntity.getParameter().getValue()); final Set<AffectedComponentEntity> affectedComponentEntities = parameterEntity.getParameter().getReferencingComponents(); assertEquals(1, affectedComponentEntities.size()); final AffectedComponentDTO affectedComponent = affectedComponentEntities.iterator().next().getComponent(); assertEquals("65b8f293-016e-1000-7b8f-6c6752fa921b", affectedComponent.getId()); assertEquals(AffectedComponentDTO.COMPONENT_TYPE_PROCESSOR, affectedComponent.getReferenceType()); // The original Controller Service, whose UUID ended with 00 should be removed and a new one inherited. 
final ControllerServicesEntity controllerLevelServices = node2Client.getFlowClient().getControllerServices(); assertEquals(1, controllerLevelServices.getControllerServices().size()); final ControllerServiceEntity firstService = controllerLevelServices.getControllerServices().iterator().next(); assertFalse(firstService.getId().endsWith("00")); } private StringEncryptor createEncryptorFromProperties(Properties nifiProperties) { final String algorithm = nifiProperties.getProperty(NiFiProperties.SENSITIVE_PROPS_ALGORITHM); final String provider = nifiProperties.getProperty(NiFiProperties.SENSITIVE_PROPS_PROVIDER); final String password = nifiProperties.getProperty(NiFiProperties.SENSITIVE_PROPS_KEY); return StringEncryptor.createEncryptor(algorithm, provider, password); } private String readFlow(final File file) throws IOException { final ByteArrayOutputStream baos = new ByteArrayOutputStream(); try (final InputStream fis = new FileInputStream(file); final InputStream gzipIn = new GZIPInputStream(fis)) { final byte[] buffer = new byte[4096]; int len; while ((len = gzipIn.read(buffer)) > 0) { baos.write(buffer, 0, len); } } final byte[] bytes = baos.toByteArray(); return new String(bytes, StandardCharsets.UTF_8); } }
apache-2.0
siosio/intellij-community
platform/lang-impl/src/com/intellij/execution/lineMarker/RunnableStatusListener.java
2330
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.execution.lineMarker; import com.intellij.codeInsight.daemon.DaemonCodeAnalyzer; import com.intellij.codeInsight.daemon.LineMarkerSettings; import com.intellij.codeInsight.daemon.impl.DaemonCodeAnalyzerImpl; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.fileEditor.FileEditor; import com.intellij.openapi.fileEditor.TextEditor; import com.intellij.openapi.project.Project; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.FileViewProvider; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiManager; import com.intellij.util.containers.ContainerUtil; import org.jetbrains.annotations.NotNull; import java.util.Collection; class RunnableStatusListener implements DaemonCodeAnalyzer.DaemonListener { @Override public void daemonFinished(@NotNull Collection<? extends FileEditor> fileEditors) { if (!LineMarkerSettings.getSettings().isEnabled(new RunLineMarkerProvider())) return; for (FileEditor fileEditor : fileEditors) { if (fileEditor instanceof TextEditor && fileEditor.isValid()) { Editor editor = ((TextEditor)fileEditor).getEditor(); Project project = editor.getProject(); VirtualFile file = fileEditor.getFile(); if (file != null && project != null && file.isValid()) { boolean hasRunMarkers = ContainerUtil.findInstance( DaemonCodeAnalyzerImpl.getLineMarkers(editor.getDocument(), project), RunLineMarkerProvider.RunLineMarkerInfo.class) != null; FileViewProvider vp = PsiManager.getInstance(project).findViewProvider(file); if (hasRunMarkers || (vp != null && weMayTrustRunGutterContributors(vp))) { RunLineMarkerProvider.markRunnable(file, hasRunMarkers); } } } } } private static boolean weMayTrustRunGutterContributors(FileViewProvider vp) { for (PsiFile file : vp.getAllFiles()) { for (RunLineMarkerContributor contributor : 
RunLineMarkerContributor.EXTENSION.allForLanguage(file.getLanguage())) { if (!contributor.producesAllPossibleConfigurations(file)) { return false; } } } return true; } }
apache-2.0
millmanorama/autopsy
Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestJobEvent.java
1280
/* * Autopsy Forensic Browser * * Copyright 2015 Basis Technology Corp. * Contact: carrier <at> sleuthkit <dot> org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.sleuthkit.autopsy.experimental.autoingest; import java.io.Serializable; import javax.annotation.concurrent.Immutable; import org.sleuthkit.autopsy.events.AutopsyEvent; @Immutable abstract class AutoIngestJobEvent extends AutopsyEvent implements Serializable { private static final long serialVersionUID = 1L; private final AutoIngestJob job; AutoIngestJobEvent(AutoIngestManager.Event eventSubType, AutoIngestJob job) { super(eventSubType.toString(), null, null); this.job = job; } AutoIngestJob getJob() { return this.job; } }
apache-2.0
himasha/carbon-business-process
components/bpel/org.wso2.carbon.bpel.b4p/src/main/java/org/wso2/carbon/bpel/b4p/coordination/dao/jpa/openjpa/entity/HTProtocolHandler.java
2858
/* * Copyright (c) WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wso2.carbon.bpel.b4p.coordination.dao.jpa.openjpa.entity; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.wso2.carbon.bpel.b4p.coordination.dao.HTProtocolHandlerDAO; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.Table; /** * Entity class for HT_COORDINATION_DATA table. */ @Entity @Table(name = "HT_COORDINATION_DATA") public class HTProtocolHandler implements HTProtocolHandlerDAO { private static Log log = LogFactory.getLog(HTProtocolHandler.class); /** * Used to specify Unique message ID (UUID) generated by B4P component. */ @Id @Column(name = "MESSAGE_ID", nullable = false, unique = true) private String messageID; /** * Used to specify Task parent, in this case BPEL Process instance ID. */ @Column(name = "PROCESS_INSTANCE_ID", nullable = true) private String processInstanceID; /** * Used to specify TaskID. A process instance can have multiple task IDs. */ @Column(name = "TASK_ID", nullable = true) private String taskID; /** * Used to specify Protocol Handler URL for a Task. 
*/ @Column(name = "PROTOCOL_HANDlER_URL", nullable = false) private String protocolHandlerURL; public HTProtocolHandler() { } @Override public String getMessageID() { return messageID; } @Override public void setMessageID(String messageID) { this.messageID = messageID; } @Override public String getProcessInstanceID() { return processInstanceID; } @Override public void setProcessInstanceID(String processInstanceID) { this.processInstanceID = processInstanceID; } @Override public String getTaskID() { return taskID; } @Override public void setTaskID(String taskID) { this.taskID = taskID; } @Override public String getHumanTaskProtocolHandlerURL() { return protocolHandlerURL; } @Override public void setHumanTaskProtocolHandlerURL(String protocolHandlerURL) { this.protocolHandlerURL = protocolHandlerURL; } }
apache-2.0
gingerwizard/elasticsearch
x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java
7086
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.apache.lucene.search;

import java.io.IOException;
import java.util.Objects;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;

/**
 * A query that wraps another query and ensures scores do not exceed a maximum value.
 *
 * <p>Scoring is delegated entirely to the wrapped query; only the final per-document
 * score is clamped to {@code maxScore}. Matching (which documents are hit) is unchanged.
 * NOTE(review): {@code CappedScoreWeight} and {@code CappedScorer} are companion classes
 * declared elsewhere in this package — their capping semantics are assumed to mirror the
 * {@code Math.min(maxScore, ...)} logic used in {@link CappedBulkScorer} below; confirm there.
 */
public final class CappedScoreQuery extends Query {
    // The delegate query whose scores are to be capped. Never null (checked in ctor).
    private final Query query;
    // Upper bound applied to every document score; must be > 0 (checked in ctor).
    private final float maxScore;

    /**
     * Caps scores from the passed in Query to the supplied maxScore parameter.
     *
     * @param query    the query to wrap; must not be null
     * @param maxScore the score ceiling; must be strictly positive
     * @throws IllegalArgumentException if {@code maxScore <= 0} (the {@code > 0 == false}
     *         form also rejects NaN, which a plain {@code <= 0} test would let through)
     */
    public CappedScoreQuery(Query query, float maxScore) {
        this.query = Objects.requireNonNull(query, "Query must not be null");
        if (maxScore > 0 == false) {
            throw new IllegalArgumentException(this.getClass().getName() + " maxScore must be >0, " + maxScore + " supplied.");
        }
        this.maxScore = maxScore;
    }

    /** Returns the encapsulated query. */
    public Query getQuery() {
        return query;
    }

    @Override
    public void visit(QueryVisitor visitor) {
        // The wrapped query is a mandatory (MUST) part of this query for visitation purposes.
        query.visit(visitor.getSubVisitor(BooleanClause.Occur.MUST, this));
    }

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        Query rewritten = query.rewrite(reader);

        // Inner query changed during rewrite: re-wrap the rewritten form and let Lucene
        // call rewrite() again on the new instance until a fixed point is reached.
        if (rewritten != query) {
            return new CappedScoreQuery(rewritten, maxScore);
        }

        // Collapse CappedScoreQuery(CappedScoreQuery(q)) — the inner cap already applies.
        // NOTE(review): this drops the outer maxScore in favor of the inner one; verify
        // that nesting with differing caps is not expected to take the minimum.
        if (rewritten.getClass() == CappedScoreQuery.class) {
            return rewritten;
        }

        // A boost is irrelevant once scores are capped from above by this wrapper's cap,
        // so strip the BoostQuery layer and cap its inner query directly.
        if (rewritten.getClass() == BoostQuery.class) {
            return new CappedScoreQuery(((BoostQuery) rewritten).getQuery(), maxScore);
        }

        return super.rewrite(reader);
    }

    /**
     * We return this as our {@link BulkScorer} so that if the CSQ wraps a query with its own optimized top-level scorer (e.g.
     * BooleanScorer) we can use that top-level scorer.
     */
    protected static class CappedBulkScorer extends BulkScorer {
        // Delegate bulk scorer doing the real matching/scoring.
        final BulkScorer bulkScorer;
        // Retained for parity with the delegating pattern; not read in this class's visible code.
        final Weight weight;
        // Score ceiling applied per document in the wrapped collector.
        final float maxScore;

        public CappedBulkScorer(BulkScorer bulkScorer, Weight weight, float maxScore) {
            this.bulkScorer = bulkScorer;
            this.weight = weight;
            this.maxScore = maxScore;
        }

        @Override
        public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
            // Delegate the doc-id iteration; intercept only the score reported to the collector.
            return bulkScorer.score(wrapCollector(collector), acceptDocs, min, max);
        }

        /** Wraps the collector so every score it observes is clamped to {@code maxScore}. */
        private LeafCollector wrapCollector(LeafCollector collector) {
            return new FilterLeafCollector(collector) {
                @Override
                public void setScorer(Scorable scorer) throws IOException {
                    // we must wrap again here, but using the scorer passed in as parameter:
                    in.setScorer(new FilterScorable(scorer) {
                        @Override
                        public float score() throws IOException {
                            // The actual cap: never report more than maxScore upward.
                            return Math.min(maxScore, in.score());
                        }

                        @Override
                        public void setMinCompetitiveScore(float minScore) throws IOException {
                            // Propagate competitive-score pruning to the real scorer.
                            scorer.setMinCompetitiveScore(minScore);
                        }
                    });
                }
            };
        }

        @Override
        public long cost() {
            return bulkScorer.cost();
        }
    }

    @Override
    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
        final Weight innerWeight = searcher.createWeight(query, scoreMode, boost);
        if (scoreMode.needsScores()) {
            return new CappedScoreWeight(this, innerWeight, maxScore) {
                @Override
                public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
                    final BulkScorer innerScorer = innerWeight.bulkScorer(context);
                    if (innerScorer == null) {
                        return null;
                    }
                    return new CappedBulkScorer(innerScorer, this, maxScore);
                }

                @Override
                public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
                    ScorerSupplier innerScorerSupplier = innerWeight.scorerSupplier(context);
                    if (innerScorerSupplier == null) {
                        return null;
                    }
                    return new ScorerSupplier() {
                        @Override
                        public Scorer get(long leadCost) throws IOException {
                            final Scorer innerScorer = innerScorerSupplier.get(leadCost);
                            // test scoreMode to avoid NPE - see https://github.com/elastic/elasticsearch/issues/51034
                            if (scoreMode == ScoreMode.TOP_SCORES) {
                                // short-circuit if scores will not need capping
                                innerScorer.advanceShallow(0);
                                if (innerScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS) <= maxScore) {
                                    return innerScorer;
                                }
                            }
                            return new CappedScorer(innerWeight, innerScorer, maxScore);
                        }

                        @Override
                        public long cost() {
                            return innerScorerSupplier.cost();
                        }
                    };
                }

                @Override
                public Matches matches(LeafReaderContext context, int doc) throws IOException {
                    // Matching/highlighting is unaffected by score capping — delegate directly.
                    return innerWeight.matches(context, doc);
                }

                @Override
                public Scorer scorer(LeafReaderContext context) throws IOException {
                    ScorerSupplier scorerSupplier = scorerSupplier(context);
                    if (scorerSupplier == null) {
                        return null;
                    }
                    return scorerSupplier.get(Long.MAX_VALUE);
                }
            };
        } else {
            // Scores are not needed: capping is a no-op, so use the inner weight untouched.
            return innerWeight;
        }
    }

    @Override
    public String toString(String field) {
        return new StringBuilder("CappedScore(").append(query.toString(field)).append(')').toString();
    }

    @Override
    public boolean equals(Object other) {
        // Equal iff same class, same cap, and equal wrapped query.
        return sameClassAs(other) && maxScore == ((CappedScoreQuery) other).maxScore && query.equals(((CappedScoreQuery) other).query);
    }

    @Override
    public int hashCode() {
        return 31 * classHash() + query.hashCode() + Float.hashCode(maxScore);
    }
}
apache-2.0
beebeandwer/TDDL
tddl-optimizer/src/test/java/com/taobao/tddl/optimizer/costbased/MergeJoinMergeOptimizerTest.java
5945
package com.taobao.tddl.optimizer.costbased; import org.codehaus.groovy.syntax.ParserException; import org.junit.Test; import com.taobao.tddl.optimizer.BaseOptimizerTest; import com.taobao.tddl.optimizer.core.ASTNodeFactory; import com.taobao.tddl.optimizer.core.plan.IQueryTree; import com.taobao.tddl.optimizer.core.plan.query.IJoin; import com.taobao.tddl.optimizer.core.plan.query.IMerge; import com.taobao.tddl.optimizer.core.plan.query.IQuery; import com.taobao.tddl.optimizer.costbased.after.MergeJoinMergeOptimizer; import com.taobao.tddl.optimizer.exceptions.QueryException; public class MergeJoinMergeOptimizerTest extends BaseOptimizerTest { private MergeJoinMergeOptimizer o = new MergeJoinMergeOptimizer(); @Test public void testExpandLeft() throws ParserException, QueryException { System.out.println("==========testExpandLeft=============="); IJoin j = this.getMergeJoinMerge(); System.out.println(j); IQueryTree res = o.expandLeft(j); System.out.println(res); System.out.println("========================"); j = this.getMergeJoinQuery(); System.out.println(j); res = o.expandLeft(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinMerge(); System.out.println(j); res = o.expandLeft(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinQuery(); System.out.println(j); res = o.expandLeft(j); System.out.println(res); System.out.println("========================"); } @Test public void testExpandRight() throws ParserException, QueryException { System.out.println("==========testExpandRight=============="); IJoin j = this.getMergeJoinMerge(); System.out.println(j); IQueryTree res = o.expandRight(j); System.out.println(res); System.out.println("========================"); j = this.getMergeJoinQuery(); System.out.println(j); res = o.expandRight(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinMerge(); System.out.println(j); res = 
o.expandRight(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinQuery(); System.out.println(j); res = o.expandRight(j); System.out.println(res); System.out.println("========================"); } @Test public void testCartesianProduct() throws ParserException, QueryException { System.out.println("==========testCartesianProduct=============="); IJoin j = this.getMergeJoinMerge(); System.out.println(j); IQueryTree res = o.cartesianProduct(j); System.out.println(res); System.out.println("========================"); j = this.getMergeJoinQuery(); System.out.println(j); res = o.cartesianProduct(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinMerge(); System.out.println(j); res = o.cartesianProduct(j); System.out.println(res); System.out.println("========================"); j = this.getQueryJoinQuery(); System.out.println(j); res = o.cartesianProduct(j); System.out.println(res); System.out.println("========================"); } private IJoin getMergeJoinMerge() { IJoin j = this.getJoin(); j.setLeftOuter(true); j.setRightOuter(true); IQuery q1 = this.getQuery(1); IQuery q2 = this.getQuery(2); IQuery q3 = this.getQuery(3); IQuery q4 = this.getQuery(4); IQuery q5 = this.getQuery(5); IQuery q6 = this.getQuery(6); IMerge leftMerge = this.getMerge(7); IMerge rightMerge = this.getMerge(8); leftMerge.addSubNode(q1).addSubNode(q2).addSubNode(q3); rightMerge.addSubNode(q4).addSubNode(q5).addSubNode(q6); j.setLeftNode(leftMerge); j.setRightNode(rightMerge); return j; } private IJoin getQueryJoinMerge() { IJoin j = this.getJoin(); j.setLeftOuter(true); j.setRightOuter(true); IQuery q1 = this.getQuery(1); IQuery q4 = this.getQuery(4); IQuery q5 = this.getQuery(5); IQuery q6 = this.getQuery(6); IMerge rightMerge = this.getMerge(8); rightMerge.addSubNode(q4).addSubNode(q5).addSubNode(q6); j.setLeftNode(q1); j.setRightNode(rightMerge); return j; } private IJoin getMergeJoinQuery() { IJoin j = 
this.getJoin(); j.setLeftOuter(true); j.setRightOuter(true); IQuery q1 = this.getQuery(1); IQuery q2 = this.getQuery(2); IQuery q3 = this.getQuery(3); IQuery q4 = this.getQuery(4); IMerge leftMerge = this.getMerge(7); leftMerge.addSubNode(q1).addSubNode(q2).addSubNode(q3); j.setLeftNode(leftMerge); j.setRightNode(q4); return j; } private IJoin getQueryJoinQuery() { IJoin j = this.getJoin(); j.setLeftOuter(true); j.setRightOuter(true); IQuery q1 = this.getQuery(1); IQuery q4 = this.getQuery(4); j.setLeftNode(q1); j.setRightNode(q4); return j; } private IQuery getQuery(Integer id) { IQuery q = ASTNodeFactory.getInstance().createQuery(); q.setAlias(id.toString()); return q; } private IMerge getMerge(Integer id) { IMerge m = ASTNodeFactory.getInstance().createMerge(); m.setAlias(id.toString()); return m; } private IJoin getJoin() { return ASTNodeFactory.getInstance().createJoin(); } }
apache-2.0
zhuyuanyan/PCCredit_TY
src/java/com/cardpay/pccredit/customer/filter/CustomerOverdueHistoryFilter.java
613
package com.cardpay.pccredit.customer.filter; import com.wicresoft.jrad.base.web.filter.BaseQueryFilter; /** * * @author 季东晓 * * 2014-11-13 下午3:44:17 */ public class CustomerOverdueHistoryFilter extends BaseQueryFilter { private String customerId; private String productId; public String getCustomerId() { return customerId; } public void setCustomerId(String customerId) { this.customerId = customerId; } public String getProductId() { return productId; } public void setProductId(String productId) { this.productId = productId; } }
apache-2.0
ckelsel/AndroidApiDemos
src/com/example/android/apis/animation/ActivityTransitionDetails.java
2538
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.android.apis.animation; import com.example.android.apis.R; import android.app.Activity; import android.app.ActivityOptions; import android.content.Intent; import android.graphics.drawable.ColorDrawable; import android.graphics.drawable.Drawable; import android.os.Bundle; import android.view.View; import android.widget.ImageView; /** * */ public class ActivityTransitionDetails extends Activity { private static final String TAG = "ActivityTransitionDetails"; private static final String KEY_ID = "ViewTransitionValues:id"; private int mImageResourceId = R.drawable.ducky; private String mName = "ducky"; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); getWindow().setBackgroundDrawable(new ColorDrawable(randomColor())); setContentView(R.layout.image_details); ImageView titleImage = (ImageView) findViewById(R.id.titleImage); titleImage.setImageDrawable(getHeroDrawable()); } private Drawable getHeroDrawable() { String name = getIntent().getStringExtra(KEY_ID); if (name != null) { mName = name; mImageResourceId = ActivityTransition.getDrawableIdForKey(name); } return getResources().getDrawable(mImageResourceId); } public void clicked(View v) { Intent intent = new Intent(this, ActivityTransition.class); intent.putExtra(KEY_ID, mName); ActivityOptions activityOptions = 
ActivityOptions.makeSceneTransitionAnimation(this, v, "hero"); startActivity(intent, activityOptions.toBundle()); } private static int randomColor() { int red = (int)(Math.random() * 128); int green = (int)(Math.random() * 128); int blue = (int)(Math.random() * 128); return 0xFF000000 | (red << 16) | (green << 8) | blue; } }
apache-2.0
mdeinum/spring-boot
spring-boot-project/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/logging/LogFileWebEndpointWebIntegrationTests.java
2717
/*
 * Copyright 2012-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.actuate.logging;

import java.io.File;
import java.io.IOException;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.io.TempDir;

import org.springframework.boot.actuate.endpoint.web.test.WebEndpointTest;
import org.springframework.boot.logging.LogFile;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.MediaType;
import org.springframework.mock.env.MockEnvironment;
import org.springframework.test.web.reactive.server.WebTestClient;
import org.springframework.util.FileCopyUtils;

/**
 * Integration tests for {@link LogFileWebEndpoint} exposed by Jersey, Spring MVC, and
 * WebFlux.
 *
 * @author Andy Wilkinson
 */
class LogFileWebEndpointWebIntegrationTests {

    // Injected per test method by the @WebEndpointTest infrastructure (see setUp).
    private WebTestClient client;

    // Static so the nested TestConfiguration (instantiated by the framework) can
    // reach the directory captured in the @BeforeAll hook.
    private static File tempFile;

    @BeforeEach
    void setUp(WebTestClient client) {
        this.client = client;
    }

    @BeforeAll
    static void setup(@TempDir File temp) {
        // Despite the name, this is the temp *directory*; the log file is created
        // inside it by TestConfiguration.logFileEndpoint().
        tempFile = temp;
    }

    @WebEndpointTest
    void getRequestProducesResponseWithLogFile() {
        // No Accept header: endpoint must still default to text/plain UTF-8.
        this.client.get().uri("/actuator/logfile").exchange().expectStatus().isOk().expectHeader()
                .contentType("text/plain; charset=UTF-8").expectBody(String.class).isEqualTo("--TEST--");
    }

    @WebEndpointTest
    void getRequestThatAcceptsTextPlainProducesResponseWithLogFile() {
        // Explicit Accept: text/plain must yield the identical response.
        this.client.get().uri("/actuator/logfile").accept(MediaType.TEXT_PLAIN).exchange().expectStatus().isOk()
                .expectHeader().contentType("text/plain; charset=UTF-8").expectBody(String.class).isEqualTo("--TEST--");
    }

    @Configuration(proxyBeanMethods = false)
    static class TestConfiguration {

        /**
         * Creates the endpoint under test, backed by a real file containing
         * {@code --TEST--} and resolved through {@code logging.file.name} exactly
         * as a production environment would resolve it.
         */
        @Bean
        LogFileWebEndpoint logFileEndpoint() throws IOException {
            File logFile = new File(tempFile, "test.log");
            FileCopyUtils.copy("--TEST--".getBytes(), logFile);
            MockEnvironment environment = new MockEnvironment();
            environment.setProperty("logging.file.name", logFile.getAbsolutePath());
            return new LogFileWebEndpoint(LogFile.get(environment), null);
        }

    }

}
apache-2.0
alanfgates/hive
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFInline.java
2739
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.ql.udf.generic; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; @Description(name ="inline", value= "_FUNC_( ARRAY( STRUCT()[,STRUCT()] " + "- explodes and array and struct into a table") public class GenericUDTFInline extends GenericUDTF { private transient ListObjectInspector li; public GenericUDTFInline(){ } @Override public StructObjectInspector initialize(ObjectInspector[] ois) throws UDFArgumentException { //There should be one argument that is a array of struct if (ois.length!=1){ throw new UDFArgumentException("UDF tables only one argument"); } if (ois[0].getCategory()!= Category.LIST){ throw new UDFArgumentException("Top level object must be an array but " + "was "+ois[0].getTypeName()); } li = 
(ListObjectInspector) ois[0]; ObjectInspector sub=li.getListElementObjectInspector(); if (sub.getCategory() != Category.STRUCT){ throw new UDFArgumentException("The sub element must be struct, but was "+sub.getTypeName()); } return (StructObjectInspector) sub; } @Override public void process(Object[] os) throws HiveException { List<?> list = li.getList(os[0]); if (list != null && !list.isEmpty()) { for (Object row : list.toArray()) { forward(row); } } } @Override public void close() throws HiveException { } @Override public String toString() { return "inline"; } }
apache-2.0
apache/incubator-reef
lang/java/reef-tests/src/main/java/org/apache/reef/tests/fail/task/BridgeClient.java
3773
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.reef.tests.fail.task;

import org.apache.reef.annotations.audience.ClientSide;
import org.apache.reef.annotations.audience.Private;
import org.apache.reef.bridge.driver.client.DriverClientConfiguration;
import org.apache.reef.bridge.proto.ClientProtocol;
import org.apache.reef.client.LauncherStatus;
import org.apache.reef.tang.Configuration;
import org.apache.reef.tang.JavaConfigurationBuilder;
import org.apache.reef.tang.Tang;
import org.apache.reef.tang.exceptions.InjectionException;
import org.apache.reef.task.Task;
import org.apache.reef.tests.TestDriverLauncher;
import org.apache.reef.tests.fail.util.FailBridgeClientUtils;
import org.apache.reef.util.EnvironmentUtils;
import org.apache.reef.util.OSUtils;

import java.io.IOException;

/**
 * Fail task bridge client.
 *
 * <p>Launches a bridge-mode REEF job that deliberately runs a failing task
 * ({@code failTaskClass}) so the driver's failure handling can be exercised.
 * {@code Driver} below is the companion driver class in this package.
 */
@Private
@ClientSide
public final class BridgeClient {

  /**
   * Empty private constructor to prohibit instantiation of utility class.
   */
  private BridgeClient() {
  }

  /**
   * Convenience overload: builds a default driver-client protocol configuration
   * (job id derived from the failing task class, OS auto-detected) and delegates
   * to the full overload.
   *
   * @param failTaskClass task implementation expected to fail
   * @param runtimeConfig REEF runtime configuration (local/YARN/etc.)
   * @param timeOut maximum time to wait for the job, in milliseconds (passed through to the launcher)
   * @return the final status reported by the launcher
   * @throws IOException if the driver service setup fails
   * @throws InjectionException if Tang cannot build the configuration
   */
  public static LauncherStatus run(
      final Class<? extends Task> failTaskClass,
      final Configuration runtimeConfig,
      final int timeOut) throws IOException, InjectionException {
    ClientProtocol.DriverClientConfiguration.Builder builder =
        ClientProtocol.DriverClientConfiguration.newBuilder()
            .setJobid("FailBridge_" + failTaskClass.getSimpleName())
            // Ship the jar containing the Driver class to the driver side.
            .addGlobalLibraries(EnvironmentUtils.getClassLocation(Driver.class));
    builder.setOperatingSystem(
        OSUtils.isWindows() ?
            ClientProtocol.DriverClientConfiguration.OS.WINDOWS :
            ClientProtocol.DriverClientConfiguration.OS.LINUX);
    return run(failTaskClass, runtimeConfig, builder.build(), timeOut);
  }

  /**
   * Builds the driver-client configuration (wiring this package's Driver event
   * handlers), binds the name of the task that must fail, sets up the driver
   * service, and runs the job via the test launcher.
   *
   * @param failTaskClass task implementation expected to fail; only its simple
   *        name is bound (the driver resolves it back to a class)
   * @param runtimeConfig REEF runtime configuration
   * @param driverClientConfiguration protocol-level driver client settings
   * @param timeOut maximum time to wait for the job
   * @return the final status reported by the launcher
   * @throws InjectionException if Tang cannot build or merge configurations
   * @throws IOException if the driver service setup fails
   */
  public static LauncherStatus run(
      final Class<? extends Task> failTaskClass,
      final Configuration runtimeConfig,
      final ClientProtocol.DriverClientConfiguration driverClientConfiguration,
      final int timeOut) throws InjectionException, IOException {
    final Configuration driverConfig = DriverClientConfiguration.CONF
        .set(DriverClientConfiguration.ON_EVALUATOR_ALLOCATED, Driver.AllocatedEvaluatorHandler.class)
        .set(DriverClientConfiguration.ON_TASK_RUNNING, Driver.RunningTaskHandler.class)
        .set(DriverClientConfiguration.ON_CONTEXT_ACTIVE, Driver.ActiveContextHandler.class)
        .set(DriverClientConfiguration.ON_DRIVER_STARTED, Driver.StartHandler.class)
        .build();
    final JavaConfigurationBuilder cb = Tang.Factory.getTang().newConfigurationBuilder();
    cb.addConfiguration(driverConfig);
    // The driver looks up this named parameter to know which task to launch (and fail).
    cb.bindNamedParameter(Driver.FailTaskName.class, failTaskClass.getSimpleName());
    final Configuration driverServiceConfiguration = FailBridgeClientUtils.setupDriverService(
        runtimeConfig,
        cb.build(),
        driverClientConfiguration);
    return TestDriverLauncher.getLauncher(runtimeConfig).run(driverServiceConfiguration, timeOut);
  }
}
apache-2.0
sdmcraft/sling
contrib/crankstart/launcher/src/test/java/org/apache/sling/crankstart/launcher/RunModeBIT.java
1926
package org.apache.sling.crankstart.launcher; import static org.junit.Assert.assertEquals; import java.io.IOException; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.sling.commons.testing.junit.Retry; import org.apache.sling.commons.testing.junit.RetryRule; import org.apache.sling.testing.tools.osgi.WebconsoleClient; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; /** Test our run modes support */ public class RunModeBIT { private static CrankstartSetup C = new CrankstartSetup(); private static WebconsoleClient osgiConsole; private DefaultHttpClient client; private static final String RUN_MODES = "bala,B,laika,another"; @Rule public final RetryRule retryRule = new RetryRule(); @BeforeClass public static void setupClass() throws Exception { System.setProperty(RunModeFilter.SLING_RUN_MODES, RUN_MODES); C.setup(); osgiConsole = new WebconsoleClient(C.getBaseUrl(), U.ADMIN, U.ADMIN); } @Before public void setup() throws IOException { client = new DefaultHttpClient(); } @AfterClass public static void cleanupClass() { System.clearProperty(RunModeFilter.SLING_RUN_MODES); } @Test @Retry(timeoutMsec=U.LONG_TIMEOUT_MSEC, intervalMsec=U.STD_INTERVAL) public void testSlingApiVersionA() throws Exception { assertEquals("2.0.6", osgiConsole.getBundleVersion(U.SLING_API_BUNDLE)); } @Test @Retry(timeoutMsec=U.LONG_TIMEOUT_MSEC, intervalMsec=U.STD_INTERVAL) public void testConfigA() throws Exception { U.setAdminCredentials(client); U.assertHttpGet(C, client, "/test/config/runmode.test", "runmode.test#mode=(String)That's B + another#service.pid=(String)runmode.test##EOC#"); } }
apache-2.0
jomarko/kie-wb-common
kie-wb-common-widgets/kie-wb-common-ui/src/main/java/org/kie/workbench/common/widgets/client/widget/ListPresenter.java
4269
/*
 * Copyright (C) 2018 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.kie.workbench.common.widgets.client.widget;

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;

import elemental2.dom.Element;
import elemental2.dom.HTMLTableElement;
import elemental2.dom.HTMLTableSectionElement;
import org.jboss.errai.common.client.dom.elemental2.Elemental2DomUtil;
import org.jboss.errai.ioc.client.api.ManagedInstance;

import static java.util.stream.Collectors.toList;

/**
 * Presenter for a DOM-backed list widget: keeps a list of model objects
 * {@code T}, one {@link ListItemPresenter} {@code P} per object, and the DOM
 * element whose children are the item views, all in sync.
 *
 * <p>If the list element is a table section ({@code <tbody>}), the enclosing
 * table is hidden while the list is empty (see {@link #handleTable()}).
 *
 * @param <T> model object type
 * @param <P> item presenter type for {@code T}
 */
public abstract class ListPresenter<T, P extends ListItemPresenter<T, ?, ?>> {

    private static final Elemental2DomUtil elemental2DomUtil = new Elemental2DomUtil();

    // Factory producing one item presenter per model object.
    private final Function<T, P> itemPresenters;

    // Model objects, item presenters, container element, and per-item configuration
    // hook — all assigned in setup()/setupWithPresenters(), not in the constructor.
    private List<T> objects;
    private List<P> presenters;
    private Element listElement;
    private BiConsumer<T, P> itemPresenterConfigurator;

    public ListPresenter(final Function<T, P> itemPresenters) {
        this.itemPresenters = itemPresenters;
    }

    public ListPresenter(final ManagedInstance<P> itemPresenters) {
        // Errai bean lookup: each apply() yields a fresh presenter instance;
        // the model object itself is ignored by the lookup.
        this.itemPresenters = item -> itemPresenters.get();
    }

    /**
     * (Re)initializes the widget from model objects: clears the container and
     * creates one presenter per object via the factory, configuring each with
     * {@code itemPresenterConfigurator}.
     */
    public void setup(final Element listElement,
                      final List<T> objects,
                      final BiConsumer<T, P> itemPresenterConfigurator) {
        this.objects = objects;
        this.presenters = new ArrayList<>();
        this.listElement = listElement;
        this.itemPresenterConfigurator = itemPresenterConfigurator;

        handleTable();

        elemental2DomUtil.removeAllElementChildren(this.listElement);
        this.objects.forEach(this::addToListElement);
    }

    /**
     * (Re)initializes the widget from pre-built presenters; the model list is
     * derived from them. Note: unlike {@link #setup}, the configurator is NOT
     * applied to these presenters here — it is only used for items added later.
     */
    public void setupWithPresenters(final Element listElement,
                                    final List<P> presenters,
                                    final BiConsumer<T, P> itemPresenterConfigurator) {
        this.objects = presenters.stream().map(p -> p.getObject()).collect(toList());
        this.presenters = presenters;
        this.listElement = listElement;
        this.itemPresenterConfigurator = itemPresenterConfigurator;

        handleTable();

        elemental2DomUtil.removeAllElementChildren(this.listElement);
        presenters.forEach(this::addPresenter);
    }

    /**
     * Hides/shows the enclosing table depending on emptiness, when the list
     * element is a table section.
     */
    private void handleTable() {
        if (listElement instanceof HTMLTableSectionElement) {
            // Assumes the section's direct parent is the <table> — TODO confirm
            // no intermediate wrapper element is ever present.
            final HTMLTableElement tableElement = (HTMLTableElement) listElement.parentNode;
            if (objects.isEmpty()) {
                tableElement.classList.add("hidden");
            } else {
                tableElement.classList.remove("hidden");
            }
        }
    }

    /** Appends a new model object: DOM first, then model, then table visibility. */
    public void add(final T o) {
        addToListElement(o);
        objects.add(o);
        handleTable();
    }

    /** Creates a presenter for {@code o} and appends its view to the container. */
    public void addToListElement(final T o) {
        addPresenter(newPresenterFor(o));
    }

    /** Registers an existing presenter and appends its view to the container. */
    public void addPresenter(final P presenter) {
        presenters.add(presenter);
        listElement.appendChild(presenter.getView().getElement());
    }

    /** Builds, back-links, and configures a presenter for {@code o}. */
    public P newPresenterFor(final T o) {
        final P listItemPresenter = this.itemPresenters.apply(o);
        listItemPresenter.setListPresenter(this);
        itemPresenterConfigurator.accept(o, listItemPresenter);
        return listItemPresenter;
    }

    /**
     * Removes an item's object and view. NOTE(review): the presenter is not
     * removed from {@code presenters} here — getPresenters() may keep returning
     * it; confirm whether that is intentional.
     */
    public void remove(final ListItemPresenter<T, ?, ?> listItemPresenter) {
        objects.remove(listItemPresenter.getObject());
        listElement.removeChild(listItemPresenter.getView().getElement());
        handleTable();
    }

    public List<T> getObjectsList() {
        return objects;
    }

    public List<P> getPresenters() {
        return presenters;
    }
}
apache-2.0
fazlan-nazeem/carbon-ml
components/ml/org.wso2.carbon.ml.rest.api/src/main/java/org/wso2/carbon/ml/rest/api/ConfigurationApiV11.java
8028
/*
 * Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.carbon.ml.rest.api;

import java.util.List;

import javax.ws.rs.*;
import javax.ws.rs.core.Response;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpHeaders;
import org.wso2.carbon.analytics.api.AnalyticsDataAPI;
import org.wso2.carbon.analytics.datasource.commons.exception.AnalyticsException;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.ml.commons.constants.MLConstants;
import org.wso2.carbon.ml.commons.domain.config.MLAlgorithm;
import org.wso2.carbon.ml.commons.domain.config.SummaryStatisticsSettings;
import org.wso2.carbon.ml.core.utils.MLCoreServiceValueHolder;
import org.wso2.carbon.ml.core.utils.MLUtils;

/**
 * Read-only REST resource exposing ML server configuration: supported
 * algorithms (and their hyper-parameters / PMML exportability), available
 * WSO2 DAS tables, and summary-statistics settings. All handlers are GET.
 */
@Path("/configs")
public class ConfigurationApiV11 extends MLRestAPI {

    private static final Log logger = LogFactory.getLog(ConfigurationApiV11.class);

    public ConfigurationApiV11() {
    }

    // Advertises the supported verb for CORS/preflight-style OPTIONS requests.
    @OPTIONS
    public Response options() {
        return Response.ok().header(HttpHeaders.ALLOW, "GET").build();
    }

    /**
     * Get all supported algorithms.
     *
     * @return JSON array of {@link org.wso2.carbon.ml.commons.domain.config.MLAlgorithm} objects
     */
    @GET
    @Path("/algorithms")
    @Produces("application/json")
    public Response getAllAlgorithms() {
        List<MLAlgorithm> mlAlgorithms = MLCoreServiceValueHolder.getInstance().getAlgorithms();
        return Response.ok(mlAlgorithms).build();
    }

    /**
     * Get {@link org.wso2.carbon.ml.commons.domain.config.MLAlgorithm} object by algorithm name.
     *
     * @param algorithmName Name of the algorithm
     * @return JSON of {@link org.wso2.carbon.ml.commons.domain.config.MLAlgorithm} object,
     *         400 if the name is missing, 404 if no algorithm matches
     */
    @GET
    @Path("/algorithms/{algorithmName}")
    @Produces("application/json")
    public Response getAlgorithm(@PathParam("algorithmName") String algorithmName) {
        if (algorithmName == null) {
            return Response.status(Response.Status.BAD_REQUEST).entity("Cannot find the Algorithm name from the URI.")
                    .build();
        }
        // Linear scan — the configured algorithm list is small and static.
        List<MLAlgorithm> mlAlgorithms = MLCoreServiceValueHolder.getInstance().getAlgorithms();
        for (MLAlgorithm mlAlgorithm : mlAlgorithms) {
            if (algorithmName.equals(mlAlgorithm.getName())) {
                return Response.ok(mlAlgorithm).build();
            }
        }
        return Response.status(Response.Status.NOT_FOUND).entity("No algorithm found with the name: " + algorithmName)
                .build();
    }

    /**
     * Get hyper-parameters of an algorithm.
     *
     * @param algorithmName Name of the algorithm
     * @return JSON array of {@link org.wso2.carbon.ml.commons.domain.MLHyperParameter} objects,
     *         400 if the name is missing, 404 if no algorithm matches
     */
    @GET
    @Path("/algorithms/{algorithmName}/hyperParams")
    @Produces("application/json")
    public Response getHyperParamsOfAlgorithm(@PathParam("algorithmName") String algorithmName) {
        if (algorithmName == null) {
            return Response.status(Response.Status.BAD_REQUEST).entity("Cannot find the Algorithm name from the URI.")
                    .build();
        }
        List<MLAlgorithm> mlAlgorithms = MLCoreServiceValueHolder.getInstance().getAlgorithms();
        for (MLAlgorithm mlAlgorithm : mlAlgorithms) {
            if (algorithmName.equals(mlAlgorithm.getName())) {
                return Response.ok(mlAlgorithm.getParameters()).build();
            }
        }
        return Response.status(Response.Status.NOT_FOUND).entity("No algorithm found with the name: " + algorithmName)
                .build();
    }

    /**
     * Get available WSO2 DAS tables for the calling tenant.
     *
     * @return JSON array of table names; 500 if the analytics service is
     *         unavailable or listing fails
     */
    @GET
    @Path("/das/tables")
    @Produces("application/json")
    public Response getDASTables() {
        PrivilegedCarbonContext carbonContext = PrivilegedCarbonContext.getThreadLocalCarbonContext();
        int tenantId = carbonContext.getTenantId();
        // The analytics API is resolved dynamically from the OSGi registry and may be absent.
        AnalyticsDataAPI analyticsDataApi = (AnalyticsDataAPI) PrivilegedCarbonContext
                .getThreadLocalCarbonContext().getOSGiService(AnalyticsDataAPI.class, null);
        if (analyticsDataApi == null) {
            String msg = String
                    .format("Error occurred while retrieving DAS tables of tenant [id] %s . Cause: AnalyticsDataAPI is null.",
                            tenantId);
            logger.error(msg);
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(msg).build();
        }
        List<String> tableNames;
        try {
            tableNames = analyticsDataApi.listTables(tenantId);
        } catch (AnalyticsException e) {
            String msg = MLUtils.getErrorMsg(
                    String.format("Error occurred while retrieving DAS tables of tenant [id] %s .", tenantId), e);
            logger.error(msg, e);
            // NOTE(review): the response body is e.getMessage() while the log uses the
            // richer msg — confirm whether the body should also carry msg for consistency.
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(e.getMessage()).build();
        }
        return Response.ok(tableNames).build();
    }

    /**
     * Get summary statistics settings.
     *
     * @return JSON of {@link org.wso2.carbon.ml.commons.domain.config.SummaryStatisticsSettings} object;
     *         500 if the settings are not configured
     */
    @GET
    @Path("/summaryStatSettings")
    @Produces("application/json")
    public Response getSummaryStatSettings() {
        PrivilegedCarbonContext carbonContext = PrivilegedCarbonContext.getThreadLocalCarbonContext();
        int tenantId = carbonContext.getTenantId();
        SummaryStatisticsSettings summaryStatisticsSettings = MLCoreServiceValueHolder.getInstance().getSummaryStatSettings();
        if (summaryStatisticsSettings == null) {
            String msg = String
                    .format("Error occurred while retrieving summary statistics settings of tenant [id] %s.", tenantId);
            logger.error(msg);
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(msg).build();
        }
        return Response.ok(summaryStatisticsSettings).build();
    }

    /**
     * Get PMML availability of an algorithm.
     *
     * <p>200 when the algorithm exists and is PMML-exportable; 404 otherwise;
     * 400 for a missing name or an unsupported {@code format} value (only
     * absent or PMML format is accepted).
     */
    @GET
    @Path("/algorithms/{algorithmName}/exportable")
    @Produces("application/json")
    public Response getPMMLAvailability(@PathParam("algorithmName") String algorithmName,
            @QueryParam("format") String format) {
        if (algorithmName == null) {
            return Response.status(Response.Status.BAD_REQUEST).entity("Cannot find the algorithm name from the URI.")
                    .build();
        }
        // format is optional; when present it must be the PMML model format.
        if (format == null || format.equals(MLConstants.ML_MODEL_FORMAT_PMML)) {
            List<MLAlgorithm> mlAlgorithms = MLCoreServiceValueHolder.getInstance().getAlgorithms();
            for (MLAlgorithm mlAlgorithm : mlAlgorithms) {
                if (algorithmName.equals(mlAlgorithm.getName()) && mlAlgorithm.getPmmlExportable()) {
                    return Response.ok().build();
                }
            }
            return Response.status(Response.Status.NOT_FOUND)
                    .entity("PMML download not supported for : " + algorithmName).build();
        } else {
            return Response.status(Response.Status.BAD_REQUEST).entity("unidentified value for query parameter")
                    .build();
        }
    }
}
apache-2.0
Zamrath/Sample
framework/agents/src/main/java/org/apache/manifoldcf/agents/interfaces/IOutputConnectionManager.java
3783
/* $Id: IOutputConnectionManager.java 996524 2010-09-13 13:38:01Z kwright $ */

/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.manifoldcf.agents.interfaces;

import org.apache.manifoldcf.core.interfaces.*;

/** Manager classes of this kind use the database to contain a human description of an output connection.
* Implementations persist output connection definitions and provide lookup,
* enumeration, and lifecycle operations over them.  Connections are keyed by
* their (unique) name.
*/
public interface IOutputConnectionManager
{
  public static final String _rcsid = "@(#)$Id: IOutputConnectionManager.java 996524 2010-09-13 13:38:01Z kwright $";

  /** Install the manager.  Creates whatever persistent storage the manager needs.
  */
  public void install()
    throws ManifoldCFException;

  /** Uninstall the manager.  Removes the persistent storage created by install().
  */
  public void deinstall()
    throws ManifoldCFException;

  /** Export configuration to the given stream.
  *@param os is the stream to write the configuration to.
  */
  public void exportConfiguration(java.io.OutputStream os)
    throws java.io.IOException, ManifoldCFException;

  /** Import configuration from the given stream.
  *@param is is the stream to read the configuration from.
  */
  public void importConfiguration(java.io.InputStream is)
    throws java.io.IOException, ManifoldCFException;

  /** Obtain a list of the output connections, ordered by name.
  *@return an array of connection objects.
  */
  public IOutputConnection[] getAllConnections()
    throws ManifoldCFException;

  /** Load an output connection by name.
  *@param name is the name of the output connection.
  *@return the loaded connection object, or null if not found.
  */
  public IOutputConnection load(String name)
    throws ManifoldCFException;

  /** Load a set of output connections.
  *@param names are the names of the output connections.
  *@return the descriptors of the output connections, with null
  * values for those not found.
  */
  public IOutputConnection[] loadMultiple(String[] names)
    throws ManifoldCFException;

  /** Create a new output connection object.  The object is not persisted until save() is called.
  *@return the new object.
  */
  public IOutputConnection create()
    throws ManifoldCFException;

  /** Save an output connection object.
  *@param object is the object to save.
  *@return true if the object was created, false otherwise.
  */
  public boolean save(IOutputConnection object)
    throws ManifoldCFException;

  /** Delete an output connection.
  *@param name is the name of the connection to delete.  If the
  * name does not exist, no error is returned.
  */
  public void delete(String name)
    throws ManifoldCFException;

  /** Get a list of output connections that share the same connector.
  *@param className is the class name of the connector.
  *@return the repository connections that use that connector.
  */
  public String[] findConnectionsForConnector(String className)
    throws ManifoldCFException;

  /** Check if underlying connector exists.
  *@param name is the name of the connection to check.
  *@return true if the underlying connector is registered.
  */
  public boolean checkConnectorExists(String name)
    throws ManifoldCFException;

  // Schema related

  /** Return the primary table name.
  *@return the table name.
  */
  public String getTableName();

  /** Return the name column.
  *@return the name column.
  */
  public String getConnectionNameColumn();

}
apache-2.0
amitsela/incubator-beam
sdks/java/core/src/test/java/org/apache/beam/sdk/io/XmlSourceTest.java
33666
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.sdk.io; import static org.apache.beam.sdk.testing.SourceTestUtils.assertSplitAtFractionExhaustive; import static org.apache.beam.sdk.testing.SourceTestUtils.assertSplitAtFractionFails; import static org.apache.beam.sdk.testing.SourceTestUtils.assertSplitAtFractionSucceedsAndConsistent; import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import com.google.common.collect.ImmutableList; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.List; import java.util.Random; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import org.apache.beam.sdk.io.Source.Reader; import org.apache.beam.sdk.options.PipelineOptions; import 
org.apache.beam.sdk.options.PipelineOptionsFactory; import org.apache.beam.sdk.testing.NeedsRunner; import org.apache.beam.sdk.testing.PAssert; import org.apache.beam.sdk.testing.TestPipeline; import org.apache.beam.sdk.transforms.display.DisplayData; import org.apache.beam.sdk.values.PCollection; import org.hamcrest.Matchers; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** * Tests XmlSource. */ @RunWith(JUnit4.class) public class XmlSourceTest { @Rule public TestPipeline p = TestPipeline.create(); @Rule public TemporaryFolder tempFolder = new TemporaryFolder(); @Rule public ExpectedException exception = ExpectedException.none(); String tinyXML = "<trains><train><name>Thomas</name></train><train><name>Henry</name></train>" + "<train><name>James</name></train></trains>"; String xmlWithMultiByteElementName = "<දුම්රියන්><දුම්රිය><name>Thomas</name></දුම්රිය><දුම්රිය><name>Henry</name></දුම්රිය>" + "<දුම්රිය><name>James</name></දුම්රිය></දුම්රියන්>"; String xmlWithMultiByteChars = "<trains><train><name>Thomas¥</name></train><train><name>Hen¶ry</name></train>" + "<train><name>Jamßes</name></train></trains>"; String trainXML = "<trains>" + "<train><name>Thomas</name><number>1</number><color>blue</color></train>" + "<train><name>Henry</name><number>3</number><color>green</color></train>" + "<train><name>Toby</name><number>7</number><color>brown</color></train>" + "<train><name>Gordon</name><number>4</number><color>blue</color></train>" + "<train><name>Emily</name><number>-1</number><color>red</color></train>" + "<train><name>Percy</name><number>6</number><color>green</color></train>" + "</trains>"; String trainXMLWithEmptyTags = "<trains>" + "<train/>" + "<train><name>Thomas</name><number>1</number><color>blue</color></train>" + 
"<train><name>Henry</name><number>3</number><color>green</color></train>" + "<train/>" + "<train><name>Toby</name><number>7</number><color>brown</color></train>" + "<train><name>Gordon</name><number>4</number><color>blue</color></train>" + "<train><name>Emily</name><number>-1</number><color>red</color></train>" + "<train><name>Percy</name><number>6</number><color>green</color></train>" + "</trains>"; String trainXMLWithAttributes = "<trains>" + "<train size=\"small\"><name>Thomas</name><number>1</number><color>blue</color></train>" + "<train size=\"big\"><name>Henry</name><number>3</number><color>green</color></train>" + "<train size=\"small\"><name>Toby</name><number>7</number><color>brown</color></train>" + "<train size=\"big\"><name>Gordon</name><number>4</number><color>blue</color></train>" + "<train size=\"small\"><name>Emily</name><number>-1</number><color>red</color></train>" + "<train size=\"small\"><name>Percy</name><number>6</number><color>green</color></train>" + "</trains>"; String trainXMLWithSpaces = "<trains>" + "<train><name>Thomas </name> <number>1</number><color>blue</color></train>" + "<train><name>Henry</name><number>3</number><color>green</color></train>\n" + "<train><name>Toby</name><number>7</number><color> brown </color></train> " + "<train><name>Gordon</name> <number>4</number><color>blue</color>\n</train>\t" + "<train><name>Emily</name><number>-1</number>\t<color>red</color></train>" + "<train>\n<name>Percy</name> <number>6 </number> <color>green</color></train>" + "</trains>"; String trainXMLWithAllFeaturesMultiByte = "<දුම්රියන්>" + "<දුම්රිය/>" + "<දුම්රිය size=\"small\"><name> Thomas¥</name><number>1</number><color>blue</color>" + "</දුම්රිය>" + "<දුම්රිය size=\"big\"><name>He nry</name><number>3</number><color>green</color></දුම්රිය>" + "<දුම්රිය size=\"small\"><name>Toby </name><number>7</number><color>br¶own</color>" + "</දුම්රිය>" + "<දුම්රිය/>" + "<දුම්රිය size=\"big\"><name>Gordon</name><number>4</number><color> 
blue</color></දුම්රිය>" + "<දුම්රිය size=\"small\"><name>Emily</name><number>-1</number><color>red</color></දුම්රිය>" + "<දුම්රිය size=\"small\"><name>Percy</name><number>6</number><color>green</color>" + "</දුම්රිය>" + "</දුම්රියන්>"; String trainXMLWithAllFeaturesSingleByte = "<trains>" + "<train/>" + "<train size=\"small\"><name> Thomas</name><number>1</number><color>blue</color>" + "</train>" + "<train size=\"big\"><name>He nry</name><number>3</number><color>green</color></train>" + "<train size=\"small\"><name>Toby </name><number>7</number><color>brown</color>" + "</train>" + "<train/>" + "<train size=\"big\"><name>Gordon</name><number>4</number><color> blue</color></train>" + "<train size=\"small\"><name>Emily</name><number>-1</number><color>red</color></train>" + "<train size=\"small\"><name>Percy</name><number>6</number><color>green</color>" + "</train>" + "</trains>"; @XmlRootElement static class Train { public static final int TRAIN_NUMBER_UNDEFINED = -1; public String name = null; public String color = null; public int number = TRAIN_NUMBER_UNDEFINED; @XmlAttribute(name = "size") public String size = null; public Train() {} public Train(String name, int number, String color, String size) { this.name = name; this.number = number; this.color = color; this.size = size; } @Override public int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + number; hashCode = 31 * hashCode + (color == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (size == null ? 
0 : name.hashCode()); return hashCode; } @Override public boolean equals(Object obj) { if (!(obj instanceof Train)) { return false; } Train other = (Train) obj; return (name == null || name.equals(other.name)) && (number == other.number) && (color == null || color.equals(other.color)) && (size == null || size.equals(other.size)); } @Override public String toString() { String str = "Train["; boolean first = true; if (name != null) { str = str + "name=" + name; first = false; } if (number != Integer.MIN_VALUE) { if (!first) { str = str + ","; } str = str + "number=" + number; first = false; } if (color != null) { if (!first) { str = str + ","; } str = str + "color=" + color; first = false; } if (size != null) { if (!first) { str = str + ","; } str = str + "size=" + size; } str = str + "]"; return str; } } private List<Train> generateRandomTrainList(int size) { String[] names = {"Thomas", "Henry", "Gordon", "Emily", "Toby", "Percy", "Mavis", "Edward", "Bertie", "Harold", "Hiro", "Terence", "Salty", "Trevor"}; int[] numbers = {-1, 1, 2, 3, 4, 5, 6, 7, 8, 9}; String[] colors = {"red", "blue", "green", "orange", "brown", "black", "white"}; String[] sizes = {"small", "medium", "big"}; Random random = new Random(System.currentTimeMillis()); List<Train> trains = new ArrayList<>(); for (int i = 0; i < size; i++) { trains.add(new Train(names[random.nextInt(names.length - 1)], numbers[random.nextInt(numbers.length - 1)], colors[random.nextInt(colors.length - 1)], sizes[random.nextInt(sizes.length - 1)])); } return trains; } private String trainToXMLElement(Train train) { return "<train size=\"" + train.size + "\"><name>" + train.name + "</name><number>" + train.number + "</number><color>" + train.color + "</color></train>"; } private File createRandomTrainXML(String fileName, List<Train> trains) throws IOException { File file = tempFolder.newFile(fileName); try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) { writer.write("<trains>"); writer.newLine(); for 
(Train train : trains) { String str = trainToXMLElement(train); writer.write(str); writer.newLine(); } writer.write("</trains>"); writer.newLine(); } return file; } private List<Train> readEverythingFromReader(Reader<Train> reader) throws IOException { List<Train> results = new ArrayList<>(); for (boolean available = reader.start(); available; available = reader.advance()) { Train train = reader.getCurrent(); results.add(train); } return results; } @Test public void testReadXMLTiny() throws IOException { File file = tempFolder.newFile("trainXMLTiny"); Files.write(file.toPath(), tinyXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of( new Train("Thomas", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("Henry", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("James", Train.TRAIN_NUMBER_UNDEFINED, null, null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testReadXMLWithMultiByteChars() throws IOException { File file = tempFolder.newFile("trainXMLTiny"); Files.write(file.toPath(), xmlWithMultiByteChars.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of( new Train("Thomas¥", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("Hen¶ry", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("Jamßes", Train.TRAIN_NUMBER_UNDEFINED, null, null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test @Ignore( 
"Multi-byte characters in XML are not supported because the parser " + "currently does not correctly report byte offsets") public void testReadXMLWithMultiByteElementName() throws IOException { File file = tempFolder.newFile("trainXMLTiny"); Files.write(file.toPath(), xmlWithMultiByteElementName.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("දුම්රියන්") .withRecordElement("දුම්රිය") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of( new Train("Thomas", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("Henry", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("James", Train.TRAIN_NUMBER_UNDEFINED, null, null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testSplitWithEmptyBundleAtEnd() throws Exception { File file = tempFolder.newFile("trainXMLTiny"); Files.write(file.toPath(), tinyXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(10); List<? 
extends FileBasedSource<Train>> splits = source.splitIntoBundles(50, null); assertTrue(splits.size() > 2); List<Train> results = new ArrayList<>(); for (FileBasedSource<Train> split : splits) { results.addAll(readEverythingFromReader(split.createReader(null))); } List<Train> expectedResults = ImmutableList.of( new Train("Thomas", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("Henry", Train.TRAIN_NUMBER_UNDEFINED, null, null), new Train("James", Train.TRAIN_NUMBER_UNDEFINED, null, null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder(trainsToStrings(results).toArray())); } List<String> trainsToStrings(List<Train> input) { List<String> strings = new ArrayList<>(); for (Object data : input) { strings.add(data.toString()); } return strings; } @Test public void testReadXMLSmall() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of(new Train("Thomas", 1, "blue", null), new Train("Henry", 3, "green", null), new Train("Toby", 7, "brown", null), new Train("Gordon", 4, "blue", null), new Train("Emily", -1, "red", null), new Train("Percy", 6, "green", null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testReadXMLNoRootElement() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRecordElement("train") .withRecordClass(Train.class); exception.expect(NullPointerException.class); exception.expectMessage( "rootElement is null. 
Use builder method withRootElement() to set this."); readEverythingFromReader(source.createReader(null)); } @Test public void testReadXMLNoRecordElement() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordClass(Train.class); exception.expect(NullPointerException.class); exception.expectMessage( "recordElement is null. Use builder method withRecordElement() to set this."); readEverythingFromReader(source.createReader(null)); } @Test public void testReadXMLNoRecordClass() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train"); exception.expect(NullPointerException.class); exception.expectMessage( "recordClass is null. 
Use builder method withRecordClass() to set this."); readEverythingFromReader(source.createReader(null)); } @Test public void testReadXMLIncorrectRootElement() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("something") .withRecordElement("train") .withRecordClass(Train.class); exception.expectMessage("Unexpected close tag </trains>; expected </something>."); readEverythingFromReader(source.createReader(null)); } @Test public void testReadXMLIncorrectRecordElement() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("something") .withRecordClass(Train.class); assertEquals(readEverythingFromReader(source.createReader(null)), new ArrayList<Train>()); } @XmlRootElement private static class WrongTrainType { @SuppressWarnings("unused") public String something; } @Test public void testReadXMLInvalidRecordClass() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<WrongTrainType> source = XmlSource.<WrongTrainType>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(WrongTrainType.class); exception.expect(RuntimeException.class); // JAXB internationalizes the error message. So this is all we can match for. 
exception.expectMessage(both(containsString("name")).and(Matchers.containsString("something"))); try (Reader<WrongTrainType> reader = source.createReader(null)) { List<WrongTrainType> results = new ArrayList<>(); for (boolean available = reader.start(); available; available = reader.advance()) { WrongTrainType train = reader.getCurrent(); results.add(train); } } } @Test public void testReadXMLNoBundleSize() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class); List<Train> expectedResults = ImmutableList.of(new Train("Thomas", 1, "blue", null), new Train("Henry", 3, "green", null), new Train("Toby", 7, "brown", null), new Train("Gordon", 4, "blue", null), new Train("Emily", -1, "red", null), new Train("Percy", 6, "green", null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testReadXMLWithEmptyTags() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXMLWithEmptyTags.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of(new Train("Thomas", 1, "blue", null), new Train("Henry", 3, "green", null), new Train("Toby", 7, "brown", null), new Train("Gordon", 4, "blue", null), new Train("Emily", -1, "red", null), new Train("Percy", 6, "green", null), new Train(), new Train()); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test 
@Category(NeedsRunner.class) public void testReadXMLSmallPipeline() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXML.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); PCollection<Train> output = p.apply("ReadFileData", Read.from(source)); List<Train> expectedResults = ImmutableList.of(new Train("Thomas", 1, "blue", null), new Train("Henry", 3, "green", null), new Train("Toby", 7, "brown", null), new Train("Gordon", 4, "blue", null), new Train("Emily", -1, "red", null), new Train("Percy", 6, "green", null)); PAssert.that(output).containsInAnyOrder(expectedResults); p.run(); } @Test public void testReadXMLWithAttributes() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXMLWithAttributes.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of(new Train("Thomas", 1, "blue", "small"), new Train("Henry", 3, "green", "big"), new Train("Toby", 7, "brown", "small"), new Train("Gordon", 4, "blue", "big"), new Train("Emily", -1, "red", "small"), new Train("Percy", 6, "green", "small")); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testReadXMLWithWhitespaces() throws IOException { File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXMLWithSpaces.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) 
.withMinBundleSize(1024); List<Train> expectedResults = ImmutableList.of(new Train("Thomas ", 1, "blue", null), new Train("Henry", 3, "green", null), new Train("Toby", 7, " brown ", null), new Train("Gordon", 4, "blue", null), new Train("Emily", -1, "red", null), new Train("Percy", 6, "green", null)); assertThat( trainsToStrings(expectedResults), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test public void testReadXMLLarge() throws IOException { String fileName = "temp.xml"; List<Train> trains = generateRandomTrainList(100); File file = createRandomTrainXML(fileName, trains); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); assertThat( trainsToStrings(trains), containsInAnyOrder( trainsToStrings(readEverythingFromReader(source.createReader(null))).toArray())); } @Test @Category(NeedsRunner.class) public void testReadXMLLargePipeline() throws IOException { String fileName = "temp.xml"; List<Train> trains = generateRandomTrainList(100); File file = createRandomTrainXML(fileName, trains); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); PCollection<Train> output = p.apply("ReadFileData", Read.from(source)); PAssert.that(output).containsInAnyOrder(trains); p.run(); } @Test public void testSplitWithEmptyBundles() throws Exception { String fileName = "temp.xml"; List<Train> trains = generateRandomTrainList(10); File file = createRandomTrainXML(fileName, trains); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(10); List<? 
extends FileBasedSource<Train>> splits = source.splitIntoBundles(100, null); assertTrue(splits.size() > 2); List<Train> results = new ArrayList<>(); for (FileBasedSource<Train> split : splits) { results.addAll(readEverythingFromReader(split.createReader(null))); } assertThat(trainsToStrings(trains), containsInAnyOrder(trainsToStrings(results).toArray())); } @Test public void testXMLWithSplits() throws Exception { String fileName = "temp.xml"; List<Train> trains = generateRandomTrainList(100); File file = createRandomTrainXML(fileName, trains); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(10); List<? extends FileBasedSource<Train>> splits = source.splitIntoBundles(256, null); // Not a trivial split assertTrue(splits.size() > 2); List<Train> results = new ArrayList<>(); for (FileBasedSource<Train> split : splits) { results.addAll(readEverythingFromReader(split.createReader(null))); } assertThat(trainsToStrings(trains), containsInAnyOrder(trainsToStrings(results).toArray())); } @Test public void testSplitAtFraction() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); String fileName = "temp.xml"; List<Train> trains = generateRandomTrainList(100); File file = createRandomTrainXML(fileName, trains); XmlSource<Train> fileSource = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(10); List<? extends FileBasedSource<Train>> splits = fileSource.splitIntoBundles(file.length() / 3, null); for (BoundedSource<Train> splitSource : splits) { int numItems = readEverythingFromReader(splitSource.createReader(null)).size(); // Should not split while unstarted. 
assertSplitAtFractionFails(splitSource, 0, 0.7, options); assertSplitAtFractionSucceedsAndConsistent(splitSource, 1, 0.7, options); assertSplitAtFractionSucceedsAndConsistent(splitSource, 15, 0.7, options); assertSplitAtFractionFails(splitSource, 0, 0.0, options); assertSplitAtFractionFails(splitSource, 20, 0.3, options); assertSplitAtFractionFails(splitSource, numItems, 1.0, options); // After reading 100 elements we will be approximately at position // 0.99 * (endOffset - startOffset) hence trying to split at fraction 0.9 will be // unsuccessful. assertSplitAtFractionFails(splitSource, numItems, 0.9, options); // Following passes since we can always find a fraction that is extremely close to 1 such that // the position suggested by the fraction will be larger than the position the reader is at // after reading "items - 1" elements. // This also passes for "numItemsToReadBeforeSplit = items" if the position at suggested // fraction is larger than the position the reader is at after reading all "items" elements // (i.e., the start position of the last element). This is true for most cases but will not // be true if reader position is only one less than the end position. (i.e., the last element // of the bundle start at the last byte that belongs to the bundle). 
assertSplitAtFractionSucceedsAndConsistent(splitSource, numItems - 1, 0.999, options); } } @Test public void testSplitAtFractionExhaustiveSingleByte() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXMLWithAllFeaturesSingleByte.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class); assertSplitAtFractionExhaustive(source, options); } @Test @Ignore( "Multi-byte characters in XML are not supported because the parser " + "currently does not correctly report byte offsets") public void testSplitAtFractionExhaustiveMultiByte() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); File file = tempFolder.newFile("trainXMLSmall"); Files.write(file.toPath(), trainXMLWithAllFeaturesMultiByte.getBytes(StandardCharsets.UTF_8)); XmlSource<Train> source = XmlSource.<Train>from(file.toPath().toString()) .withRootElement("දුම්රියන්") .withRecordElement("දුම්රිය") .withRecordClass(Train.class); assertSplitAtFractionExhaustive(source, options); } @Test @Category(NeedsRunner.class) public void testReadXMLFilePattern() throws IOException { List<Train> trains1 = generateRandomTrainList(20); File file = createRandomTrainXML("temp1.xml", trains1); List<Train> trains2 = generateRandomTrainList(10); createRandomTrainXML("temp2.xml", trains2); List<Train> trains3 = generateRandomTrainList(15); createRandomTrainXML("temp3.xml", trains3); generateRandomTrainList(8); createRandomTrainXML("otherfile.xml", trains1); XmlSource<Train> source = XmlSource.<Train>from(file.getParent() + "/" + "temp*.xml") .withRootElement("trains") .withRecordElement("train") .withRecordClass(Train.class) .withMinBundleSize(1024); PCollection<Train> output = p.apply("ReadFileData", Read.from(source)); List<Train> expectedResults = new ArrayList<>(); 
expectedResults.addAll(trains1); expectedResults.addAll(trains2); expectedResults.addAll(trains3); PAssert.that(output).containsInAnyOrder(expectedResults); p.run(); } @Test public void testDisplayData() { XmlSource<?> source = XmlSource .<Integer>from("foo.xml") .withRootElement("bird") .withRecordElement("cat") .withMinBundleSize(1234) .withRecordClass(Integer.class); DisplayData displayData = DisplayData.from(source); assertThat(displayData, hasDisplayItem("filePattern", "foo.xml")); assertThat(displayData, hasDisplayItem("rootElement", "bird")); assertThat(displayData, hasDisplayItem("recordElement", "cat")); assertThat(displayData, hasDisplayItem("recordClass", Integer.class)); assertThat(displayData, hasDisplayItem("minBundleSize", 1234)); } }
apache-2.0
arturog8m/ocs
bundle/jsky.app.ot/src/main/java/jsky/app/ot/viewer/action/QueryAction.java
1015
package jsky.app.ot.viewer.action; import jsky.app.ot.viewer.QueryManager; import jsky.util.gui.BusyWin; import javax.swing.*; import java.awt.event.ActionEvent; /** * The QueryAction is for querying the online database and displaying * the science programs found. */ public final class QueryAction extends AbstractAction { // An object that can display a window for querying the science program database private final QueryManager _queryManager; /** * Set the (observatory specific) object responsible for displaying a window where * the user can query the science program database */ public QueryAction(final QueryManager queryManager) { super("OT Browser..."); _queryManager = queryManager; if (_queryManager == null) { setEnabled(false); } } public void actionPerformed(final ActionEvent evt) { if (_queryManager != null) { BusyWin.showBusy(); _queryManager.queryDB(); } } }
bsd-3-clause
shaotuanchen/sunflower_exp
tools/source/gcc-4.2.4/libjava/classpath/javax/swing/plaf/metal/OceanTheme.java
11106
/* DefaultMetalTheme.java -- A modern theme for the Metal L&F Copyright (C) 2005 Free Software Foundation, Inc. This file is part of GNU Classpath. GNU Classpath is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU Classpath is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU Classpath; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License cover the whole combination. As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. 
*/ package javax.swing.plaf.metal; import java.awt.Color; import java.awt.Insets; import java.util.Arrays; import javax.swing.UIDefaults; import javax.swing.plaf.ColorUIResource; import javax.swing.plaf.BorderUIResource.LineBorderUIResource; /** * A modern theme for the Metal Look &amp; Feel. * @since 1.5 * * @author Roman Kennke (roman@kennke.org) */ public class OceanTheme extends DefaultMetalTheme { /** * The OceanTheme value for black. */ static final ColorUIResource BLACK = new ColorUIResource(51, 51, 51); /** * The OceanTheme value for primary1. */ static final ColorUIResource PRIMARY1 = new ColorUIResource(99, 130, 191); /** * The OceanTheme value for primary1. */ static final ColorUIResource PRIMARY2 = new ColorUIResource(163, 184, 204); /** * The OceanTheme value for primary1. */ static final ColorUIResource PRIMARY3 = new ColorUIResource(184, 207, 229); /** * The OceanTheme value for secondary1. */ static final ColorUIResource SECONDARY1 = new ColorUIResource(122, 138, 153); /** * The OceanTheme value for secondary2. */ static final ColorUIResource SECONDARY2 = new ColorUIResource(184, 207, 229); /** * The OceanTheme value for secondary3. */ static final ColorUIResource SECONDARY3 = new ColorUIResource(238, 238, 238); /** * The OceanTheme value for inactive control text. */ static final ColorUIResource INACTIVE_CONTROL_TEXT = new ColorUIResource(153, 153, 153); /** * Returns the name of this theme, &quot;Ocean&quot; */ public String getName() { return "Ocean"; } /** * Returns the color for control text, which is the * value of the theme's black value. */ public ColorUIResource getControlTextColor() { return getBlack(); } /** * Returns the desktop color, which is the theme's white color. */ public ColorUIResource getDesktopColor() { return getWhite(); } /** * Returns the color for inactive control text, which is the * RGB value (153, 153, 153). 
*/ public ColorUIResource getInactiveControlTextColor() { return INACTIVE_CONTROL_TEXT; } /** * Returns the OceanTheme's color for disabled menu foreground, * */ public ColorUIResource getMenuDisabledForeground() { return INACTIVE_CONTROL_TEXT; } /** * Returns the OceanTheme's color for black, the RGB value * (51, 51, 51). * * @return Returns the OceanTheme's value for black */ protected ColorUIResource getBlack() { return BLACK; } /** * Return the OceanTheme's value for primary 1, the RGB value * (99, 130, 191). */ protected ColorUIResource getPrimary1() { return PRIMARY1; } /** * Return the OceanTheme's value for primary 2, the RGB value * (163, 184, 204). */ protected ColorUIResource getPrimary2() { return PRIMARY2; } /** * Return the OceanTheme's value for primary 1, the RGB value * (184, 207, 229). */ protected ColorUIResource getPrimary3() { return PRIMARY3; } /** * Return the OceanTheme's value for secondary 1, the RGB value * (122, 138, 153). */ protected ColorUIResource getSecondary1() { return SECONDARY1; } /** * Return the OceanTheme's value for secondary 2, the RGB value * (184, 207, 229). */ protected ColorUIResource getSecondary2() { return SECONDARY2; } /** * Return the OceanTheme's value for secondary 3, the RGB value * (238, 238, 238). */ protected ColorUIResource getSecondary3() { return SECONDARY3; } /** * Adds customized entries to the UIDefaults table. * * @param defaults the UI defaults table */ public void addCustomEntriesToTable(UIDefaults defaults) { // Gradients. 
defaults.put("Button.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("CheckBox.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("CheckBoxMenuItem.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("MenuBar.gradient", Arrays.asList(new Object[] {new Float(1.0), new Float(0.0), new ColorUIResource(Color.WHITE), new ColorUIResource(218, 218, 218), new ColorUIResource(218, 218, 218)})); defaults.put("RadioButton.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("RadioButtonMenuItem.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("ScrollBar.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("Slider.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.2), new ColorUIResource(200, 221, 242), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("Slider.focusGradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.2), new ColorUIResource(200, 221, 242), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("ToggleButton.gradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new 
ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); defaults.put("InternalFrame.activeTitleGradient", Arrays.asList(new Object[] {new Float(0.3), new Float(0.0), new ColorUIResource(221, 232, 243), new ColorUIResource(Color.WHITE), new ColorUIResource(184, 207, 229)})); // Colors. ColorUIResource c1 = new ColorUIResource(200, 221, 242); ColorUIResource c2 = new ColorUIResource(153, 153, 153); ColorUIResource c3 = new ColorUIResource(204, 204, 204); ColorUIResource c4 = new ColorUIResource(210, 226, 239); ColorUIResource c5 = new ColorUIResource(218, 218, 218); defaults.put("Button.disabledToolBarBorderBackground", c3); defaults.put("Button.toolBarBorderBackground", c2); defaults.put("Label.disabledForeground", c2); defaults.put("MenuBar.borderColor", c3); defaults.put("Slider.altTrackColor", c4); defaults.put("SplitPane.dividerFocusColor", c1); defaults.put("TabbedPane.contentAreaColor", c1); defaults.put("TabbedPane.borderHightlightColor", PRIMARY1); defaults.put("TabbedPane.selected", c1); defaults.put("TabbedPane.tabAreaBackground", c5); defaults.put("TabbedPane.unselectedBackground", SECONDARY3); defaults.put("Table.gridColor", SECONDARY1); defaults.put("ToolBar.borderColor", c3); defaults.put("Tree.selectionBorderColor", PRIMARY1); // Borders. defaults.put("Table.focusCellHighlightBorder", new LineBorderUIResource(getPrimary1())); // Insets. defaults.put("TabbedPane.contentBorderInsets", new Insets(4, 2, 3, 3)); defaults.put("TabbedPane.tabAreaInsets", new Insets(2, 2, 0, 6)); // Flags. defaults.put("SplitPane.oneTouchButtonsOpaque", Boolean.FALSE); defaults.put("Menu.opaque", Boolean.FALSE); defaults.put("ToolBar.isRollover", Boolean.TRUE); defaults.put("RadioButton.rollover", Boolean.TRUE); defaults.put("CheckBox.rollover", Boolean.TRUE); defaults.put("Button.rollover", Boolean.TRUE); // Icons. // FIXME: Add OceanTheme icons. 
// defaults.put("Tree.leafIcon", XXX); // defaults.put("Tree.expandedIcon", XXX); // defaults.put("Tree.openIcon", XXX); // defaults.put("Tree.closedIcon", XXX); // defaults.put("Tree.collapsedIcon", XXX); // defaults.put("FileChooser.newFolderIcon", XXX); // defaults.put("FileChooser.homeFolderIcon", XXX); // defaults.put("FileChooser.upFolderIcon", XXX); // defaults.put("FileView.hardDriveIcon", XXX); // defaults.put("FileView.floppyDriveIcon", XXX); // defaults.put("FileView.fileIcon", XXX); // defaults.put("FileView.computerIcon", XXX); // defaults.put("FileView.directoryIcon", XXX); // defaults.put("OptionPane.questionIcon", XXX); // defaults.put("OptionPane.errorIcon", XXX); // defaults.put("OptionPane.warningIcon", XXX); // defaults.put("OptionPane.informationIcon", XXX); // defaults.put("InternalFrame.icon", XXX); // defaults.put("InternalFrame.closeIcon", XXX); // defaults.put("InternalFrame.iconifyIcon", XXX); // defaults.put("InternalFrame.minimizeIcon", XXX); // defaults.put("InternalFrame.maximizeIcon", XXX); // defaults.put("InternalFrame.paletteCloseIcon", XXX); // UI classes. defaults.put("MenuBarUI", "javax.swing.plaf.metal.MetalMenuBarUI"); // Others. defaults.put("Button.rolloverIconType", "ocean"); } }
bsd-3-clause
aosm/gccfast
libjava/java/awt/image/RGBImageFilter.java
8791
/* RGBImageFilter.java -- Java class for filtering Pixels by RGB values Copyright (C) 1999 Free Software Foundation, Inc. This file is part of GNU Classpath. GNU Classpath is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU Classpath is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU Classpath; see the file COPYING. If not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License cover the whole combination. As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. */ package java.awt.image; /** * A filter designed to filter images in the default RGBColorModel regardless of * the ImageProducer's ColorModel. 
* * @author Mark Benvenuto <mcb54@columbia.edu> */ public abstract class RGBImageFilter extends ImageFilter { protected ColorModel origmodel = ColorModel.getRGBdefault(); protected ColorModel newmodel; /** Specifies whether to apply the filter to the index entries of the IndexColorModel. Subclasses should set this to true if the filter does not depend on the pixel's coordinate. */ protected boolean canFilterIndexColorModel = false; /** Construct new RGBImageFilter. */ public RGBImageFilter() { } /** * Sets the ColorModel used to filter with. If the specified ColorModel is IndexColorModel * and canFilterIndexColorModel is true, we subsitute the ColorModel for a filtered one * here and in setPixels whenever the original one appears. Otherwise overrides the default * ColorModel of ImageProducer and specifies the default RGBColorModel * * @param model the color model to be used most often by setPixels * @see ColorModel */ public void setColorModel(ColorModel model) { origmodel = model; newmodel = model; if( ( model instanceof IndexColorModel) && canFilterIndexColorModel ) { newmodel = filterIndexColorModel( (IndexColorModel) model ); } } /** Registers a new ColorModel to subsitute for the old ColorModel when setPixels encounters the a pixel with the old ColorModel. The pixel remains unchanged except for a new ColorModel. @param oldcm the old ColorModel @param newcm the new ColorModel */ public void substituteColorModel(ColorModel oldcm, ColorModel newcm) { origmodel = oldcm; newmodel = newcm; } /** Filters an IndexColorModel through the filterRGB function. Uses coordinates of -1 to indicate its filtering an index and not a pixel. 
@param icm an IndexColorModel to filter */ public IndexColorModel filterIndexColorModel(IndexColorModel icm) { int len = icm.getMapSize(), rgb; byte reds[] = new byte[len], greens[] = new byte[len], blues[] = new byte[len], alphas[] = new byte[len]; icm.getAlphas( alphas ); icm.getReds( reds ); icm.getGreens( greens ); icm.getBlues( blues ); for( int i = 0; i < len; i++ ) { rgb = filterRGB( -1, -1, makeColor ( alphas[i], reds[i], greens[i], blues[i] ) ); alphas[i] = (byte)(( 0xff000000 & rgb ) >> 24); reds[i] = (byte)(( 0xff0000 & rgb ) >> 16); greens[i] = (byte)(( 0xff00 & rgb ) >> 8); blues[i] = (byte)(0xff & rgb); } return new IndexColorModel( icm.getPixelSize(), len, reds, greens, blues, alphas ); } private int makeColor( byte a, byte r, byte g, byte b ) { return ( 0xff000000 & (a << 24) | 0xff0000 & (r << 16) | 0xff00 & (b << 8) | 0xff & g ); } /** This functions filters a set of RGB pixels through filterRGB. @param x the x coordinate of the rectangle @param y the y coordinate of the rectangle @param w the width of the rectangle @param h the height of the rectangle @param model the <code>ColorModel</code> used to translate the pixels @param pixels the array of pixel values @param offset the index of the first pixels in the <code>pixels</code> array @param scansize the width to use in extracting pixels from the <code>pixels</code> array */ public void filterRGBPixels(int x, int y, int w, int h, int[] pixels, int off, int scansize) { int xp, yp, i; for( xp = x; xp < ( x + w); xp++ ) for( yp = y; yp < (y + h); yp++ ) pixels[ off + yp * scansize + xp ] = filterRGB( xp, yp, pixels[ off + yp * scansize + xp ] ); } /** * If the ColorModel is the same ColorModel which as already converted * then it converts it the converted ColorModel. Otherwise it passes the * array of pixels through filterRGBpixels. 
* * @param x the x coordinate of the rectangle * @param y the y coordinate of the rectangle * @param w the width of the rectangle * @param h the height of the rectangle * @param model the <code>ColorModel</code> used to translate the pixels * @param pixels the array of pixel values * @param offset the index of the first pixels in the <code>pixels</code> array * @param scansize the width to use in extracting pixels from the <code>pixels</code> array */ public void setPixels(int x, int y, int w, int h, ColorModel model, byte[] pixels, int offset, int scansize) { if( model == origmodel ) { consumer.setPixels(x, y, w, h, newmodel, pixels, offset, scansize); } else { //FIXME //convert to proper CM int pixelsi[] = new int[ pixels.length / 4 ]; filterRGBPixels( x, y, w, h, pixelsi, offset, scansize ); } } /** * This function delivers a rectangle of pixels where any * pixel(m,n) is stored in the array as an <code>int</code> at * index (n * scansize + m + offset). * * @param x the x coordinate of the rectangle * @param y the y coordinate of the rectangle * @param w the width of the rectangle * @param h the height of the rectangle * @param model the <code>ColorModel</code> used to translate the pixels * @param pixels the array of pixel values * @param offset the index of the first pixels in the <code>pixels</code> array * @param scansize the width to use in extracting pixels from the <code>pixels</code> array */ public void setPixels(int x, int y, int w, int h, ColorModel model, int[] pixels, int offset, int scansize) { if( model == origmodel ) { consumer.setPixels(x, y, w, h, newmodel, pixels, offset, scansize); } else { convertColorModelToDefault( x, y, w, h, model, pixels, offset, scansize ); filterRGBPixels( x, y, w, h, pixels, offset, scansize ); } } private void convertColorModelToDefault( int x, int y, int w, int h, ColorModel model, int pixels[], int offset, int scansize) { int xp, yp, i; for( xp = x; xp < ( x + w); xp++ ) for( yp = y; yp < (y + h); yp++ ) pixels[ 
offset + yp * scansize + xp ] = makeColorbyDefaultCM( pixels[ offset + yp * scansize + xp ] ); } private int makeColorbyDefaultCM( int rgb ) { return makeColor( origmodel.getRed( rgb ), origmodel.getGreen( rgb ), origmodel.getGreen( rgb ), origmodel.getBlue( rgb ) ); } private int makeColor( int a, int r, int g, int b ) { return (int)( 0xff000000 & (a << 24) | 0xff0000 & (r << 16) | 0xff00 & (b << 8) | 0xff & g ); } /** Filters a single pixel from the default ColorModel. @param x x-coordinate @param y y-coordinate @param rgb color */ public abstract int filterRGB(int x, int y, int rgb); }
gpl-2.0
JKereliuk/phenotips
components/base-war/src/main/java/com/xpn/xwiki/web/XWikiContextInitializationFilter.java
7613
/*
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see http://www.gnu.org/licenses/
 */
package com.xpn.xwiki.web;

import org.xwiki.container.Container;
import org.xwiki.container.servlet.ServletContainerException;
import org.xwiki.container.servlet.ServletContainerInitializer;
import org.xwiki.context.Execution;
import org.xwiki.model.reference.DocumentReference;
import org.xwiki.model.reference.DocumentReferenceResolver;
import org.xwiki.model.reference.SpaceReference;
import org.xwiki.model.reference.WikiReference;

import java.io.IOException;
import java.lang.reflect.Type;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import com.xpn.xwiki.XWiki;
import com.xpn.xwiki.XWikiContext;
import com.xpn.xwiki.XWikiException;
import com.xpn.xwiki.user.api.XWikiRightService;
import com.xpn.xwiki.user.api.XWikiUser;

/**
 * This filter can be used to initialize the XWiki context before processing a request.
 * <p>
 * Non-HTTP requests are passed down the chain untouched; for HTTP requests the full XWiki context
 * (engine, request/response wrappers, wiki instance, URL factory, current user) is set up before
 * the chain runs and the associated thread-local state is cleaned up afterwards.
 *
 * @version $Id$
 */
public class XWikiContextInitializationFilter implements Filter
{
    /**
     * The filter configuration object.
     */
    private FilterConfig filterConfig;

    /**
     * XWiki context mode. A negative value (the fallback when the "mode" init parameter is missing
     * or unparsable) means "leave the context's default mode unchanged".
     */
    private int mode;

    @Override
    public void destroy()
    {
        this.filterConfig = null;
    }

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException,
        ServletException
    {
        try {
            // Only HTTP requests are supported.
            if (request instanceof HttpServletRequest) {
                initializeXWikiContext(request, response);
            }
            chain.doFilter(request, response);
        } finally {
            // The finally block guarantees the thread-local state is cleared even when context
            // initialization or the downstream chain throws, preventing memory leaks.
            if (request instanceof HttpServletRequest) {
                cleanupComponents();
            }
        }
    }

    @Override
    public void init(FilterConfig filterConfig) throws ServletException
    {
        this.filterConfig = filterConfig;
        try {
            this.mode = Integer.parseInt(filterConfig.getInitParameter("mode"));
        } catch (Exception e) {
            // Missing or malformed "mode" parameter: -1 tells initializeXWikiContext to keep the
            // default context mode.
            this.mode = -1;
        }
    }

    /**
     * Initializes the XWiki context: creates the context from the wrapped servlet objects,
     * initializes the Container component, the XWiki instance, the URL factory, the localized
     * resources and finally the current user reference.
     *
     * @param request the request being processed
     * @param response the response
     * @throws ServletException if the initialization fails
     */
    protected void initializeXWikiContext(ServletRequest request, ServletResponse response) throws ServletException
    {
        try {
            // Not all request types specify an action (e.g. GWT-RPC) so we default to the empty string.
            String action = "";

            XWikiServletContext xwikiEngine = new XWikiServletContext(this.filterConfig.getServletContext());
            XWikiServletRequest xwikiRequest = new XWikiServletRequest((HttpServletRequest) request);
            XWikiServletResponse xwikiResponse = new XWikiServletResponse((HttpServletResponse) response);

            // Create the XWiki context.
            XWikiContext context = Utils.prepareContext(action, xwikiRequest, xwikiResponse, xwikiEngine);

            // Overwrite the context mode if the mode filter initialization parameter is specified.
            if (this.mode >= 0) {
                context.setMode(this.mode);
            }

            // Initialize the Container component which is the new way of transporting the Context in the new
            // component architecture. Further initialization might require the Container component, so this
            // must happen before XWiki.getXWiki below.
            initializeContainerComponent(context);

            // Initialize the XWiki database. XWiki#getXWiki(XWikiContext) calls XWikiContext.setWiki(XWiki).
            XWiki xwiki = XWiki.getXWiki(context);

            // Initialize the URL factory.
            context.setURLFactory(xwiki.getURLFactoryService().createURLFactory(context.getMode(), context));

            // Prepare the localized resources, according to the selected language.
            xwiki.prepareResources(context);

            // Initialize the current user. When checkAuth returns null the user reference set up by
            // prepareContext is left untouched -- presumably the guest default; confirm against callers.
            XWikiUser user = context.getWiki().checkAuth(context);
            if (user != null) {
                @SuppressWarnings("deprecation")
                DocumentReferenceResolver<String> documentReferenceResolver =
                    Utils.getComponent(DocumentReferenceResolver.TYPE_STRING, "explicit");
                SpaceReference defaultUserSpace =
                    new SpaceReference(XWiki.SYSTEM_SPACE, new WikiReference(context.getWikiId()));
                DocumentReference userReference = documentReferenceResolver.resolve(user.getUser(), defaultUserSpace);
                // An explicit guest login is represented by a null user reference on the context.
                context.setUserReference(XWikiRightService.GUEST_USER.equals(userReference.getName()) ? null
                    : userReference);
            }
        } catch (XWikiException e) {
            throw new ServletException("Failed to initialize the XWiki context.", e);
        }
    }

    /**
     * Binds the current request, response and session to the Container component.
     *
     * @param context the XWiki context
     * @throws ServletException if the container component initialization fails
     */
    protected void initializeContainerComponent(XWikiContext context) throws ServletException
    {
        // Initialize the Container fields (request, response, session). Note that this is a bridge between the old
        // core and the component architecture. In the new component architecture we use ThreadLocal to transport
        // the request, response and session to components which require them.
        @SuppressWarnings("deprecation")
        ServletContainerInitializer containerInitializer = Utils.getComponent((Type) ServletContainerInitializer.class);

        try {
            containerInitializer.initializeRequest(context.getRequest().getHttpServletRequest(), context);
            containerInitializer.initializeResponse(context.getResponse());
            containerInitializer.initializeSession(context.getRequest().getHttpServletRequest());
        } catch (ServletContainerException e) {
            throw new ServletException("Failed to initialize Request/Response or Session", e);
        }
    }

    /**
     * We must ensure we clean the ThreadLocal variables located in the Container and Execution components as
     * otherwise we will have a potential memory leak.
     */
    protected void cleanupComponents()
    {
        @SuppressWarnings("deprecation")
        Container container = Utils.getComponent((Type) Container.class);
        container.removeRequest();
        container.removeResponse();
        container.removeSession();

        @SuppressWarnings("deprecation")
        Execution execution = Utils.getComponent((Type) Execution.class);
        execution.removeContext();
    }
}
agpl-3.0
jstourac/wildfly
messaging-activemq/subsystem/src/main/java/org/wildfly/extension/messaging/activemq/SecuritySettingAdd.java
3047
/* * JBoss, Home of Professional Open Source. * Copyright 2011, Red Hat, Inc., and individual contributors * as indicated by the @author tags. See the copyright.txt file in the * distribution for a full listing of individual contributors. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this software; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA, or see the FSF site: http://www.fsf.org. */ package org.wildfly.extension.messaging.activemq; import java.util.HashSet; import org.apache.activemq.artemis.core.security.Role; import org.apache.activemq.artemis.core.server.ActiveMQServer; import org.jboss.as.controller.AbstractAddStepHandler; import org.jboss.as.controller.OperationContext; import org.jboss.as.controller.OperationFailedException; import org.jboss.as.controller.PathAddress; import org.jboss.as.controller.descriptions.ModelDescriptionConstants; import org.jboss.dmr.ModelNode; import org.jboss.msc.service.ServiceController; import org.jboss.msc.service.ServiceName; /** * {@code OperationStepHandler} for adding a new security setting. 
* * @author Emanuel Muckenhuber */ class SecuritySettingAdd extends AbstractAddStepHandler { static final SecuritySettingAdd INSTANCE = new SecuritySettingAdd(); @Override protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException { model.setEmptyObject(); } @Override protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException { final ActiveMQServer server = getServer(context, operation); if(server != null) { final PathAddress address = PathAddress.pathAddress(operation.require(ModelDescriptionConstants.OP_ADDR)); final String match = address.getLastElement().getValue(); server.getSecurityRepository().addMatch(match, new HashSet<Role>()); } } static ActiveMQServer getServer(final OperationContext context, ModelNode operation) { final ServiceName serviceName = MessagingServices.getActiveMQServiceName(PathAddress.pathAddress(operation.get(ModelDescriptionConstants.OP_ADDR))); final ServiceController<?> controller = context.getServiceRegistry(true).getService(serviceName); if(controller != null) { return ActiveMQServer.class.cast(controller.getValue()); } return null; } }
lgpl-2.1
jamezp/wildfly-core
controller/src/main/java/org/jboss/as/controller/services/path/PathRemoveHandler.java
7462
/* * JBoss, Home of Professional Open Source * Copyright 2011 Red Hat Inc. and/or its affiliates and other contributors * as indicated by the @authors tag. All rights reserved. * See the copyright.txt in the distribution for a * full listing of individual contributors. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License, v. 2.1. * This program is distributed in the hope that it will be useful, but WITHOUT A * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A * PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public License, * v.2.1 along with this distribution; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ package org.jboss.as.controller.services.path; import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.REMOVE; import static org.jboss.as.controller.logging.ControllerLogger.MGMT_OP_LOGGER; import static org.jboss.as.controller.services.path.PathResourceDefinition.PATH_CAPABILITY; import static org.jboss.as.controller.services.path.PathResourceDefinition.PATH_SPECIFIED; import static org.jboss.as.controller.services.path.PathResourceDefinition.READ_ONLY; import static org.jboss.as.controller.services.path.PathResourceDefinition.RELATIVE_TO; import static org.jboss.as.controller.services.path.PathResourceDefinition.RELATIVE_TO_LOCAL; import org.jboss.as.controller.OperationContext; import org.jboss.as.controller.OperationFailedException; import org.jboss.as.controller.OperationStepHandler; import org.jboss.as.controller.PathAddress; import org.jboss.as.controller.descriptions.ModelDescriptionConstants; import org.jboss.as.controller.logging.ControllerLogger; import 
org.jboss.as.controller.services.path.PathManager.Event;
import org.jboss.as.controller.services.path.PathManagerService.PathEventContextImpl;
import org.jboss.dmr.ModelNode;
import org.jboss.msc.service.ServiceTarget;

/**
 * Handler for the path resource remove operation.
 *
 * @author Brian Stansberry (c) 2011 Red Hat Inc.
 */
public class PathRemoveHandler implements OperationStepHandler {

    // TODO make this package protected
    public static final String OPERATION_NAME = REMOVE;

    /** Path manager kept in sync with the model, or {@code null} if no service interaction is required. */
    private final PathManagerService pathManager;

    /**
     * Create the PathRemoveHandler
     *
     * @param pathManager the path manager, or {@code null} if interaction with the path manager is not required
     *                    for the resource
     */
    PathRemoveHandler(final PathManagerService pathManager) {
        this.pathManager = pathManager;
    }

    /**
     * Create the PathRemoveHandler
     *
     * @param pathManager the path manager, or {@code null} if interaction with the path manager is not required
     *                    for the resource
     * @param services {@code true} if interaction with the path manager is required for the resource
     *
     * @deprecated not for use outside the kernel; may be removed at any time
     */
    @Deprecated
    protected PathRemoveHandler(final PathManagerService pathManager, final boolean services) {
        // FIX: the ternary was inverted (services ? null : pathManager), which discarded the path manager
        // exactly when service interaction was requested — contradicting the assert below and the behavior
        // of createSpecifiedInstance/createSpecifiedNoServicesInstance.
        this(services ? pathManager : null);
        assert !services || pathManager != null;
    }

    static PathRemoveHandler createNamedInstance() {
        return new PathRemoveHandler(null);
    }

    static PathRemoveHandler createSpecifiedInstance(final PathManagerService pathManager) {
        assert pathManager != null;
        return new PathRemoveHandler(pathManager);
    }

    static PathRemoveHandler createSpecifiedNoServicesInstance() {
        return new PathRemoveHandler(null);
    }

    /**
     * Removes the path resource, deregisters its capability and, when a path manager is present,
     * removes the backing path entry/service with full rollback support.
     *
     * @param context the operation context
     * @param operation the remove operation
     * @throws OperationFailedException if the path is read-only
     */
    @Override
    public void execute(OperationContext context, ModelNode operation) throws OperationFailedException {
        final String name = context.getCurrentAddressValue();
        final ModelNode model = context.readResource(PathAddress.EMPTY_ADDRESS).getModel();
        // Read-only paths (e.g. standard system paths) must never be removed.
        if (model.get(READ_ONLY.getName()).asBoolean(false)) {
            throw ControllerLogger.ROOT_LOGGER.cannotRemoveReadOnlyPath(name);
        }
        context.removeResource(PathAddress.EMPTY_ADDRESS);
        context.deregisterCapability(PATH_CAPABILITY.getDynamicName(context.getCurrentAddressValue()));
        RELATIVE_TO_LOCAL.removeCapabilityRequirements(context, model.get(RELATIVE_TO.getName()));
        if (pathManager != null) {
            final PathEventContextImpl pathEventContext = pathManager.checkRestartRequired(context, name, Event.REMOVED);
            // Capture the existing values to restore the PathEntry and services in case of rollback
            final String path;
            final String relativeTo;
            if (pathEventContext.isInstallServices()) {
                pathManager.removePathEntry(name, true);
                path = PathAddHandler.getPathValue(context, PATH_SPECIFIED, model);
                relativeTo = PathAddHandler.getPathValue(context, RELATIVE_TO, model);
            } else {
                path = relativeTo = null;
            }
            context.addStep(new OperationStepHandler() {
                @Override
                public void execute(OperationContext context, ModelNode operation) throws OperationFailedException {
                    if (pathEventContext.isInstallServices()) {
                        pathManager.removePathService(context, name);
                    }
                    context.completeStep(new OperationContext.RollbackHandler() {
                        @Override
                        public void handleRollback(OperationContext context, ModelNode operation) {
                            try {
                                if (pathEventContext.isInstallServices()) {
                                    // Re-install the path service that was removed above.
                                    final ServiceTarget target = context.getServiceTarget();
                                    if (relativeTo == null) {
                                        pathManager.addAbsolutePathService(target, name, path);
                                    } else {
                                        pathManager.addRelativePathService(target, name, path, false, relativeTo);
                                    }
                                } else {
                                    context.revertRestartRequired();
                                }
                            } catch (Exception e) {
                                // A rollback handler must not throw; record the failure instead.
                                MGMT_OP_LOGGER.errorRevertingOperation(e, getClass().getSimpleName(),
                                        operation.require(ModelDescriptionConstants.OP).asString(),
                                        PathAddress.pathAddress(operation.get(ModelDescriptionConstants.OP_ADDR)));
                            }
                        }
                    });
                }
            }, OperationContext.Stage.RUNTIME);
            context.completeStep(new OperationContext.RollbackHandler() {
                @Override
                public void handleRollback(OperationContext context, ModelNode operation) {
                    if (pathEventContext.isInstallServices()) {
                        // Re-add entry to the path manager
                        pathManager.addPathEntry(name, path, relativeTo, false);
                    }
                }
            });
        }
    }
}
lgpl-2.1
xasx/wildfly
testsuite/integration/basic/src/test/java/org/jboss/as/test/integration/ejb/home/remotehome/injection/Injection.java
1279
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2010, Red Hat Inc., and individual contributors as indicated
 * by the @authors tag. See the copyright.txt in the distribution for a
 * full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.jboss.as.test.integration.ejb.home.remotehome.injection;

import java.rmi.RemoteException;

import javax.ejb.EJBObject;

/**
 * EJB 2.x remote component interface used by the remote-home injection tests.
 *
 * @author Stuart Douglas
 */
public interface Injection extends EJBObject {

    /**
     * @return a message produced by the bean implementation
     * @throws RemoteException if a remote communication error occurs
     */
    String message() throws RemoteException;
}
lgpl-2.1
ibek/jbpm
jbpm-human-task/jbpm-human-task-jpa/src/main/java/org/jbpm/services/task/query/DeadlineSummaryImpl.java
2561
/** * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jbpm.services.task.query; import java.util.Date; public class DeadlineSummaryImpl implements org.kie.internal.task.api.model.DeadlineSummary { private long taskId; private long deadlineId; private Date date; public DeadlineSummaryImpl() { // default constructor } public DeadlineSummaryImpl(long taskId, long deadlineId, Date date) { super(); this.taskId = taskId; this.deadlineId = deadlineId; this.date = date; } public long getTaskId() { return taskId; } public void setTaskId(long taskId) { this.taskId = taskId; } public long getDeadlineId() { return deadlineId; } public void setDeadlineId(long deadlineId) { this.deadlineId = deadlineId; } public Date getDate() { return date; } public void setDate(Date date) { this.date = date; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((date == null) ? 
0 : date.hashCode()); result = prime * result + (int) (deadlineId ^ (deadlineId >>> 32)); result = prime * result + (int) (taskId ^ (taskId >>> 32)); return result; } @Override public boolean equals(Object obj) { if ( this == obj ) return true; if ( obj == null ) return false; if ( !(obj instanceof DeadlineSummaryImpl) ) return false; DeadlineSummaryImpl other = (DeadlineSummaryImpl) obj; if ( date == null ) { if ( other.date != null ) return false; } else if ( date.getTime() != other.date.getTime() ) return false; if ( deadlineId != other.deadlineId ) return false; if ( taskId != other.taskId ) return false; return true; } }
apache-2.0
prigaux/cas
api/cas-server-core-api-ticket/src/main/java/org/apereo/cas/ticket/ServiceTicket.java
2204
package org.apereo.cas.ticket;

import com.fasterxml.jackson.annotation.JsonTypeInfo;

import org.apereo.cas.authentication.Authentication;
import org.apereo.cas.authentication.principal.Service;
import org.apereo.cas.ticket.proxy.ProxyGrantingTicket;

/**
 * Interface for a Service Ticket. A service ticket is used to grant access to a
 * specific service for a principal. A Service Ticket is generally a one-time
 * use ticket.
 *
 * @author Scott Battaglia
 * @since 3.0.0
 */
// Embed the concrete class name in serialized JSON so implementations round-trip correctly.
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY)
public interface ServiceTicket extends Ticket {

    /**
     * Prefix generally applied to unique ids generated
     * by UniqueTicketIdGenerator.
     */
    String PREFIX = "ST";

    /**
     * Retrieve the service this ticket was given for.
     *
     * @return the server.
     */
    Service getService();

    /**
     * Determine if this ticket was created at the same time as a
     * TicketGrantingTicket.
     *
     * @return true if it is, false otherwise.
     */
    boolean isFromNewLogin();

    /**
     * Attempts to ensure that the service specified matches the service associated with the ticket.
     *
     * @param service The incoming service to match this service ticket against.
     * @return true, if the match is successful.
     */
    boolean isValidFor(Service service);

    /**
     * Method to grant a TicketGrantingTicket from this service to the
     * authentication. Analogous to the ProxyGrantingTicket.
     *
     * @param id The unique identifier for this ticket.
     * @param authentication The Authentication we wish to grant a ticket for.
     * @param expirationPolicy expiration policy associated with this ticket
     * @return The ticket granting ticket.
     * @throws AbstractTicketException ticket exception thrown when generating the ticket
     * @since 4.2
     */
    ProxyGrantingTicket grantProxyGrantingTicket(String id, Authentication authentication,
                                                 ExpirationPolicy expirationPolicy) throws AbstractTicketException;
}
apache-2.0
huiyi-learning/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
88787
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS; import java.io.BufferedOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSOutputSummer; import 
org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; 
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.util.ByteArrayManager; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum.Type; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; import org.apache.htrace.NullScope; import org.apache.htrace.Sampler; import org.apache.htrace.Span; import org.apache.htrace.Trace; import org.apache.htrace.TraceInfo; import org.apache.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; /**************************************************************** * DFSOutputStream creates files from a stream of bytes. * * The client application writes data that is cached internally by * this stream. Data is broken up into packets, each packet is * typically 64K in size. A packet comprises of chunks. Each chunk * is typically 512 bytes and has an associated checksum with it. * * When a client application fills up the currentPacket, it is * enqueued into dataQueue. The DataStreamer thread picks up * packets from the dataQueue, sends it to the first datanode in * the pipeline and moves it from the dataQueue to the ackQueue. * The ResponseProcessor receives acks from the datanodes. 
When an * successful ack for a packet is received from all datanodes, the * ResponseProcessor removes the corresponding packet from the * ackQueue. * * In case of error, all outstanding packets and moved from * ackQueue. A new pipeline is setup by eliminating the bad * datanode from the original pipeline. The DataStreamer now * starts sending packets from the dataQueue. ****************************************************************/ @InterfaceAudience.Private public class DFSOutputStream extends FSOutputSummer implements Syncable, CanSetDropBehind { private final long dfsclientSlowLogThresholdMs; /** * Number of times to retry creating a file when there are transient * errors (typically related to encryption zones and KeyProvider operations). */ @VisibleForTesting static final int CREATE_RETRY_COUNT = 10; @VisibleForTesting static CryptoProtocolVersion[] SUPPORTED_CRYPTO_VERSIONS = CryptoProtocolVersion.supported(); private final DFSClient dfsClient; private final ByteArrayManager byteArrayManager; private Socket s; // closed is accessed by different threads under different locks. private volatile boolean closed = false; private String src; private final long fileId; private final long blockSize; /** Only for DataTransferProtocol.writeBlock(..) */ private final DataChecksum checksum4WriteBlock; private final int bytesPerChecksum; // both dataQueue and ackQueue are protected by dataQueue lock private final LinkedList<DFSPacket> dataQueue = new LinkedList<DFSPacket>(); private final LinkedList<DFSPacket> ackQueue = new LinkedList<DFSPacket>(); private DFSPacket currentPacket = null; private DataStreamer streamer; private long currentSeqno = 0; private long lastQueuedSeqno = -1; private long lastAckedSeqno = -1; private long bytesCurBlock = 0; // bytes written in current block private int packetSize = 0; // write packet size, not including the header. 
private int chunksPerPacket = 0; private final AtomicReference<IOException> lastException = new AtomicReference<IOException>(); private long artificialSlowdown = 0; private long lastFlushOffset = 0; // offset when flush was invoked //persist blocks on namenode private final AtomicBoolean persistBlocks = new AtomicBoolean(false); private volatile boolean appendChunk = false; // appending to existing partial block private long initialFileSize = 0; // at time of file open private final Progressable progress; private final short blockReplication; // replication factor of file private boolean shouldSyncBlock = false; // force blocks to disk upon close private final AtomicReference<CachingStrategy> cachingStrategy; private boolean failPacket = false; private FileEncryptionInfo fileEncryptionInfo; private static final BlockStoragePolicySuite blockStoragePolicySuite = BlockStoragePolicySuite.createDefaultSuite(); /** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/ private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock, long seqno, boolean lastPacketInBlock) throws InterruptedIOException { final byte[] buf; final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize; try { buf = byteArrayManager.newByteArray(bufferSize); } catch (InterruptedException ie) { final InterruptedIOException iioe = new InterruptedIOException( "seqno=" + seqno); iioe.initCause(ie); throw iioe; } return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize(), lastPacketInBlock); } /** * For heartbeat packets, create buffer directly by new byte[] * since heartbeats should not be blocked. */ private DFSPacket createHeartbeatPacket() throws InterruptedIOException { final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN]; return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO, getChecksumSize(), false); } // // The DataStreamer class is responsible for sending data packets to the // datanodes in the pipeline. 
It retrieves a new blockid and block locations // from the namenode, and starts streaming packets to the pipeline of // Datanodes. Every packet has a sequence number associated with // it. When all the packets for a block are sent out and acks for each // if them are received, the DataStreamer closes the current block. // class DataStreamer extends Daemon { private volatile boolean streamerClosed = false; private ExtendedBlock block; // its length is number of bytes acked private Token<BlockTokenIdentifier> accessToken; private DataOutputStream blockStream; private DataInputStream blockReplyStream; private ResponseProcessor response = null; private volatile DatanodeInfo[] nodes = null; // list of targets for current block private volatile StorageType[] storageTypes = null; private volatile String[] storageIDs = null; private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes = CacheBuilder.newBuilder() .expireAfterWrite( dfsClient.getConf().excludedNodesCacheExpiry, TimeUnit.MILLISECONDS) .removalListener(new RemovalListener<DatanodeInfo, DatanodeInfo>() { @Override public void onRemoval( RemovalNotification<DatanodeInfo, DatanodeInfo> notification) { DFSClient.LOG.info("Removing node " + notification.getKey() + " from the excluded nodes list"); } }) .build(new CacheLoader<DatanodeInfo, DatanodeInfo>() { @Override public DatanodeInfo load(DatanodeInfo key) throws Exception { return key; } }); private String[] favoredNodes; volatile boolean hasError = false; volatile int errorIndex = -1; // Restarting node index AtomicInteger restartingNodeIndex = new AtomicInteger(-1); private long restartDeadline = 0; // Deadline of DN restart private BlockConstructionStage stage; // block construction stage private long bytesSent = 0; // number of bytes that've been sent private final boolean isLazyPersistFile; /** Nodes have been used in the pipeline before and have failed. 
*/ private final List<DatanodeInfo> failed = new ArrayList<DatanodeInfo>(); /** The last ack sequence number before pipeline failure. */ private long lastAckedSeqnoBeforeFailure = -1; private int pipelineRecoveryCount = 0; /** Has the current block been hflushed? */ private boolean isHflushed = false; /** Append on an existing block? */ private final boolean isAppend; private DataStreamer(HdfsFileStatus stat, ExtendedBlock block) { isAppend = false; isLazyPersistFile = isLazyPersist(stat); this.block = block; stage = BlockConstructionStage.PIPELINE_SETUP_CREATE; } /** * Construct a data streamer for appending to the last partial block * @param lastBlock last block of the file to be appended * @param stat status of the file to be appended * @param bytesPerChecksum number of bytes per checksum * @throws IOException if error occurs */ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat, int bytesPerChecksum) throws IOException { isAppend = true; stage = BlockConstructionStage.PIPELINE_SETUP_APPEND; block = lastBlock.getBlock(); bytesSent = block.getNumBytes(); accessToken = lastBlock.getBlockToken(); isLazyPersistFile = isLazyPersist(stat); long usedInLastBlock = stat.getLen() % blockSize; int freeInLastBlock = (int)(blockSize - usedInLastBlock); // calculate the amount of free space in the pre-existing // last crc chunk int usedInCksum = (int)(stat.getLen() % bytesPerChecksum); int freeInCksum = bytesPerChecksum - usedInCksum; // if there is space in the last block, then we have to // append to that block if (freeInLastBlock == blockSize) { throw new IOException("The last block for file " + src + " is full."); } if (usedInCksum > 0 && freeInCksum > 0) { // if there is space in the last partial chunk, then // setup in such a way that the next packet will have only // one chunk that fills up the partial chunk. 
// computePacketChunkSize(0, freeInCksum); setChecksumBufSize(freeInCksum); appendChunk = true; } else { // if the remaining space in the block is smaller than // that expected size of of a packet, then create // smaller size packet. // computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), bytesPerChecksum); } // setup pipeline to append to the last block XXX retries?? setPipeline(lastBlock); errorIndex = -1; // no errors yet. if (nodes.length < 1) { throw new IOException("Unable to retrieve blocks locations " + " for last block " + block + "of file " + src); } } private void setPipeline(LocatedBlock lb) { setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs()); } private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes, String[] storageIDs) { this.nodes = nodes; this.storageTypes = storageTypes; this.storageIDs = storageIDs; } private void setFavoredNodes(String[] favoredNodes) { this.favoredNodes = favoredNodes; } /** * Initialize for data streaming */ private void initDataStreaming() { this.setName("DataStreamer for file " + src + " block " + block); response = new ResponseProcessor(nodes); response.start(); stage = BlockConstructionStage.DATA_STREAMING; } private void endBlock() { if(DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("Closing old block " + block); } this.setName("DataStreamer for file " + src); closeResponder(); closeStream(); setPipeline(null, null, null); stage = BlockConstructionStage.PIPELINE_SETUP_CREATE; } /* * streamer thread is the only thread that opens streams to datanode, * and closes them. Any error recovery is also done by this thread. 
   */
    @Override
    public void run() {
      long lastPacket = Time.monotonicNow();
      TraceScope scope = NullScope.INSTANCE;
      while (!streamerClosed && dfsClient.clientRunning) {
        // if the Responder encountered an error, shutdown Responder
        if (hasError && response != null) {
          try {
            response.close();
            response.join();
            response = null;
          } catch (InterruptedException e) {
            DFSClient.LOG.warn("Caught exception ", e);
          }
        }

        DFSPacket one;
        try {
          // process datanode IO errors if any
          boolean doSleep = false;
          if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
            doSleep = processDatanodeError();
          }

          synchronized (dataQueue) {
            // wait for a packet to be sent.
            long now = Time.monotonicNow();
            // While streaming, wake up at least every socketTimeout/2 so a
            // heartbeat packet can be sent to keep the pipeline alive.
            while ((!streamerClosed && !hasError && dfsClient.clientRunning
                && dataQueue.size() == 0 &&
                (stage != BlockConstructionStage.DATA_STREAMING ||
                  stage == BlockConstructionStage.DATA_STREAMING &&
                  now - lastPacket < dfsClient.getConf().socketTimeout/2)) || doSleep ) {
              long timeout = dfsClient.getConf().socketTimeout/2 - (now-lastPacket);
              timeout = timeout <= 0 ? 1000 : timeout;
              timeout = (stage == BlockConstructionStage.DATA_STREAMING)?
                  timeout : 1000;
              try {
                dataQueue.wait(timeout);
              } catch (InterruptedException e) {
                DFSClient.LOG.warn("Caught exception ", e);
              }
              doSleep = false;
              now = Time.monotonicNow();
            }
            if (streamerClosed || hasError || !dfsClient.clientRunning) {
              continue;
            }
            // get packet to be sent.
            if (dataQueue.isEmpty()) {
              // Nothing queued: keep the pipeline alive with a heartbeat.
              one = createHeartbeatPacket();
              assert one != null;
            } else {
              one = dataQueue.getFirst(); // regular data packet
              long parents[] = one.getTraceParents();
              if (parents.length > 0) {
                scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
                // TODO: use setParents API once it's available from HTrace 3.2
                // scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
                // scope.getSpan().setParents(parents);
              }
            }
          }

          // get new block from namenode.
          if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
            if(DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("Allocating new block");
            }
            setPipeline(nextBlockOutputStream());
            initDataStreaming();
          } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
            if(DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("Append to block " + block);
            }
            setupPipelineForAppendOrRecovery();
            initDataStreaming();
          }

          long lastByteOffsetInBlock = one.getLastByteOffsetBlock();
          if (lastByteOffsetInBlock > blockSize) {
            throw new IOException("BlockSize " + blockSize +
                " is smaller than data size. " +
                " Offset of packet in block " +
                lastByteOffsetInBlock +
                " Aborting file " + src);
          }

          if (one.isLastPacketInBlock()) {
            // wait for all data packets have been successfully acked
            synchronized (dataQueue) {
              while (!streamerClosed && !hasError &&
                  ackQueue.size() != 0 && dfsClient.clientRunning) {
                try {
                  // wait for acks to arrive from datanodes
                  dataQueue.wait(1000);
                } catch (InterruptedException e) {
                  DFSClient.LOG.warn("Caught exception ", e);
                }
              }
            }
            if (streamerClosed || hasError || !dfsClient.clientRunning) {
              continue;
            }
            stage = BlockConstructionStage.PIPELINE_CLOSE;
          }

          // send the packet
          Span span = null;
          synchronized (dataQueue) {
            // move packet from dataQueue to ackQueue
            if (!one.isHeartbeatPacket()) {
              span = scope.detach();
              one.setTraceSpan(span);
              dataQueue.removeFirst();
              ackQueue.addLast(one);
              dataQueue.notifyAll();
            }
          }

          if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("DataStreamer block " + block +
                " sending packet " + one);
          }

          // write out data to remote datanode
          TraceScope writeScope = Trace.startSpan("writeTo", span);
          try {
            one.writeTo(blockStream);
            blockStream.flush();
          } catch (IOException e) {
            // HDFS-3398 treat primary DN is down since client is unable to
            // write to primary DN. If a failed or restarting node has already
            // been recorded by the responder, the following call will have no
            // effect. Pipeline recovery can handle only one node error at a
            // time. If the primary node fails again during the recovery, it
            // will be taken out then.
            tryMarkPrimaryDatanodeFailed();
            throw e;
          } finally {
            writeScope.close();
          }
          lastPacket = Time.monotonicNow();

          // update bytesSent
          long tmpBytesSent = one.getLastByteOffsetBlock();
          if (bytesSent < tmpBytesSent) {
            bytesSent = tmpBytesSent;
          }

          if (streamerClosed || hasError || !dfsClient.clientRunning) {
            continue;
          }

          // Is this block full?
          if (one.isLastPacketInBlock()) {
            // wait for the close packet has been acked
            synchronized (dataQueue) {
              while (!streamerClosed && !hasError &&
                  ackQueue.size() != 0 && dfsClient.clientRunning) {
                dataQueue.wait(1000);// wait for acks to arrive from datanodes
              }
            }
            if (streamerClosed || hasError || !dfsClient.clientRunning) {
              continue;
            }

            endBlock();
          }
          if (progress != null) { progress.progress(); }

          // This is used by unit test to trigger race conditions.
          if (artificialSlowdown != 0 && dfsClient.clientRunning) {
            Thread.sleep(artificialSlowdown);
          }
        } catch (Throwable e) {
          // Log warning if there was a real error.
          if (restartingNodeIndex.get() == -1) {
            DFSClient.LOG.warn("DataStreamer Exception", e);
          }
          if (e instanceof IOException) {
            setLastException((IOException)e);
          } else {
            setLastException(new IOException("DataStreamer Exception: ",e));
          }
          hasError = true;
          if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
            // Not a datanode issue
            streamerClosed = true;
          }
        } finally {
          scope.close();
        }
      }
      closeInternal();
    }

    /** Final teardown when the streamer loop exits: stop responder, close streams, wake waiters. */
    private void closeInternal() {
      closeResponder();       // close and join
      closeStream();
      streamerClosed = true;
      setClosed();
      synchronized (dataQueue) {
        dataQueue.notifyAll();
      }
    }

    /*
     * close both streamer and DFSOutputStream, should be called only
     * by an external thread and only after all data to be sent has
     * been flushed to datanode.
   *
     * Interrupt this data streamer if force is true
     *
     * @param force if this data stream is forced to be closed
     */
    void close(boolean force) {
      streamerClosed = true;
      synchronized (dataQueue) {
        dataQueue.notifyAll();
      }
      if (force) {
        this.interrupt();
      }
    }

    /** Stop the responder thread and wait for it to exit; always nulls the reference. */
    private void closeResponder() {
      if (response != null) {
        try {
          response.close();
          response.join();
        } catch (InterruptedException e) {
          DFSClient.LOG.warn("Caught exception ", e);
        } finally {
          response = null;
        }
      }
    }

    /**
     * Close the block output stream, the reply stream, and the socket.
     * Close failures are recorded via setLastException rather than thrown,
     * and each reference is nulled regardless of outcome.
     */
    private void closeStream() {
      if (blockStream != null) {
        try {
          blockStream.close();
        } catch (IOException e) {
          setLastException(e);
        } finally {
          blockStream = null;
        }
      }
      if (blockReplyStream != null) {
        try {
          blockReplyStream.close();
        } catch (IOException e) {
          setLastException(e);
        } finally {
          blockReplyStream = null;
        }
      }
      if (null != s) {
        try {
          s.close();
        } catch (IOException e) {
          setLastException(e);
        } finally {
          s = null;
        }
      }
    }

    // The following synchronized methods are used whenever
    // errorIndex or restartingNodeIndex is set. This is because
    // check & set needs to be atomic. Simply reading variables
    // does not require a synchronization. When responder is
    // not running (e.g. during pipeline recovery), there is no
    // need to use these methods.

    /** Set the error node index. Called by responder */
    synchronized void setErrorIndex(int idx) {
      errorIndex = idx;
    }

    /** Set the restarting node index. Called by responder */
    synchronized void setRestartingNodeIndex(int idx) {
      restartingNodeIndex.set(idx);
      // If the data streamer has already set the primary node
      // bad, clear it. It is likely that the write failed due to
      // the DN shutdown. Even if it was a real failure, the pipeline
      // recovery will take care of it.
      errorIndex = -1;
    }

    /**
     * This method is used when no explicit error report was received,
     * but something failed. When the primary node is a suspect or
     * unsure about the cause, the primary node is marked as failed.
   */
    synchronized void tryMarkPrimaryDatanodeFailed() {
      // There should be no existing error and no ongoing restart.
      if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
        errorIndex = 0;
      }
    }

    /**
     * Examine whether it is worth waiting for a node to restart.
     * Returns true for a single-node pipeline or when the node is local.
     * @param index the node index
     */
    boolean shouldWaitForRestart(int index) {
      // Only one node in the pipeline.
      if (nodes.length == 1) {
        return true;
      }

      // Is it a local node?
      InetAddress addr = null;
      try {
        addr = InetAddress.getByName(nodes[index].getIpAddr());
      } catch (java.net.UnknownHostException e) {
        // we are passing an ip address. this should not happen.
        assert false;
      }

      if (addr != null && NetUtils.isLocalAddress(addr)) {
        return true;
      }
      return false;
    }

    //
    // Processes responses from the datanodes.  A packet is removed
    // from the ackQueue when its response arrives.
    //
    private class ResponseProcessor extends Daemon {

      private volatile boolean responderClosed = false;
      private DatanodeInfo[] targets = null;
      private boolean isLastPacketInBlock = false;

      ResponseProcessor (DatanodeInfo[] targets) {
        this.targets = targets;
      }

      @Override
      public void run() {

        setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();

        TraceScope scope = NullScope.INSTANCE;
        while (!responderClosed && dfsClient.clientRunning &&
            !isLastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            long begin = Time.monotonicNow();
            ack.readFields(blockReplyStream);
            long duration = Time.monotonicNow() - begin;
            // Log slow ack reads, but not for heartbeats.
            if (duration > dfsclientSlowLogThresholdMs
                && ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) {
              DFSClient.LOG
                  .warn("Slow ReadProcessor read fields took " + duration
                      + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
                      + ack + ", targets: " + Arrays.asList(targets));
            } else if (DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("DFSClient " + ack);
            }

            long seqno = ack.getSeqno();
            // processes response status from datanodes.
for (int i = ack.getNumOfReplies()-1; i >=0 && dfsClient.clientRunning; i--) { final Status reply = PipelineAck.getStatusFromHeader(ack .getHeaderFlag(i)); // Restart will not be treated differently unless it is // the local node or the only one in the pipeline. if (PipelineAck.isRestartOOBStatus(reply) && shouldWaitForRestart(i)) { restartDeadline = dfsClient.getConf().datanodeRestartTimeout + Time.monotonicNow(); setRestartingNodeIndex(i); String message = "A datanode is restarting: " + targets[i]; DFSClient.LOG.info(message); throw new IOException(message); } // node error if (reply != SUCCESS) { setErrorIndex(i); // first bad datanode throw new IOException("Bad response " + reply + " for block " + block + " from datanode " + targets[i]); } } assert seqno != PipelineAck.UNKOWN_SEQNO : "Ack for unknown seqno should be a failed ack: " + ack; if (seqno == DFSPacket.HEART_BEAT_SEQNO) { // a heartbeat ack continue; } // a success ack for a data packet DFSPacket one; synchronized (dataQueue) { one = ackQueue.getFirst(); } if (one.getSeqno() != seqno) { throw new IOException("ResponseProcessor: Expecting seqno " + " for block " + block + one.getSeqno() + " but received " + seqno); } isLastPacketInBlock = one.isLastPacketInBlock(); // Fail the packet write for testing in order to force a // pipeline recovery. if (DFSClientFaultInjector.get().failPacket() && isLastPacketInBlock) { failPacket = true; throw new IOException( "Failing the last packet for testing."); } // update bytesAcked block.setNumBytes(one.getLastByteOffsetBlock()); synchronized (dataQueue) { scope = Trace.continueSpan(one.getTraceSpan()); one.setTraceSpan(null); lastAckedSeqno = seqno; ackQueue.removeFirst(); dataQueue.notifyAll(); one.releaseBuffer(byteArrayManager); } } catch (Exception e) { if (!responderClosed) { if (e instanceof IOException) { setLastException((IOException)e); } hasError = true; // If no explicit error report was received, mark the primary // node as failed. 
            tryMarkPrimaryDatanodeFailed();
              synchronized (dataQueue) {
                dataQueue.notifyAll();
              }
              if (restartingNodeIndex.get() == -1) {
                DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
                    + " for block " + block, e);
              }
              responderClosed = true;
            }
          } finally {
            scope.close();
          }
        }
      }

      void close() {
        responderClosed = true;
        this.interrupt();
      }
    }

    // If this stream has encountered any errors so far, shutdown
    // threads and mark stream as closed. Returns true if we should
    // sleep for a while after returning from this call.
    //
    private boolean processDatanodeError() throws IOException {
      if (response != null) {
        DFSClient.LOG.info("Error Recovery for " + block +
            " waiting for responder to exit. ");
        return true;
      }
      closeStream();

      // move packets from ack queue to front of the data queue
      synchronized (dataQueue) {
        dataQueue.addAll(0, ackQueue);
        ackQueue.clear();
      }

      // Record the new pipeline failure recovery.
      if (lastAckedSeqnoBeforeFailure != lastAckedSeqno) {
        lastAckedSeqnoBeforeFailure = lastAckedSeqno;
        pipelineRecoveryCount = 1;
      } else {
        // If we had to recover the pipeline five times in a row for the
        // same packet, this client likely has corrupt data or is corrupting
        // it during transmission.
        if (++pipelineRecoveryCount > 5) {
          DFSClient.LOG.warn("Error recovering pipeline for writing " +
              block + ". Already retried 5 times for the same packet.");
          lastException.set(new IOException("Failing write. Tried pipeline " +
              "recovery 5 times without success."));
          streamerClosed = true;
          return false;
        }
      }
      boolean doSleep = setupPipelineForAppendOrRecovery();

      if (!streamerClosed && dfsClient.clientRunning) {
        if (stage == BlockConstructionStage.PIPELINE_CLOSE) {

          // If we had an error while closing the pipeline, we go through a fast-path
          // where the BlockReceiver does not run. Instead, the DataNode just finalizes
          // the block immediately during the 'connect ack' process. So, we want to pull
          // the end-of-block packet from the dataQueue, since we don't actually have
          // a true pipeline to send it over.
          //
          // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
          // a client waiting on close() will be aware that the flush finished.
          synchronized (dataQueue) {
            DFSPacket endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
            Span span = endOfBlockPacket.getTraceSpan();
            if (span != null) {
              // Close any trace span associated with this Packet
              TraceScope scope = Trace.continueSpan(span);
              scope.close();
            }
            assert endOfBlockPacket.isLastPacketInBlock();
            assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
            lastAckedSeqno = endOfBlockPacket.getSeqno();
            dataQueue.notifyAll();
          }
          endBlock();
        } else {
          initDataStreaming();
        }
      }

      return doSleep;
    }

    private void setHflush() {
      isHflushed = true;
    }

    /**
     * After a datanode has been added to the pipeline, find the index of
     * the node that is not in the original pipeline.
     * @throws IOException if no new node was actually added
     */
    private int findNewDatanode(final DatanodeInfo[] original
        ) throws IOException {
      if (nodes.length != original.length + 1) {
        throw new IOException(
            new StringBuilder()
            .append("Failed to replace a bad datanode on the existing pipeline ")
            .append("due to no more good datanodes being available to try. ")
            .append("(Nodes: current=").append(Arrays.asList(nodes))
            .append(", original=").append(Arrays.asList(original)).append("). ")
            .append("The current failed datanode replacement policy is ")
            .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ")
            .append("a client may configure this via '")
            .append(DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY)
            .append("' in its configuration.")
            .toString());
      }
      for(int i = 0; i < nodes.length; i++) {
        int j = 0;
        for(; j < original.length && !nodes[i].equals(original[j]); j++);
        if (j == original.length) {
          return i;
        }
      }
      throw new IOException("Failed: new datanode not found: nodes="
          + Arrays.asList(nodes) + ", original=" + Arrays.asList(original));
    }

    /**
     * Ask the namenode for an additional datanode, splice it into the
     * pipeline, and copy the existing replica to it when necessary.
     */
    private void addDatanode2ExistingPipeline() throws IOException {
      if (DataTransferProtocol.LOG.isDebugEnabled()) {
        DataTransferProtocol.LOG.debug("lastAckedSeqno = " + lastAckedSeqno);
      }
      /*
       * Is data transfer necessary?  We have the following cases.
       *
       * Case 1: Failure in Pipeline Setup
       * - Append
       *    + Transfer the stored replica, which may be a RBW or a finalized.
       * - Create
       *    + If no data, then no transfer is required.
       *    + If there are data written, transfer RBW. This case may happen
       *      when there was a streaming failure earlier in this pipeline.
       *
       * Case 2: Failure in Streaming
       * - Append/Create:
       *    + transfer RBW
       *
       * Case 3: Failure in Close
       * - Append/Create:
       *    + no transfer, let NameNode replicate the block.
       */
      if (!isAppend && lastAckedSeqno < 0
          && stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
        //no data have been written
        return;
      } else if (stage == BlockConstructionStage.PIPELINE_CLOSE
          || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
        //pipeline is closing
        return;
      }

      //get a new datanode
      final DatanodeInfo[] original = nodes;
      final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
          src, fileId, block, nodes, storageIDs,
          failed.toArray(new DatanodeInfo[failed.size()]),
          1, dfsClient.clientName);
      setPipeline(lb);

      //find the new datanode
      final int d = findNewDatanode(original);

      //transfer replica: copy from the neighbor closest to the new node.
      final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
      final DatanodeInfo[] targets = {nodes[d]};
      final StorageType[] targetStorageTypes = {storageTypes[d]};
      transfer(src, targets, targetStorageTypes, lb.getBlockToken());
    }

    /**
     * Copy the current replica from {@code src} to {@code targets} via a
     * TRANSFER_BLOCK request over a fresh (SASL-wrapped) socket; streams and
     * socket are always closed in the finally block.
     */
    private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
        final StorageType[] targetStorageTypes,
        final Token<BlockTokenIdentifier> blockToken) throws IOException {
      //transfer replica to the new datanode
      Socket sock = null;
      DataOutputStream out = null;
      DataInputStream in = null;
      try {
        sock = createSocketForPipeline(src, 2, dfsClient);
        final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);

        OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(sock);
        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
            unbufOut, unbufIn, dfsClient, blockToken, src);
        unbufOut = saslStreams.out;
        unbufIn = saslStreams.in;
        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
            HdfsConstants.SMALL_BUFFER_SIZE));
        in = new DataInputStream(unbufIn);

        //send the TRANSFER_BLOCK request
        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets, targetStorageTypes);
        out.flush();

        //ack
        BlockOpResponseProto response =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
        if (SUCCESS != response.getStatus()) {
          throw new IOException("Failed to add a datanode");
        }
      } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
        IOUtils.closeSocket(sock);
      }
    }

    /**
     * Open a DataOutputStream to a DataNode pipeline so that
     * it can be written to.
     * This happens when a file is appended or data streaming fails
     * It keeps on trying until a pipeline is setup
     */
    private boolean setupPipelineForAppendOrRecovery() throws IOException {
      // check number of datanodes
      if (nodes == null || nodes.length == 0) {
        String msg = "Could not get block locations. " + "Source file \""
            + src + "\" - Aborting...";
        DFSClient.LOG.warn(msg);
        setLastException(new IOException(msg));
        streamerClosed = true;
        return false;
      }

      boolean success = false;
      long newGS = 0L;
      while (!success && !streamerClosed && dfsClient.clientRunning) {
        // Sleep before reconnect if a dn is restarting.
        // This process will be repeated until the deadline or the datanode
        // starts back up.
        if (restartingNodeIndex.get() >= 0) {
          // 4 seconds or the configured deadline period, whichever is shorter.
          // This is the retry interval and recovery will be retried in this
          // interval until timeout or success.
          long delay = Math.min(dfsClient.getConf().datanodeRestartTimeout,
              4000L);
          try {
            Thread.sleep(delay);
          } catch (InterruptedException ie) {
            lastException.set(new IOException("Interrupted while waiting for " +
                "datanode to restart. " + nodes[restartingNodeIndex.get()]));
            streamerClosed = true;
            return false;
          }
        }
        boolean isRecovery = hasError;
        // remove bad datanode from list of datanodes.
        // If errorIndex was not set (i.e. appends), then do not remove
        // any datanodes
        //
        if (errorIndex >= 0) {
          StringBuilder pipelineMsg = new StringBuilder();
          for (int j = 0; j < nodes.length; j++) {
            pipelineMsg.append(nodes[j]);
            if (j < nodes.length - 1) {
              pipelineMsg.append(", ");
            }
          }
          if (nodes.length <= 1) {
            lastException.set(new IOException("All datanodes " + pipelineMsg
                + " are bad. Aborting..."));
            streamerClosed = true;
            return false;
          }
          DFSClient.LOG.warn("Error Recovery for block " + block +
              " in pipeline " + pipelineMsg +
              ": bad datanode " + nodes[errorIndex]);
          failed.add(nodes[errorIndex]);

          // Rebuild all three pipeline arrays without the bad node; they must
          // stay index-aligned.
          DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
          arraycopy(nodes, newnodes, errorIndex);

          final StorageType[] newStorageTypes = new StorageType[newnodes.length];
          arraycopy(storageTypes, newStorageTypes, errorIndex);

          final String[] newStorageIDs = new String[newnodes.length];
          arraycopy(storageIDs, newStorageIDs, errorIndex);

          setPipeline(newnodes, newStorageTypes, newStorageIDs);

          // Just took care of a node error while waiting for a node restart
          if (restartingNodeIndex.get() >= 0) {
            // If the error came from a node further away than the restarting
            // node, the restart must have been complete.
            if (errorIndex > restartingNodeIndex.get()) {
              restartingNodeIndex.set(-1);
            } else if (errorIndex < restartingNodeIndex.get()) {
              // the node index has shifted.
              restartingNodeIndex.decrementAndGet();
            } else {
              // this shouldn't happen...
              assert false;
            }
          }

          if (restartingNodeIndex.get() == -1) {
            hasError = false;
          }
          lastException.set(null);
          errorIndex = -1;
        }

        // Check if replace-datanode policy is satisfied.
        if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(blockReplication,
            nodes, isAppend, isHflushed)) {
          try {
            addDatanode2ExistingPipeline();
          } catch(IOException ioe) {
            if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) {
              throw ioe;
            }
            DFSClient.LOG.warn("Failed to replace datanode."
                + " Continue with the remaining datanodes since "
                + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY
                + " is set to true.", ioe);
          }
        }

        // get a new generation stamp and an access token
        LocatedBlock lb = dfsClient.namenode.updateBlockForPipeline(block, dfsClient.clientName);
        newGS = lb.getBlock().getGenerationStamp();
        accessToken = lb.getBlockToken();

        // set up the pipeline again with the remaining nodes
        if (failPacket) { // for testing
          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
          failPacket = false;
          try {
            // Give DNs time to send in bad reports. In real situations,
            // good reports should follow bad ones, if client committed
            // with those nodes.
            Thread.sleep(2000);
          } catch (InterruptedException ie) {}
        } else {
          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
        }

        if (restartingNodeIndex.get() >= 0) {
          assert hasError == true;
          // check errorIndex set above
          if (errorIndex == restartingNodeIndex.get()) {
            // ignore, if came from the restarting node
            errorIndex = -1;
          }
          // still within the deadline
          if (Time.monotonicNow() < restartDeadline) {
            continue; // with in the deadline
          }
          // expired. declare the restarting node dead
          restartDeadline = 0;
          int expiredNodeIndex = restartingNodeIndex.get();
          restartingNodeIndex.set(-1);
          DFSClient.LOG.warn("Datanode did not restart in time: " +
              nodes[expiredNodeIndex]);
          // Mark the restarting node as failed. If there is any other failed
          // node during the last pipeline construction attempt, it will not be
          // overwritten/dropped. In this case, the restarting node will get
          // excluded in the following attempt, if it still does not come up.
          if (errorIndex == -1) {
            errorIndex = expiredNodeIndex;
          }
          // From this point on, normal pipeline recovery applies.
        }
      } // while

      if (success) {
        // update pipeline at the namenode
        ExtendedBlock newBlock = new ExtendedBlock(
            block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
        dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
            nodes, storageIDs);
        // update client side generation stamp
        block = newBlock;
      }
      return false; // do not sleep, continue processing
    }

    /**
     * Open a DataOutputStream to a DataNode so that it can be written to.
     * This happens when a file is created and each time a new block is allocated.
     * Must get block ID and the IDs of the destinations from the namenode.
     * Returns the list of target datanodes.
     */
    private LocatedBlock nextBlockOutputStream() throws IOException {
      LocatedBlock lb = null;
      DatanodeInfo[] nodes = null;
      StorageType[] storageTypes = null;
      int count = dfsClient.getConf().nBlockWriteRetry;
      boolean success = false;
      ExtendedBlock oldBlock = block;
      do {
        hasError = false;
        lastException.set(null);
        errorIndex = -1;
        success = false;

        // Snapshot the currently-excluded datanodes so the namenode avoids them.
        DatanodeInfo[] excluded =
            excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
            .keySet()
            .toArray(new DatanodeInfo[0]);
        block = oldBlock;
        lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
        block = lb.getBlock();
        block.setNumBytes(0);
        bytesSent = 0;
        accessToken = lb.getBlockToken();
        nodes = lb.getLocations();
        storageTypes = lb.getStorageTypes();

        //
        // Connect to first DataNode in the list.
        //
        success = createBlockOutputStream(nodes, storageTypes, 0L, false);

        if (!success) {
          // Give the block back to the namenode and exclude the bad datanode
          // before retrying with a fresh allocation.
          DFSClient.LOG.info("Abandoning " + block);
          dfsClient.namenode.abandonBlock(block, fileId, src,
              dfsClient.clientName);
          block = null;
          DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
          excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
        }
      } while (!success && --count >= 0);

      if (!success) {
        throw new IOException("Unable to create new block.");
      }
      return lb;
    }

    // connects to the first datanode in the pipeline
    // Returns true if success, otherwise return failure.
  //
  private boolean createBlockOutputStream(DatanodeInfo[] nodes,
      StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
    if (nodes.length == 0) {
      DFSClient.LOG.info("nodes are empty for write pipeline of block "
          + block);
      return false;
    }
    Status pipelineStatus = SUCCESS;
    String firstBadLink = "";
    boolean checkRestart = false;
    if (DFSClient.LOG.isDebugEnabled()) {
      for (int i = 0; i < nodes.length; i++) {
        DFSClient.LOG.debug("pipeline = " + nodes[i]);
      }
    }

    // persist blocks on namenode on next flush
    persistBlocks.set(true);

    // One retry is allowed solely for refreshing an invalid data encryption
    // key; all other failures exit the loop via "return result" below.
    int refetchEncryptionKey = 1;
    while (true) {
      boolean result = false;
      DataOutputStream out = null;
      try {
        assert null == s : "Previous socket unclosed";
        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
        s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
        long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);

        OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(s);
        // Wrap the raw streams with SASL negotiation (may be a no-op pair,
        // depending on cluster security configuration).
        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
            unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
        unbufOut = saslStreams.out;
        unbufIn = saslStreams.in;
        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
            HdfsConstants.SMALL_BUFFER_SIZE));
        blockReplyStream = new DataInputStream(unbufIn);

        //
        // Xmit header info to datanode
        //
        BlockConstructionStage bcs = recoveryFlag?
            stage.getRecoveryStage(): stage;

        // We cannot change the block length in 'block' as it counts the number
        // of bytes ack'ed.
        ExtendedBlock blockCopy = new ExtendedBlock(block);
        blockCopy.setNumBytes(blockSize);

        boolean[] targetPinnings = getPinnings(nodes, true);
        // send the request
        new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
            dfsClient.clientName, nodes, nodeStorageTypes, null, bcs,
            nodes.length, block.getNumBytes(), bytesSent, newGS,
            checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile,
            (targetPinnings == null ? false : targetPinnings[0]),
            targetPinnings);

        // receive ack for connect
        BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
            PBHelper.vintPrefixed(blockReplyStream));
        pipelineStatus = resp.getStatus();
        firstBadLink = resp.getFirstBadLink();

        // Got an restart OOB ack.
        // If a node is already restarting, this status is not likely from
        // the same node. If it is from a different node, it is not
        // from the local datanode. Thus it is safe to treat this as a
        // regular node error.
        if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
            restartingNodeIndex.get() == -1) {
          checkRestart = true;
          throw new IOException("A datanode is restarting.");
        }

        String logInfo = "ack with firstBadLink as " + firstBadLink;
        DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);

        assert null == blockStream : "Previous blockStream unclosed";
        blockStream = out;
        result = true; // success
        restartingNodeIndex.set(-1);
        hasError = false;
      } catch (IOException ie) {
        if (restartingNodeIndex.get() == -1) {
          DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
        }
        if (ie instanceof InvalidEncryptionKeyException &&
            refetchEncryptionKey > 0) {
          DFSClient.LOG.info("Will fetch a new encryption key and retry, "
              + "encryption key was invalid when connecting to " + nodes[0]
              + " : " + ie);
          // The encryption key used is invalid.
          refetchEncryptionKey--;
          dfsClient.clearDataEncryptionKey();
          // Don't close the socket/exclude this node just yet. Try again with
          // a new encryption key.
          continue;
        }

        // find the datanode that matches
        if (firstBadLink.length() != 0) {
          for (int i = 0; i < nodes.length; i++) {
            // NB: Unconditionally using the xfer addr w/o hostname
            if (firstBadLink.equals(nodes[i].getXferAddr())) {
              errorIndex = i;
              break;
            }
          }
        } else {
          // No bad link reported: blame the first node we talked to.
          assert checkRestart == false;
          errorIndex = 0;
        }
        // Check whether there is a restart worth waiting for.
        if (checkRestart && shouldWaitForRestart(errorIndex)) {
          restartDeadline = dfsClient.getConf().datanodeRestartTimeout
              + Time.monotonicNow();
          restartingNodeIndex.set(errorIndex);
          errorIndex = -1;
          DFSClient.LOG.info("Waiting for the datanode to be restarted: "
              + nodes[restartingNodeIndex.get()]);
        }
        hasError = true;
        setLastException(ie);
        result = false;  // error
      } finally {
        // On any failure, tear down everything opened in this attempt so a
        // later attempt starts from a clean state (see asserts above).
        if (!result) {
          IOUtils.closeSocket(s);
          s = null;
          IOUtils.closeStream(out);
          out = null;
          IOUtils.closeStream(blockReplyStream);
          blockReplyStream = null;
        }
      }
      return result;
    }
  }

  // Maps each pipeline node to whether it was one of the caller-requested
  // favored nodes; returns null when no favored nodes were specified.
  private boolean[] getPinnings(DatanodeInfo[] nodes, boolean shouldLog) {
    if (favoredNodes == null) {
      return null;
    } else {
      boolean[] pinnings = new boolean[nodes.length];
      HashSet<String> favoredSet =
          new HashSet<String>(Arrays.asList(favoredNodes));
      for (int i = 0; i < nodes.length; i++) {
        pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug(nodes[i].getXferAddrWithHostname() +
              " was chosen by name node (favored=" + pinnings[i] + ").");
        }
      }
      if (shouldLog && !favoredSet.isEmpty()) {
        // There is one or more favored nodes that were not allocated.
        DFSClient.LOG.warn(
            "These favored nodes were specified but not chosen: "
                + favoredSet
                + " Specified favored nodes: " + Arrays.toString(favoredNodes));
      }
      return pinnings;
    }
  }

  // Asks the namenode to allocate the next block of the file, retrying
  // NotReplicatedYetException with exponential backoff (starting at 400ms).
  private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)
      throws IOException {
    int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
    long sleeptime = 400;
    while (true) {
      long localstart = Time.monotonicNow();
      while (true) {
        try {
          return dfsClient.namenode.addBlock(src, dfsClient.clientName,
              block, excludedNodes, fileId, favoredNodes);
        } catch (RemoteException e) {
          IOException ue =
              e.unwrapRemoteException(FileNotFoundException.class,
                  AccessControlException.class,
                  NSQuotaExceededException.class,
                  DSQuotaExceededException.class,
                  UnresolvedPathException.class);
          if (ue != e) {
            throw ue; // no need to retry these exceptions
          }

          if (NotReplicatedYetException.class.getName().
              equals(e.getClassName())) {
            if (retries == 0) {
              throw e;
            } else {
              --retries;
              DFSClient.LOG.info("Exception while adding a block", e);
              long elapsed = Time.monotonicNow() - localstart;
              if (elapsed > 5000) {
                DFSClient.LOG.info("Waiting for replication for "
                    + (elapsed / 1000) + " seconds");
              }
              try {
                DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                    + " retries left " + retries);
                Thread.sleep(sleeptime);
                sleeptime *= 2;
              } catch (InterruptedException ie) {
                DFSClient.LOG.warn("Caught exception ", ie);
              }
            }
          } else {
            throw e;
          }
        }
      }
    }
  }

  /** @return the block currently being written by this streamer. */
  ExtendedBlock getBlock() {
    return block;
  }

  /** @return the datanodes of the current write pipeline. */
  DatanodeInfo[] getNodes() {
    return nodes;
  }

  /** @return the access token for the current block. */
  Token<BlockTokenIdentifier> getBlockToken() {
    return accessToken;
  }

  // Records the first exception only; later ones are dropped.
  private void setLastException(IOException e) {
    lastException.compareAndSet(null, e);
  }
}

/**
 * Create a socket for a write pipeline
 * @param first the first datanode
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  // Read timeout scales with pipeline length (downstream acks take longer).
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(),
      client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}

// Throws the recorded pipeline exception (or ClosedChannelException) if the
// stream has been closed.
@Override
protected void checkClosed() throws IOException {
  if (isClosed()) {
    IOException e = lastException.get();
    throw e != null ? e : new ClosedChannelException();
  }
}

//
// returns the list of targets, if any, that is being currently used.
//
@VisibleForTesting
public synchronized DatanodeInfo[] getPipeline() {
  if (streamer == null) {
    return null;
  }
  DatanodeInfo[] currentNodes = streamer.getNodes();
  if (currentNodes == null) {
    return null;
  }
  // Return a defensive copy so callers cannot mutate the streamer's array.
  DatanodeInfo[] value = new DatanodeInfo[currentNodes.length];
  for (int i = 0; i < currentNodes.length; i++) {
    value[i] = currentNodes[i];
  }
  return value;
}

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute checksum for writing to single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}

// Common constructor: validates checksum parameters and copies file metadata
// (block size, replication, encryption info) from the namenode-provided stat.
private DFSOutputStream(DFSClient dfsClient, String src,
    Progressable progress, HdfsFileStatus stat, DataChecksum checksum)
    throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }

  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY
        + " (=" + bytesPerChecksum + ") must divide block size (="
        + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
      dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}

/** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
    EnumSet<CreateFlag> flag, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);

  computePacketChunkSize(dfsClient.getConf().writePacketSize,
      bytesPerChecksum);

  streamer = new DataStreamer(stat, null);
  if (favoredNodes != null && favoredNodes.length != 0) {
    streamer.setFavoredNodes(favoredNodes);
  }
}

/**
 * Creates the file at the namenode (retrying RetryStartFileException for
 * encryption-zone races) and returns a started output stream for it.
 */
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
    FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
    short replication, long blockSize, Progressable progress, int buffersize,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForCreate", src);
  try {
    HdfsFileStatus stat = null;

    // Retry the create if we get a RetryStartFileException up to a maximum
    // number of times
    boolean shouldRetry = true;
    int retryCount = CREATE_RETRY_COUNT;
    while (shouldRetry) {
      shouldRetry = false;
      try {
        stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
            new EnumSetWritable<CreateFlag>(flag), createParent, replication,
            blockSize, SUPPORTED_CRYPTO_VERSIONS);
        break;
      } catch (RemoteException re) {
        IOException e = re.unwrapRemoteException(
            AccessControlException.class,
            DSQuotaExceededException.class,
            FileAlreadyExistsException.class,
            FileNotFoundException.class,
            ParentNotDirectoryException.class,
            NSQuotaExceededException.class,
            RetryStartFileException.class,
            SafeModeException.class,
            UnresolvedPathException.class,
            SnapshotAccessControlException.class,
            UnknownCryptoProtocolVersionException.class);
        if (e instanceof RetryStartFileException) {
          if (retryCount > 0) {
            shouldRetry = true;
            retryCount--;
          } else {
            throw new IOException("Too many retries because of encryption"
                + " zone operations", e);
          }
        } else {
          throw e;
        }
      }
    }
    Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
        flag, progress, checksum, favoredNodes);
    out.start();
    return out;
  } finally {
    scope.close();
  }
}

/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat, lastBlock != null ?
        lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}

/** Returns a started output stream appending to an existing file. */
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}

// True when the file's storage policy is the in-memory (LAZY_PERSIST) policy.
private static boolean isLazyPersist(HdfsFileStatus stat) {
  final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
  return p != null && stat.getStoragePolicy() == p.getId();
}

// Recomputes chunksPerPacket/packetSize from the target packet size (psize)
// and chunk payload size (csize), leaving room for the packet header.
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
        ", chunkSize=" + chunkSize +
        ", chunksPerPacket=" + chunksPerPacket +
        ", packetSize=" + packetSize);
  }
}

// Moves currentPacket onto dataQueue (under the dataQueue monitor) and wakes
// the streamer thread.
private void queueCurrentPacket() {
  synchronized (dataQueue) {
    if (currentPacket == null) return;
    currentPacket.addTraceParent(Trace.currentSpan());
    dataQueue.addLast(currentPacket);
    lastQueuedSeqno = currentPacket.getSeqno();
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Queued packet " + currentPacket.getSeqno());
    }
    currentPacket = null;
    dataQueue.notifyAll();
  }
}

// Blocks until the combined data/ack queue has room (writeMaxPackets), then
// enqueues currentPacket. On interrupt, queues anyway (slightly overrunning
// the limit) so the full-packet invariant is preserved.
private void waitAndQueueCurrentPacket() throws IOException {
  synchronized (dataQueue) {
    try {
      // If queue is full, then wait till we have enough space
      boolean firstWait = true;
      try {
        while (!isClosed() && dataQueue.size() + ackQueue.size() >
            dfsClient.getConf().writeMaxPackets) {
          if (firstWait) {
            Span span = Trace.currentSpan();
            if (span != null) {
              span.addTimelineAnnotation("dataQueue.wait");
            }
            firstWait = false;
          }
          try {
            dataQueue.wait();
          } catch (InterruptedException e) {
            // If we get interrupted while waiting to queue data, we still need
            // to get rid of the current packet. This is because we have an
            // invariant that if currentPacket gets full, it will get queued
            // before the next writeChunk.
            //
            // Rather than wait around for space in the queue, we should
            // instead try to return to the caller as soon as possible, even
            // though we slightly overrun the MAX_PACKETS length.
            Thread.currentThread().interrupt();
            break;
          }
        }
      } finally {
        Span span = Trace.currentSpan();
        if ((span != null) && (!firstWait)) {
          span.addTimelineAnnotation("end.wait");
        }
      }
      checkClosed();
      queueCurrentPacket();
    } catch (ClosedChannelException e) {
    }
  }
}

// @see FSOutputSummer#writeChunk()
@Override
protected synchronized void writeChunk(byte[] b, int offset, int len,
    byte[] checksum, int ckoff, int cklen) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("DFSOutputStream#writeChunk", src);
  try {
    writeChunkImpl(b, offset, len, checksum, ckoff, cklen);
  } finally {
    scope.close();
  }
}

// Appends one checksummed chunk to currentPacket, enqueueing the packet when
// it fills and emitting an empty end-of-block packet at block boundaries.
private synchronized void writeChunkImpl(byte[] b, int offset, int len,
    byte[] checksum, int ckoff, int cklen) throws IOException {
  dfsClient.checkOpen();
  checkClosed();

  if (len > bytesPerChecksum) {
    throw new IOException("writeChunk() buffer size is " + len +
        " is larger than supported bytesPerChecksum " + bytesPerChecksum);
  }
  if (cklen != 0 && cklen != getChecksumSize()) {
    throw new IOException("writeChunk() checksum size is supposed to be " +
        getChecksumSize() + " but found to be " + cklen);
  }

  if (currentPacket == null) {
    currentPacket = createPacket(packetSize, chunksPerPacket,
        bytesCurBlock, currentSeqno++, false);
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" +
          currentPacket.getSeqno() +
          ", src=" + src +
          ", packetSize=" + packetSize +
          ", chunksPerPacket=" + chunksPerPacket +
          ", bytesCurBlock=" + bytesCurBlock);
    }
  }

  currentPacket.writeChecksum(checksum, ckoff, cklen);
  currentPacket.writeData(b, offset, len);
  currentPacket.incNumChunks();
  bytesCurBlock += len;

  // If packet is full, enqueue it for transmission
  //
  if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
      bytesCurBlock == blockSize) {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
          currentPacket.getSeqno() +
          ", src=" + src +
          ", bytesCurBlock=" + bytesCurBlock +
          ", blockSize=" + blockSize +
          ", appendChunk=" + appendChunk);
    }
    waitAndQueueCurrentPacket();

    // If the reopened file did not end at chunk boundary and the above
    // write filled up its partial chunk. Tell the summer to generate full
    // crc chunks from now on.
    if (appendChunk && bytesCurBlock%bytesPerChecksum == 0) {
      appendChunk = false;
      resetChecksumBufSize();
    }

    if (!appendChunk) {
      int psize = Math.min((int)(blockSize-bytesCurBlock),
          dfsClient.getConf().writePacketSize);
      computePacketChunkSize(psize, bytesPerChecksum);
    }
    //
    // if encountering a block boundary, send an empty packet to
    // indicate the end of block and reset bytesCurBlock.
    //
    if (bytesCurBlock == blockSize) {
      currentPacket = createPacket(0, 0, bytesCurBlock,
          currentSeqno++, true);
      currentPacket.setSyncBlock(shouldSyncBlock);
      waitAndQueueCurrentPacket();
      bytesCurBlock = 0;
      lastFlushOffset = 0;
    }
  }
}

@Deprecated
public void sync() throws IOException {
  hflush();
}

/**
 * Flushes out to all replicas of the block. The data is in the buffers
 * of the DNs but not necessarily in the DN's OS buffers.
 *
 * It is a synchronous operation. When it returns,
 * it guarantees that flushed data become visible to new readers.
 * It is not guaranteed that data has been flushed to
 * persistent store on the datanode.
 * Block allocations are persisted on namenode.
 */
@Override
public void hflush() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hflush", src);
  try {
    flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}

@Override
public void hsync() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hsync", src);
  try {
    flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}

/**
 * The expected semantics is all data have flushed out to all replicas
 * and all replicas have done posix fsync equivalent - ie the OS has
 * flushed it to the disk device (but the disk may have it in its cache).
 *
 * Note that only the current block is flushed to the disk device.
 * To guarantee durable sync across block boundaries the stream should
 * be created with {@link CreateFlag#SYNC_BLOCK}.
 *
 * @param syncFlags
 *          Indicate the semantic of the sync. Currently used to specify
 *          whether or not to update the block length in NameNode.
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hsync", src);
  try {
    flushOrSync(true, syncFlags);
  } finally {
    scope.close();
  }
}

/**
 * Flush/Sync buffered data to DataNodes.
 *
 * @param isSync
 *          Whether or not to require all replicas to flush data to the disk
 *          device
 * @param syncFlags
 *          Indicate extra detailed semantic of the flush/sync. Currently
 *          mainly used to specify whether or not to update the file length in
 *          the NameNode
 * @throws IOException
 */
private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
    throws IOException {
  dfsClient.checkOpen();
  checkClosed();
  try {
    long toWaitFor;
    long lastBlockLength = -1L;
    boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH);
    boolean endBlock = syncFlags.contains(SyncFlag.END_BLOCK);
    synchronized (this) {
      // flush checksum buffer, but keep checksum buffer intact if we do not
      // need to end the current block
      int numKept = flushBuffer(!endBlock, true);
      // bytesCurBlock potentially incremented if there was buffered data

      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("DFSClient flush():"
            + " bytesCurBlock=" + bytesCurBlock
            + " lastFlushOffset=" + lastFlushOffset
            + " createNewBlock=" + endBlock);
      }
      // Flush only if we haven't already flushed till this offset.
      if (lastFlushOffset != bytesCurBlock) {
        assert bytesCurBlock > lastFlushOffset;
        // record the valid offset of this flush
        lastFlushOffset = bytesCurBlock;
        if (isSync && currentPacket == null && !endBlock) {
          // Nothing to send right now,
          // but sync was requested.
          // Send an empty packet if we do not end the block right now
          currentPacket = createPacket(packetSize, chunksPerPacket,
              bytesCurBlock, currentSeqno++, false);
        }
      } else {
        if (isSync && bytesCurBlock > 0 && !endBlock) {
          // Nothing to send right now,
          // and the block was partially written,
          // and sync was requested.
          // So send an empty sync packet if we do not end the block right now
          currentPacket = createPacket(packetSize, chunksPerPacket,
              bytesCurBlock, currentSeqno++, false);
        } else if (currentPacket != null) {
          // just discard the current packet since it is already been sent.
          currentPacket.releaseBuffer(byteArrayManager);
          currentPacket = null;
        }
      }
      if (currentPacket != null) {
        currentPacket.setSyncBlock(isSync);
        waitAndQueueCurrentPacket();
      }
      if (endBlock && bytesCurBlock > 0) {
        // Need to end the current block, thus send an empty packet to
        // indicate this is the end of the block and reset bytesCurBlock
        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++,
            true);
        currentPacket.setSyncBlock(shouldSyncBlock || isSync);
        waitAndQueueCurrentPacket();
        bytesCurBlock = 0;
        lastFlushOffset = 0;
      } else {
        // Restore state of stream. Record the last flush offset
        // of the last full chunk that was flushed.
        bytesCurBlock -= numKept;
      }

      toWaitFor = lastQueuedSeqno;
    } // end synchronized

    waitForAckedSeqno(toWaitFor);

    // update the block length first time irrespective of flag
    if (updateLength || persistBlocks.get()) {
      synchronized (this) {
        if (streamer != null && streamer.block != null) {
          lastBlockLength = streamer.block.getNumBytes();
        }
      }
    }
    // If 1) any new blocks were allocated since the last flush, or 2) to
    // update length in NN is required, then persist block locations on
    // namenode.
    if (persistBlocks.getAndSet(false) || updateLength) {
      try {
        dfsClient.namenode.fsync(src, fileId, dfsClient.clientName,
            lastBlockLength);
      } catch (IOException ioe) {
        DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src,
            ioe);
        // If we got an error here, it might be because some other thread
        // called close before our hflush completed. In that case, we should
        // throw an exception that the stream is closed.
        checkClosed();
        // If we aren't closed but failed to sync, we should expose that to the
        // caller.
        throw ioe;
      }
    }

    synchronized(this) {
      if (streamer != null) {
        streamer.setHflush();
      }
    }
  } catch (InterruptedIOException interrupt) {
    // This kind of error doesn't mean that the stream itself is broken - just
    // the flushing thread got interrupted. So, we shouldn't close down the
    // writer, but instead just propagate the error
    throw interrupt;
  } catch (IOException e) {
    DFSClient.LOG.warn("Error while syncing", e);
    synchronized (this) {
      if (!isClosed()) {
        lastException.set(new IOException("IOException flush: " + e));
        closeThreads(true);
      }
    }
    throw e;
  }
}

/**
 * @deprecated use {@link HdfsDataOutputStream#getCurrentBlockReplication()}.
 */
@Deprecated
public synchronized int getNumCurrentReplicas() throws IOException {
  return getCurrentBlockReplication();
}

/**
 * Note that this is not a public API;
 * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
 *
 * @return the number of valid replicas of the current block
 */
public synchronized int getCurrentBlockReplication() throws IOException {
  dfsClient.checkOpen();
  checkClosed();
  if (streamer == null) {
    return blockReplication; // no pipeline, return repl factor of file
  }
  DatanodeInfo[] currentNodes = streamer.getNodes();
  if (currentNodes == null) {
    return blockReplication; // no pipeline, return repl factor of file
  }
  return currentNodes.length;
}

/**
 * Waits till all existing data is flushed and confirmations
 * received from datanodes.
 */
private void flushInternal() throws IOException {
  long toWaitFor;
  synchronized (this) {
    dfsClient.checkOpen();
    checkClosed();
    //
    // If there is data in the current buffer, send it across
    //
    queueCurrentPacket();
    toWaitFor = lastQueuedSeqno;
  }

  waitForAckedSeqno(toWaitFor);
}

// Blocks until the pipeline has acked the given sequence number (or the
// stream closes), warning if the wait exceeds the slow-IO threshold.
private void waitForAckedSeqno(long seqno) throws IOException {
  TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
  try {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
    }
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!isClosed()) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            dataQueue.wait(1000); // when we receive an ack, we notify on
                                  // dataQueue
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException e) {
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
          + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
    }
  } finally {
    scope.close();
  }
}

// Starts the background DataStreamer thread.
private synchronized void start() {
  streamer.start();
}

/**
 * Aborts this output stream and releases any system
 * resources associated with this stream.
 */
synchronized void abort() throws IOException {
  if (isClosed()) {
    return;
  }
  streamer.setLastException(new IOException("Lease timeout of "
      + (dfsClient.getHdfsTimeout()/1000) + " seconds expired."));
  closeThreads(true);
  dfsClient.endFileLease(fileId);
}

boolean isClosed() {
  return closed;
}

// Marks the stream closed and releases all queued packet buffers.
void setClosed() {
  closed = true;
  synchronized (dataQueue) {
    releaseBuffer(dataQueue, byteArrayManager);
    releaseBuffer(ackQueue, byteArrayManager);
  }
}

private static void releaseBuffer(List<DFSPacket> packets,
    ByteArrayManager bam) {
  for (DFSPacket p : packets) {
    p.releaseBuffer(bam);
  }
  packets.clear();
}

// shutdown datastreamer and responseprocessor threads.
// interrupt datastreamer if force is true
private void closeThreads(boolean force) throws IOException {
  try {
    streamer.close(force);
    streamer.join();
    if (s != null) {
      s.close();
    }
  } catch (InterruptedException e) {
    throw new IOException("Failed to shutdown streamer");
  } finally {
    streamer = null;
    s = null;
    setClosed();
  }
}

/**
 * Closes this output stream and releases any system
 * resources associated with this stream.
 */
@Override
public synchronized void close() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("DFSOutputStream#close", src);
  try {
    closeImpl();
  } finally {
    scope.close();
  }
}

// Flushes remaining data, ends the current block, shuts down the streamer,
// completes the file at the namenode, and releases the lease.
private synchronized void closeImpl() throws IOException {
  if (isClosed()) {
    IOException e = lastException.getAndSet(null);
    if (e == null)
      return;
    else
      throw e;
  }

  try {
    flushBuffer();       // flush from all upper layers

    if (currentPacket != null) {
      waitAndQueueCurrentPacket();
    }

    if (bytesCurBlock != 0) {
      // send an empty packet to mark the end of the block
      currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
      currentPacket.setSyncBlock(shouldSyncBlock);
    }

    flushInternal();             // flush all data to Datanodes
    // get last block before destroying the streamer
    ExtendedBlock lastBlock = streamer.getBlock();
    closeThreads(false);
    TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
    try {
      completeFile(lastBlock);
    } finally {
      scope.close();
    }
    dfsClient.endFileLease(fileId);
  } catch (ClosedChannelException e) {
  } finally {
    setClosed();
  }
}

// should be called holding (this) lock since setTestFilename() may
// be called during unit tests
private void completeFile(ExtendedBlock last) throws IOException {
  long localstart = Time.monotonicNow();
  long localTimeout = 400;
  boolean fileComplete = false;
  int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
  // Retry complete() with exponential backoff until the namenode reports the
  // last block has enough replicas, or retries/timeout are exhausted.
  while (!fileComplete) {
    fileComplete =
        dfsClient.namenode.complete(src, dfsClient.clientName, last, fileId);
    if (!fileComplete) {
      final int hdfsTimeout = dfsClient.getHdfsTimeout();
      if (!dfsClient.clientRunning
          || (hdfsTimeout > 0
              && localstart + hdfsTimeout < Time.monotonicNow())) {
        String msg = "Unable to close file because dfsclient "
            + " was unable to contact the HDFS servers."
            + " clientRunning " + dfsClient.clientRunning
            + " hdfsTimeout " + hdfsTimeout;
        DFSClient.LOG.info(msg);
        throw new IOException(msg);
      }
      try {
        if (retries == 0) {
          throw new IOException("Unable to close file because the last block"
              + " does not have enough number of replicas.");
        }
        retries--;
        Thread.sleep(localTimeout);
        localTimeout *= 2;
        if (Time.monotonicNow() - localstart > 5000) {
          DFSClient.LOG.info("Could not complete " + src + " retrying...");
        }
      } catch (InterruptedException ie) {
        DFSClient.LOG.warn("Caught exception ", ie);
      }
    }
  }
}

@VisibleForTesting
public void setArtificialSlowdown(long period) {
  artificialSlowdown = period;
}

@VisibleForTesting
public synchronized void setChunksPerPacket(int value) {
  chunksPerPacket = Math.min(chunksPerPacket, value);
  packetSize = (bytesPerChecksum + getChecksumSize()) * chunksPerPacket;
}

synchronized void setTestFilename(String newname) {
  src = newname;
}

/**
 * Returns the size of a file as it was when this stream was opened
 */
public long getInitialLen() {
  return initialFileSize;
}

/**
 * @return the FileEncryptionInfo for this stream, or null if not encrypted.
 */
public FileEncryptionInfo getFileEncryptionInfo() {
  return fileEncryptionInfo;
}

/**
 * Returns the access token currently used by streamer, for testing only
 */
synchronized Token<BlockTokenIdentifier> getBlockToken() {
  return streamer.getBlockToken();
}

@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
  CachingStrategy prevStrategy, nextStrategy;
  // CachingStrategy is immutable.  So build a new CachingStrategy with the
  // modifications we want, and compare-and-swap it in.
  do {
    prevStrategy = this.cachingStrategy.get();
    nextStrategy = new CachingStrategy.Builder(prevStrategy).
setDropBehind(dropBehind).build(); } while (!this.cachingStrategy.compareAndSet(prevStrategy, nextStrategy)); } @VisibleForTesting ExtendedBlock getBlock() { return streamer.getBlock(); } @VisibleForTesting public long getFileId() { return fileId; } private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) { System.arraycopy(srcs, 0, dsts, 0, skipIndex); System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex); } }
apache-2.0
Drifftr/devstudio-tooling-bps
plugins/org.eclipse.bpel.ui/src/org/eclipse/bpel/ui/details/providers/ModelViewerSorter.java
1630
/******************************************************************************* * Copyright (c) 2005, 2012 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - initial API and implementation *******************************************************************************/ package org.eclipse.bpel.ui.details.providers; import org.eclipse.jface.viewers.ContentViewer; import org.eclipse.jface.viewers.ILabelProvider; import org.eclipse.jface.viewers.Viewer; import org.eclipse.jface.viewers.ViewerSorter; /** * A sorter which alphabetically sorts model objects based on the labels provided for them * by the viewer's installed label provider. */ public class ModelViewerSorter extends ViewerSorter { // clients should call getInstance() protected ModelViewerSorter() { } protected static ModelViewerSorter instance = new ModelViewerSorter(); public static ModelViewerSorter getInstance() { return instance; } @Override public int compare(Viewer viewer, Object e1, Object e2) { if (e1 == null && e2 != null) return -1; if (e1 != null && e2 == null) return +1; ILabelProvider labelProvider = (ILabelProvider)((ContentViewer)viewer).getLabelProvider(); String property1 = labelProvider.getText(e1); String property2 = labelProvider.getText(e2); return collator.compare(property1, property2); } }
apache-2.0
apurtell/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
3431
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;

/**
 * This interface intends to align the state between client and server
 * via RPC communication.
 *
 * This should be implemented separately on the client side and server side
 * and can be used to pass state information on RPC responses from server
 * to client.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface AlignmentContext {

  /**
   * This is the intended server method call to implement to pass state info
   * during RPC response header construction.
   *
   * @param header The RPC response header builder.
   */
  void updateResponseState(RpcResponseHeaderProto.Builder header);

  /**
   * This is the intended client method call to implement to receive state info
   * during RPC response processing.
   *
   * @param header The RPC response header.
   */
  void receiveResponseState(RpcResponseHeaderProto header);

  /**
   * This is the intended client method call to pull last seen state info
   * into RPC request processing.
   *
   * @param header The RPC request header builder.
   */
  void updateRequestState(RpcRequestHeaderProto.Builder header);

  /**
   * This is the intended server method call to implement to receive
   * client state info during RPC response header processing.
   *
   * @param header The RPC request header.
   * @param threshold a parameter to verify a condition when server
   *        should reject client request due to its state being too far
   *        misaligned with the client state.
   *        See implementation for more details.
   * @return state id required for the server to execute the call.
   * @throws IOException if the request cannot be served because the states
   *         are too far misaligned (implementation-defined).
   */
  long receiveRequestState(RpcRequestHeaderProto header, long threshold)
      throws IOException;

  /**
   * Returns the last seen state id of the alignment context instance.
   *
   * @return the value of the last seen state id.
   */
  long getLastSeenStateId();

  /**
   * Return true if this method call does need to be synced, false
   * otherwise. sync meaning server state needs to have caught up with
   * client state.
   *
   * @param protocolName the name of the protocol
   * @param method the method call to check
   * @return true if this method call needs to be coordinated (synced),
   *         false otherwise.
   */
  boolean isCoordinatedCall(String protocolName, String method);
}
apache-2.0
Drifftr/devstudio-tooling-bps
plugins/org.eclipse.bpel.ui.noEmbeddedEditors/src/org/eclipse/bpel/ui/properties/MessageExchangeSection.java
10830
/**
 * <copyright>
 * Copyright (c) 2008, 2012 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 * </copyright>
 */
package org.eclipse.bpel.ui.properties;

import org.eclipse.bpel.common.ui.assist.FieldAssistAdapter;
import org.eclipse.bpel.common.ui.details.IDetailsAreaConstants;
import org.eclipse.bpel.common.ui.flatui.FlatFormAttachment;
import org.eclipse.bpel.common.ui.flatui.FlatFormData;
import org.eclipse.bpel.model.BPELFactory;
import org.eclipse.bpel.model.MessageExchange;
import org.eclipse.bpel.model.util.BPELUtils;
import org.eclipse.bpel.ui.BPELUIPlugin;
import org.eclipse.bpel.ui.IBPELUIConstants;
import org.eclipse.bpel.ui.IHelpContextIds;
import org.eclipse.bpel.ui.Messages;
import org.eclipse.bpel.ui.adapters.ILabeledElement;
import org.eclipse.bpel.ui.commands.AddMessageExchangeCommand;
import org.eclipse.bpel.ui.commands.CompoundCommand;
import org.eclipse.bpel.ui.commands.SetMessageExchangeCommand;
import org.eclipse.bpel.ui.details.providers.MessageExchangeContentProvider;
import org.eclipse.bpel.ui.details.providers.ModelLabelProvider;
import org.eclipse.bpel.ui.proposal.providers.ModelContentProposalProvider;
import org.eclipse.bpel.ui.proposal.providers.RunnableProposal;
import org.eclipse.bpel.ui.proposal.providers.Separator;
import org.eclipse.bpel.ui.util.BPELUtil;
import org.eclipse.bpel.ui.util.ModelHelper;
import org.eclipse.bpel.ui.util.MultiObjectAdapter;
import org.eclipse.bpel.ui.util.NameDialog;
import org.eclipse.emf.common.notify.Adapter;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.gef.commands.Command;
import org.eclipse.jface.fieldassist.ContentProposalAdapter;
import org.eclipse.jface.fieldassist.IContentProposal;
import org.eclipse.jface.fieldassist.IContentProposalListener;
import org.eclipse.jface.fieldassist.IControlContentAdapter;
import org.eclipse.jface.fieldassist.TextContentAdapter;
import org.eclipse.jface.window.Window;
import org.eclipse.swt.SWT;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Text;
import org.eclipse.ui.PlatformUI;

/**
 * Property sheet section that lets the user view, pick, create, or clear the
 * message exchange associated with the currently selected BPEL model element.
 * The section consists of a label, a text field with content assist, and a
 * browse button that opens the same proposal popup.
 *
 * @author Miriam Grundig (MGrundig@de.ibm.com)
 */
public class MessageExchangeSection extends BPELPropertySection {

    // Parent composite; kept so createMessageExchange() can obtain a Shell
    // for the name dialog.
    Composite parentComposite;

    //SWT widgets
    private Label messExLabel;
    private Text messExName;
    private Button messExBrowseButton;

    // Content adapter that ignores null proposal text instead of forwarding
    // it to the Text widget (Text#setText(null) would throw).
    private IControlContentAdapter fTextContentAdapter = new TextContentAdapter() {

        @Override
        public void insertControlContents(Control control, String text, int cursorPosition) {
            if (text != null) {
                super.insertControlContents(control, text, cursorPosition);
            }
        }

        @Override
        public void setControlContents(Control control, String text, int cursorPosition) {
            if (text != null) {
                super.setControlContents(control, text, cursorPosition);
            }
        }
    };

    //TODO check this
    @Override
    protected MultiObjectAdapter[] createAdapters() {
        return new MultiObjectAdapter[] {
            /* model object */
            new MultiObjectAdapter() {
                @Override
                public void notify (Notification n) {
                    try {
                        // Refresh the text field whenever the model change
                        // affects this element's message exchange.
                        if (ModelHelper.isMessageExchangeAffected(getInput(), n)) {
                            updateMessExWidgets();
                        }
                    } catch (Exception e) {
                        BPELUIPlugin.log(e);
                    }
                }
            },
        };
    }

    /**
     * Installs the new input element and refreshes the widgets so that they
     * display the message exchange of the newly selected element.
     */
    @SuppressWarnings("nls")
    @Override
    protected void basicSetInput (EObject input) {
        super.basicSetInput(input);
        updateMessExWidgets();
    }

    /**
     * Creates the label, text field, and browse button, wires up content
     * assist (including "create global", "create local", and "clear"
     * proposals), and lays the widgets out.
     *
     * @param composite the parent to create the widgets in
     */
    protected void createMessExWidgets(Composite composite) {

        // create items
        messExLabel = fWidgetFactory.createLabel(composite, Messages.MessageExchangeImplSection_1);
        messExName = fWidgetFactory.createText(composite, EMPTY_STRING, SWT.NONE);
        messExBrowseButton = fWidgetFactory.createButton(composite,EMPTY_STRING,SWT.ARROW | SWT.DOWN | SWT.RIGHT );

        // Content Assist for Message Exchange

        // Proposal: create a new message exchange on the process (global).
        RunnableProposal proposal = new RunnableProposal() {
            @Override
            public String getLabel() {
                return Messages.MessageExchangeImplSection_Create_Global;
            }
            public void run() {
                createMessageExchange ( BPELUtils.getProcess( getInput () ) , null );
            }
        };

        // Proposal: create a new message exchange on the containing scope (local).
        RunnableProposal proposal2 = new RunnableProposal() {
            @Override
            public String getLabel() {
                return Messages.MessageExchangeImplSection_Create_Local;
            }
            public void run() {
                createMessageExchange ( ModelHelper.getContainingScope( getInput()), null);
            }
        };

        // Proposal: clear the message exchange reference on the input element.
        RunnableProposal proposal3 = new RunnableProposal() {
            @Override
            public String getLabel() {
                return Messages.MessageExchangeImplSection_Clear;
            }
            public void run() {
                Command cmd = new SetMessageExchangeCommand(getInput(), null) ;
                cmd.setLabel(IBPELUIConstants.CMD_ADD_MESSAGEEXCHANGE);
                runCommand(cmd);
            }
        };

        MessageExchangeContentProvider provider = new MessageExchangeContentProvider();

        ModelContentProposalProvider proposalProvider;
        proposalProvider = new ModelContentProposalProvider(
                new ModelContentProposalProvider.ValueProvider () {
                    @Override
                    public Object value() {
                        return getInput();
                    }
                },
                provider );

        // Append the action proposals after the model-derived proposals.
        proposalProvider.addProposalToEnd( new Separator () );
        proposalProvider.addProposalToEnd( proposal );
        proposalProvider.addProposalToEnd( proposal2 );
        proposalProvider.addProposalToEnd( proposal3 );

        final FieldAssistAdapter contentAssist = new FieldAssistAdapter (
                messExName,
                fTextContentAdapter,
                proposalProvider,
                null, null, false );
        // installDecoration = false --> no icon that shows that we have content assist.

        contentAssist.setLabelProvider( new ModelLabelProvider () );
        contentAssist.setPopupSize( new Point(300,100) );
        contentAssist.setFilterStyle(ContentProposalAdapter.FILTER_CUMULATIVE);
        contentAssist.setProposalAcceptanceStyle( ContentProposalAdapter.PROPOSAL_REPLACE);

        // The RunnableProposals double as listeners: accepting one runs it.
        contentAssist.addContentProposalListener( proposal );
        contentAssist.addContentProposalListener( proposal2 );
        contentAssist.addContentProposalListener( proposal3 );

        // Accepting a model proposal sets the chosen message exchange on the input.
        contentAssist.addContentProposalListener(new IContentProposalListener () {

            public void proposalAccepted(IContentProposal chosenProposal) {
                if (chosenProposal.getContent() == null) {
                    return ;
                }
                MessageExchange me = null;
                try {
                    // Model proposals are EMF adapters whose target is the
                    // proposed MessageExchange; anything else is ignored.
                    me = (MessageExchange) ((Adapter)chosenProposal).getTarget();
                } catch (Throwable t) {
                    return ;
                }
                Command cmd = new SetMessageExchangeCommand(getInput(),me);
                cmd.setLabel(IBPELUIConstants.CMD_ADD_MESSAGEEXCHANGE);
                runCommand(cmd);
            }
        });

        // Open content assist window on button click
        messExBrowseButton.addListener(SWT.Selection, new Listener() {
            public void handleEvent(Event event) {
                contentAssist.openProposals();
            }
        });

        // Pressing Enter resolves the typed name (creating the message
        // exchange if it does not exist yet).
        messExName.addListener(SWT.KeyDown, new Listener () {
            public void handleEvent(Event event) {
                if (event.keyCode == SWT.CR) {
                    findAndSetOrCreateMessageExchange( messExName.getText() );
                }
            }
        });
        // End of content assist for message exchange

        // Layout
        FlatFormData data = new FlatFormData();
        data.right = new FlatFormAttachment(100, 0);
        data.top = new FlatFormAttachment(messExName,+2,SWT.TOP);
        data.bottom = new FlatFormAttachment(messExName,-2,SWT.BOTTOM);
        messExBrowseButton.setLayoutData(data);

        data = new FlatFormData();
        data.left = new FlatFormAttachment(0, BPELUtil.calculateLabelWidth(messExLabel, STANDARD_LABEL_WIDTH_SM));
        data.right = new FlatFormAttachment(messExBrowseButton, 0);
        messExName.setLayoutData(data);

        data = new FlatFormData();
        data.left = new FlatFormAttachment(0, 0);
        data.right = new FlatFormAttachment(messExName, -IDetailsAreaConstants.HSPACE);
        data.top = new FlatFormAttachment(messExName, 0, SWT.CENTER);
        messExLabel.setLayoutData(data);
    }

    @Override
    protected void createClient(Composite parent) {
        Composite composite = parentComposite = createFlatFormComposite(parent);
        createMessExWidgets(composite);
        PlatformUI.getWorkbench().getHelpSystem().setHelp(composite, IHelpContextIds.PROPERTY_PAGE_MESSAGE_EXCHANGE);
    }

    /**
     * @see org.eclipse.bpel.ui.properties.BPELPropertySection#getUserContext()
     */
    @Override
    public Object getUserContext() {
        return null;
    }

    /**
     * @see org.eclipse.bpel.ui.properties.BPELPropertySection#restoreUserContext(java.lang.Object)
     */
    @Override
    public void restoreUserContext(Object userContext) {
        messExName.setFocus();
    }

    /**
     * Resolves the given name against the containing scope. If a message
     * exchange with that name exists it is set on the input element; if not,
     * a new one is created via {@link #createMessageExchange}. An empty name
     * clears the reference.
     *
     * @param name the (possibly empty) message-exchange name typed by the user
     */
    void findAndSetOrCreateMessageExchange ( String name ) {

        name = name.trim();
        EObject model = getInput();

        MessageExchange me = null;

        if (name.length() > 0) {
            me = (MessageExchange) ModelHelper.findElementByName(ModelHelper.getContainingScope(model), name, MessageExchange.class);
            // does not exist
            if (me == null) {
                createMessageExchange ( ModelHelper.getContainingScope(model), name );
                return ;
            }
        }

        // Note: when name is empty, me stays null and the command clears the
        // message exchange reference.
        SetMessageExchangeCommand cmd = new SetMessageExchangeCommand(model, me);
        cmd.setLabel(IBPELUIConstants.CMD_ADD_MESSAGEEXCHANGE);
        runCommand (cmd);
    }

    /**
     * Prompts for a name, creates a new message exchange under {@code ref},
     * and sets it on the input element as one undoable compound command.
     *
     * @param ref  the container (process or scope) to add the new exchange to
     * @param name initial name shown in the dialog, may be null
     */
    private void createMessageExchange ( EObject ref , String name ) {

        MessageExchange me = BPELFactory.eINSTANCE.createMessageExchange();

        if (name == null) {
            name = EMPTY_STRING;
        }

        // ask for the name, we know the type.
        NameDialog nameDialog = new NameDialog(
                parentComposite.getShell(),
                Messages.MessageExchange_Create_Dialog_Title,
                Messages.MessageExchange_Create_Dialog_NameField_Label,
                name,
                BPELUtil.getNCNameValidator());

        if (nameDialog.open() == Window.CANCEL)  {
            return ;
        }

        // set name
        me.setName ( nameDialog.getValue() );

        CompoundCommand cmd = new CompoundCommand();
        cmd.add(new AddMessageExchangeCommand(ref, me));
        cmd.add(new SetMessageExchangeCommand(getInput(), me));
        cmd.setLabel(IBPELUIConstants.CMD_ADD_MESSAGEEXCHANGE);
        runCommand( cmd );
    }

    // Refreshes the text field from the model: empty when no message
    // exchange is set, otherwise the label of the current one.
    private void updateMessExWidgets() {
        MessageExchange messEx = ModelHelper.getMessageExchange(getInput());
        if (messEx == null) {
            messExName.setText(EMPTY_STRING);
        } else {
            ILabeledElement labeledElement = BPELUtil.adapt(messEx, ILabeledElement.class);
            messExName.setText(labeledElement.getLabel(messEx));
        }
    }
}
apache-2.0
sdmcraft/jackrabbit
jackrabbit-core/src/main/java/org/apache/jackrabbit/core/AddMixinOperation.java
7576
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.core;

import static org.apache.jackrabbit.core.ItemValidator.CHECK_CHECKED_OUT;
import static org.apache.jackrabbit.core.ItemValidator.CHECK_CONSTRAINTS;
import static org.apache.jackrabbit.core.ItemValidator.CHECK_HOLD;
import static org.apache.jackrabbit.core.ItemValidator.CHECK_LOCK;
import static org.apache.jackrabbit.spi.commons.name.NameConstants.MIX_SIMPLE_VERSIONABLE;
import static org.apache.jackrabbit.spi.commons.name.NameConstants.MIX_VERSIONABLE;

import java.util.HashSet;
import java.util.Set;

import javax.jcr.RepositoryException;
import javax.jcr.nodetype.ConstraintViolationException;
import javax.jcr.nodetype.NodeDefinition;
import javax.jcr.nodetype.PropertyDefinition;

import org.apache.jackrabbit.core.nodetype.EffectiveNodeType;
import org.apache.jackrabbit.core.nodetype.NodeTypeConflictException;
import org.apache.jackrabbit.core.nodetype.NodeTypeImpl;
import org.apache.jackrabbit.core.nodetype.NodeTypeManagerImpl;
import org.apache.jackrabbit.core.nodetype.NodeTypeRegistry;
import org.apache.jackrabbit.core.security.authorization.Permission;
import org.apache.jackrabbit.core.session.SessionContext;
import org.apache.jackrabbit.core.session.SessionWriteOperation;
import org.apache.jackrabbit.core.state.NodeState;
import org.apache.jackrabbit.spi.Name;
import org.apache.jackrabbit.spi.commons.nodetype.NodeDefinitionImpl;
import org.apache.jackrabbit.spi.commons.nodetype.PropertyDefinitionImpl;

/**
 * Session operation for adding a mixin type to a node.
 *
 * The operation: (1) checks permissions and modifiability, (2) verifies the
 * requested type really is a mixin and is not already covered by the node's
 * primary type or existing mixins, (3) builds the combined effective node
 * type to detect conflicts, and only then (4) mutates the transient node
 * state, creating any auto-created child items the mixin defines. On failure
 * during step (4) it attempts a best-effort rollback by removing the mixin.
 */
class AddMixinOperation implements SessionWriteOperation<Object> {

    /** Node the mixin is being added to. */
    private final NodeImpl node;

    /** Name of the mixin node type to add. */
    private final Name mixinName;

    public AddMixinOperation(NodeImpl node, Name mixinName) {
        this.node = node;
        this.mixinName = mixinName;
    }

    /**
     * Performs the add-mixin operation as described on the class comment.
     *
     * @param context the session context providing validators, node type
     *        manager and registry
     * @return this operation (non-null marker return; the value itself is
     *         not meaningful to callers)
     * @throws RepositoryException if the requested type is not a mixin, if
     *         permission/lock/checkin/hold checks fail, or if the resulting
     *         effective node type would be conflicting
     */
    public Object perform(SessionContext context) throws RepositoryException {
        int permissions = Permission.NODE_TYPE_MNGMT;
        // special handling of mix:(simple)versionable. since adding the
        // mixin alters the version storage jcr:versionManagement privilege
        // is required in addition.
        if (MIX_VERSIONABLE.equals(mixinName)
                || MIX_SIMPLE_VERSIONABLE.equals(mixinName)) {
            permissions |= Permission.VERSION_MNGMT;
        }
        context.getItemValidator().checkModify(
                node,
                CHECK_LOCK | CHECK_CHECKED_OUT | CHECK_CONSTRAINTS | CHECK_HOLD,
                permissions);

        NodeTypeManagerImpl ntMgr = context.getNodeTypeManager();
        NodeTypeImpl mixin = ntMgr.getNodeType(mixinName);
        if (!mixin.isMixin()) {
            throw new RepositoryException(
                    context.getJCRName(mixinName) + " is not a mixin node type");
        }

        Name primaryTypeName = node.getNodeState().getNodeTypeName();
        NodeTypeImpl primaryType = ntMgr.getNodeType(primaryTypeName);
        if (primaryType.isDerivedFrom(mixinName)) {
            // new mixin is already included in primary type
            return this;
        }

        // build effective node type of mixin's & primary type in order
        // to detect conflicts
        NodeTypeRegistry ntReg = context.getNodeTypeRegistry();
        EffectiveNodeType entExisting;
        try {
            // existing mixin's
            Set<Name> mixins = new HashSet<Name>(
                    node.getNodeState().getMixinTypeNames());

            // build effective node type representing primary type including
            // existing mixin's
            entExisting = ntReg.getEffectiveNodeType(primaryTypeName, mixins);
            if (entExisting.includesNodeType(mixinName)) {
                // new mixin is already included in existing mixin type(s)
                return this;
            }

            // add new mixin
            mixins.add(mixinName);
            // try to build new effective node type (will throw in case
            // of conflicts)
            ntReg.getEffectiveNodeType(primaryTypeName, mixins);
        } catch (NodeTypeConflictException e) {
            throw new ConstraintViolationException(e.getMessage(), e);
        }

        // do the actual modifications implied by the new mixin;
        // try to revert the changes in case an exception occurs
        try {
            // modify the state of this node
            NodeState thisState =
                (NodeState) node.getOrCreateTransientItemState();
            // add mixin name
            Set<Name> mixins = new HashSet<Name>(thisState.getMixinTypeNames());
            mixins.add(mixinName);
            thisState.setMixinTypeNames(mixins);

            // set jcr:mixinTypes property
            node.setMixinTypesProperty(mixins);

            // add 'auto-create' properties defined in mixin type
            for (PropertyDefinition aPda : mixin.getAutoCreatedPropertyDefinitions()) {
                PropertyDefinitionImpl pd = (PropertyDefinitionImpl) aPda;
                // make sure that the property is not already defined
                // by primary type or existing mixin's
                NodeTypeImpl declaringNT =
                    (NodeTypeImpl) pd.getDeclaringNodeType();
                if (!entExisting.includesNodeType(declaringNT.getQName())) {
                    node.createChildProperty(
                            pd.unwrap().getName(), pd.getRequiredType(), pd);
                }
            }

            // recursively add 'auto-create' child nodes defined in mixin type
            for (NodeDefinition aNda : mixin.getAutoCreatedNodeDefinitions()) {
                NodeDefinitionImpl nd = (NodeDefinitionImpl) aNda;
                // make sure that the child node is not already defined
                // by primary type or existing mixin's
                NodeTypeImpl declaringNT =
                    (NodeTypeImpl) nd.getDeclaringNodeType();
                if (!entExisting.includesNodeType(declaringNT.getQName())) {
                    node.createChildNode(
                            nd.unwrap().getName(),
                            (NodeTypeImpl) nd.getDefaultPrimaryType(),
                            null);
                }
            }
        } catch (RepositoryException re) {
            // try to undo the modifications by removing the mixin
            try {
                node.removeMixin(mixinName);
            } catch (RepositoryException re1) {
                // silently ignore & fall through: the original failure is
                // more informative than the rollback failure
            }
            throw re;
        }

        return this;
    }


    //--------------------------------------------------------------< Object >

    /**
     * Returns a string representation of this operation.
     */
    public String toString() {
        return "node.addMixin(" + mixinName + ")";
    }

}
apache-2.0
apurtell/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
7479
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.util; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.SysInfo; import org.apache.hadoop.yarn.conf.YarnConfiguration; /** * Plugin to calculate resource information on the system. */ @InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"}) @InterfaceStability.Unstable public class ResourceCalculatorPlugin extends Configured { private static final Logger LOG = LoggerFactory.getLogger(ResourceCalculatorPlugin.class); private final SysInfo sys; protected ResourceCalculatorPlugin() { this(SysInfo.newInstance()); } public ResourceCalculatorPlugin(SysInfo sys) { this.sys = sys; } /** * Obtain the total size of the virtual memory present in the system. * * @return virtual memory size in bytes. */ public long getVirtualMemorySize() { return sys.getVirtualMemorySize(); } /** * Obtain the total size of the physical memory present in the system. * * @return physical memory size bytes. 
*/ public long getPhysicalMemorySize() { return sys.getPhysicalMemorySize(); } /** * Obtain the total size of the available virtual memory present * in the system. * * @return available virtual memory size in bytes. */ public long getAvailableVirtualMemorySize() { return sys.getAvailableVirtualMemorySize(); } /** * Obtain the total size of the available physical memory present * in the system. * * @return available physical memory size bytes. */ public long getAvailablePhysicalMemorySize() { return sys.getAvailablePhysicalMemorySize(); } /** * Obtain the total number of logical processors present on the system. * * @return number of logical processors */ public int getNumProcessors() { return sys.getNumProcessors(); } /** * Obtain total number of physical cores present on the system. * * @return number of physical cores */ public int getNumCores() { return sys.getNumCores(); } /** * Obtain the CPU frequency of on the system. * * @return CPU frequency in kHz */ public long getCpuFrequency() { return sys.getCpuFrequency(); } /** * Obtain the cumulative CPU time since the system is on. * * @return cumulative CPU time in milliseconds */ public long getCumulativeCpuTime() { return sys.getCumulativeCpuTime(); } /** * Obtain the CPU usage % of the machine. Return -1 if it is unavailable. * * @return CPU usage in % */ public float getCpuUsagePercentage() { return sys.getCpuUsagePercentage(); } /** * Obtain the number of VCores used. Return -1 if it is unavailable. * * @return Number of VCores used a percentage (from 0 to #VCores) */ public float getNumVCoresUsed() { return sys.getNumVCoresUsed(); } /** * Obtain the aggregated number of bytes read over the network. * @return total number of bytes read. */ public long getNetworkBytesRead() { return sys.getNetworkBytesRead(); } /** * Obtain the aggregated number of bytes written to the network. * @return total number of bytes written. 
*/ public long getNetworkBytesWritten() { return sys.getNetworkBytesWritten(); } /** * Obtain the aggregated number of bytes read from disks. * * @return total number of bytes read. */ public long getStorageBytesRead() { return sys.getStorageBytesRead(); } /** * Obtain the aggregated number of bytes written to disks. * * @return total number of bytes written. */ public long getStorageBytesWritten() { return sys.getStorageBytesWritten(); } /** * Create the ResourceCalculatorPlugin from the class name and configure it. If * class name is null, this method will try and return a memory calculator * plugin available for this system. * * @param clazz ResourceCalculator plugin class-name * @param conf configure the plugin with this. * @return ResourceCalculatorPlugin or null if ResourceCalculatorPlugin is not * available for current system */ public static ResourceCalculatorPlugin getResourceCalculatorPlugin( Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) { if (clazz != null) { return ReflectionUtils.newInstance(clazz, conf); } try { return new ResourceCalculatorPlugin(); } catch (UnsupportedOperationException ue) { LOG.warn("Failed to instantiate default resource calculator. " + ue.getMessage()); } catch (Throwable t) { LOG.warn(t + ": Failed to instantiate default resource calculator.", t); } return null; } /** * Create the ResourceCalculatorPlugin for the containers monitor in the Node * Manager and configure it. If the plugin is not configured, this method * will try and return a memory calculator plugin available for this system. * * @param conf Configure the plugin with this. * @return ResourceCalculatorPlugin or null if ResourceCalculatorPlugin is * not available for current system. */ public static ResourceCalculatorPlugin getContainersMonitorPlugin( Configuration conf) { Class<? extends ResourceCalculatorPlugin> clazzNM = conf.getClass( YarnConfiguration.NM_MON_RESOURCE_CALCULATOR, null, ResourceCalculatorPlugin.class); Class<? 
extends ResourceCalculatorPlugin> clazz = conf.getClass( YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, clazzNM, ResourceCalculatorPlugin.class); return ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf); } /** * Create the ResourceCalculatorPlugin for the node resource monitor in the * Node Manager and configure it. If the plugin is not configured, this * method will try and return a memory calculator plugin available for this * system. * * @param conf Configure the plugin with this. * @return ResourceCalculatorPlugin or null if ResourceCalculatorPlugin is * not available for current system. */ public static ResourceCalculatorPlugin getNodeResourceMonitorPlugin( Configuration conf) { Class<? extends ResourceCalculatorPlugin> clazz = conf.getClass( YarnConfiguration.NM_MON_RESOURCE_CALCULATOR, null, ResourceCalculatorPlugin.class); return ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf); } }
apache-2.0
dturanski/spring-cloud-data
spring-cloud-dataflow-shell-core/src/main/java/org/springframework/cloud/dataflow/shell/package-info.java
729
/* * Copyright 2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Root package for the Spring Cloud Data Flow Shell. */ package org.springframework.cloud.dataflow.shell;
apache-2.0
tkpanther/ignite
modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteCacheConfigVariationsAbstractTest.java
19428
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.testframework.junits; import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteTransactions; import org.apache.ignite.Ignition; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMemoryMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.CachePeekMode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.H2CacheStoreStrategy; import org.apache.ignite.internal.processors.cache.MapCacheStoreStrategy; import org.apache.ignite.internal.processors.cache.TestCacheStoreStrategy; import org.apache.ignite.internal.util.lang.GridAbsPredicateX; import org.apache.ignite.internal.util.typedef.X; 
import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.configvariations.CacheStartMode; import org.apache.ignite.transactions.Transaction; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMemoryMode.OFFHEAP_TIERED; import static org.apache.ignite.cache.CacheMemoryMode.ONHEAP_TIERED; /** * Abstract class for cache configuration variations tests. */ public abstract class IgniteCacheConfigVariationsAbstractTest extends IgniteConfigVariationsAbstractTest { /** */ protected static final int CLIENT_NEAR_ONLY_IDX = 2; /** Test timeout. */ private static final long TEST_TIMEOUT = 30 * 1000; /** */ protected static TestCacheStoreStrategy storeStgy; /** {@inheritDoc} */ @Override protected long getTestTimeout() { return TEST_TIMEOUT; } /** {@inheritDoc} */ @Override protected final void beforeTestsStarted() throws Exception { initStoreStrategy(); assert testsCfg != null; assert !testsCfg.withClients() || testsCfg.gridCount() >= 3; assert testsCfg.testedNodeIndex() >= 0 : "testedNodeIdx: " + testedNodeIdx; testedNodeIdx = testsCfg.testedNodeIndex(); if (testsCfg.isStartCache()) { final CacheStartMode cacheStartMode = testsCfg.cacheStartMode(); final int cnt = testsCfg.gridCount(); if (cacheStartMode == CacheStartMode.STATIC) { info("All nodes will be stopped, new " + cnt + " nodes will be started."); Ignition.stopAll(true); for (int i = 0; i < cnt; i++) { String gridName = getTestGridName(i); IgniteConfiguration cfg = optimize(getConfiguration(gridName)); if (i != CLIENT_NODE_IDX && i != CLIENT_NEAR_ONLY_IDX) { CacheConfiguration cc = cacheConfiguration(); cc.setName(cacheName()); cfg.setCacheConfiguration(cc); } startGrid(gridName, cfg, null); } if (testsCfg.withClients() && testsCfg.gridCount() > CLIENT_NEAR_ONLY_IDX) 
grid(CLIENT_NEAR_ONLY_IDX).createNearCache(cacheName(), new NearCacheConfiguration()); } else if (cacheStartMode == null || cacheStartMode == CacheStartMode.DYNAMIC) { super.beforeTestsStarted(); startCachesDinamically(); } else throw new IllegalArgumentException("Unknown cache start mode: " + cacheStartMode); } if (testsCfg.gridCount() > 1) checkTopology(testsCfg.gridCount()); awaitPartitionMapExchange(); for (int i = 0; i < gridCount(); i++) info("Grid " + i + ": " + grid(i).localNode().id()); if (testsCfg.withClients()) { if (testedNodeIdx != SERVER_NODE_IDX) assertEquals(testedNodeIdx == CLIENT_NEAR_ONLY_IDX, nearEnabled()); info(">>> Starting set of tests [testedNodeIdx=" + testedNodeIdx + ", id=" + grid(testedNodeIdx).localNode().id() + ", isClient=" + isClientMode() + ", nearEnabled=" + nearEnabled() + "]"); } } /** Initialize {@link #storeStgy} with respect to the nature of the test */ void initStoreStrategy() throws IgniteCheckedException { if (storeStgy == null) storeStgy = isMultiJvm() ? new H2CacheStoreStrategy() : new MapCacheStoreStrategy(); } /** * Starts caches dynamically. * * @throws Exception If failed. 
*/ private void startCachesDinamically() throws Exception { for (int i = 0; i < gridCount(); i++) { info("Starting cache dinamically on grid: " + i); IgniteEx grid = grid(i); if (i != CLIENT_NODE_IDX && i != CLIENT_NEAR_ONLY_IDX) { CacheConfiguration cc = cacheConfiguration(); cc.setName(cacheName()); grid.getOrCreateCache(cc); } if (testsCfg.withClients() && i == CLIENT_NEAR_ONLY_IDX && grid(i).configuration().isClientMode()) grid(CLIENT_NEAR_ONLY_IDX).createNearCache(cacheName(), new NearCacheConfiguration()); } awaitPartitionMapExchange(); for (int i = 0; i < gridCount(); i++) assertNotNull(jcache(i)); for (int i = 0; i < gridCount(); i++) assertEquals("Cache is not empty [idx=" + i + ", entrySet=" + jcache(i).localEntries() + ']', 0, jcache(i).localSize(CachePeekMode.ALL)); } /** {@inheritDoc} */ @Override protected boolean expectedClient(String testGridName) { return getTestGridName(CLIENT_NODE_IDX).equals(testGridName) || getTestGridName(CLIENT_NEAR_ONLY_IDX).equals(testGridName); } /** {@inheritDoc} */ @Override protected void afterTestsStopped() throws Exception { if (testsCfg.isStopCache()) { for (int i = 0; i < gridCount(); i++) { info("Destroing cache on grid: " + i); IgniteCache<String, Integer> cache = jcache(i); assert i != 0 || cache != null; if (cache != null) cache.destroy(); } } storeStgy.resetStore(); super.afterTestsStopped(); } /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { super.beforeTest(); if (testsCfg.awaitPartitionMapExchange()) awaitPartitionMapExchange(); assert jcache().unwrap(Ignite.class).transactions().tx() == null; assertEquals(0, jcache().localSize()); assertEquals(0, jcache().size()); } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { Transaction tx = jcache().unwrap(Ignite.class).transactions().tx(); if (tx != null) { tx.close(); fail("Cache transaction remained after test completion: " + tx); } String cacheIsNotEmptyMsg = null; for (int i = 0; i < gridCount(); i++) { 
info("Checking grid: " + i); while (true) { try { final int fi = i; boolean cacheIsEmpty = GridTestUtils.waitForCondition( // Preloading may happen as nodes leave, so we need to wait. new GridAbsPredicateX() { @Override public boolean applyx() throws IgniteCheckedException { jcache(fi).removeAll(); if (jcache(fi).size(CachePeekMode.ALL) > 0) { for (Cache.Entry<?, ?> k : jcache(fi).localEntries()) jcache(fi).remove(k.getKey()); } int locSize = jcache(fi).localSize(CachePeekMode.ALL); if (locSize != 0) { info(">>>>> Debug localSize for grid: " + fi + " is " + locSize); info(">>>>> Debug ONHEAP localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.ONHEAP)); info(">>>>> Debug OFFHEAP localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.OFFHEAP)); info(">>>>> Debug PRIMARY localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.PRIMARY)); info(">>>>> Debug BACKUP localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.BACKUP)); info(">>>>> Debug NEAR localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.NEAR)); info(">>>>> Debug SWAP localSize for grid: " + fi + " is " + jcache(fi).localSize(CachePeekMode.SWAP)); } return locSize == 0; } }, 10_000); if (cacheIsEmpty) assertTrue("Cache is not empty: " + " localSize = " + jcache(fi).localSize(CachePeekMode.ALL) + ", local entries " + entrySet(jcache(fi).localEntries()), cacheIsEmpty); int primaryKeySize = jcache(i).localSize(CachePeekMode.PRIMARY); int keySize = jcache(i).localSize(); int size = jcache(i).localSize(); int globalSize = jcache(i).size(); int globalPrimarySize = jcache(i).size(CachePeekMode.PRIMARY); info("Size after [idx=" + i + ", size=" + size + ", keySize=" + keySize + ", primarySize=" + primaryKeySize + ", globalSize=" + globalSize + ", globalPrimarySize=" + globalPrimarySize + ", entrySet=" + jcache(i).localEntries() + ']'); if (!cacheIsEmpty) { cacheIsNotEmptyMsg = "Cache is not empty: localSize = " + 
jcache(fi).localSize(CachePeekMode.ALL) + ", local entries " + entrySet(jcache(fi).localEntries()); break; } assertEquals("Cache is not empty [idx=" + i + ", entrySet=" + jcache(i).localEntries() + ']', 0, jcache(i).localSize(CachePeekMode.ALL)); break; } catch (Exception e) { if (X.hasCause(e, ClusterTopologyCheckedException.class)) { info("Got topology exception while tear down (will retry in 1000ms)."); U.sleep(1000); } else throw e; } } if (cacheIsNotEmptyMsg != null) break; for (Cache.Entry entry : jcache(i).localEntries(CachePeekMode.SWAP)) jcache(i).remove(entry.getKey()); } assert jcache().unwrap(Ignite.class).transactions().tx() == null; if (cacheIsNotEmptyMsg == null) assertEquals("Cache is not empty", 0, jcache().localSize(CachePeekMode.ALL)); storeStgy.resetStore(); // Restore cache if current cache has garbage. if (cacheIsNotEmptyMsg != null) { for (int i = 0; i < gridCount(); i++) { info("Destroing cache on grid: " + i); IgniteCache<String, Integer> cache = jcache(i); assert i != 0 || cache != null; if (cache != null) cache.destroy(); } assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicateX() { @Override public boolean applyx() { for (int i = 0; i < gridCount(); i++) { if (jcache(i) != null) return false; } return true; } }, 10_000)); startCachesDinamically(); log.warning(cacheIsNotEmptyMsg); throw new IllegalStateException(cacheIsNotEmptyMsg); } assertEquals(0, jcache().localSize()); assertEquals(0, jcache().size()); } /** * Put entry to cache store. * * @param key Key. * @param val Value. */ protected void putToStore(Object key, Object val) { if (!storeEnabled()) throw new IllegalStateException("Failed to put to store because store is disabled."); storeStgy.putToStore(key, val); } /** * @return Default cache mode. */ protected CacheMode cacheMode() { CacheMode mode = cacheConfiguration().getCacheMode(); return mode == null ? CacheConfiguration.DFLT_CACHE_MODE : mode; } /** * @return Load previous value flag. 
*/ protected boolean isLoadPreviousValue() { return cacheConfiguration().isLoadPreviousValue(); } /** * @return Cache atomicity mode. */ protected CacheAtomicityMode atomicityMode() { return cacheConfiguration().getAtomicityMode(); } /** * @return {@code True} if values should be stored off-heap. */ protected CacheMemoryMode memoryMode() { return cacheConfiguration().getMemoryMode(); } /** * @return {@code True} if swap should happend after localEvict() call. */ protected boolean swapAfterLocalEvict() { if (memoryMode() == OFFHEAP_TIERED) return false; return memoryMode() == ONHEAP_TIERED ? (!offheapEnabled() && swapEnabled()) : swapEnabled(); } /** * @return {@code True} if store is enabled. */ protected boolean storeEnabled() { return cacheConfiguration().getCacheStoreFactory() != null; } /** * @return {@code True} if offheap memory is enabled. */ protected boolean offheapEnabled() { return cacheConfiguration().getOffHeapMaxMemory() >= 0; } /** * @return {@code True} if swap is enabled. */ protected boolean swapEnabled() { return cacheConfiguration().isSwapEnabled(); } /** * @return {@code true} if near cache should be enabled. */ protected boolean nearEnabled() { return grid(testedNodeIdx).cachex(cacheName()).context().isNear(); } /** * @return {@code True} if transactions are enabled. * @see #txShouldBeUsed() */ protected boolean txEnabled() { return atomicityMode() == TRANSACTIONAL; } /** * @return Cache configuration. */ protected CacheConfiguration cacheConfiguration() { return testsCfg.configurationFactory().cacheConfiguration(getTestGridName(testedNodeIdx)); } /** * @return {@code True} if transactions should be used. */ protected boolean txShouldBeUsed() { return txEnabled() && !isMultiJvm(); } /** * @return {@code True} if locking is enabled. */ protected boolean lockingEnabled() { return txEnabled(); } /** * @return Default cache instance. 
*/ @SuppressWarnings({"unchecked"}) @Override protected <K, V> IgniteCache<K, V> jcache() { return jcache(testedNodeIdx); } /** * @return A not near-only cache. */ protected IgniteCache<String, Integer> serverNodeCache() { return jcache(SERVER_NODE_IDX); } /** * @return Cache name. */ protected String cacheName() { return "testcache-" + testsCfg.description().hashCode(); } /** * @return Transactions instance. */ protected IgniteTransactions transactions() { return grid(0).transactions(); } /** * @param idx Index of grid. * @return Default cache. */ @SuppressWarnings({"unchecked"}) @Override protected <K, V> IgniteCache<K, V> jcache(int idx) { return ignite(idx).cache(cacheName()); } /** * @param idx Index of grid. * @return Cache context. */ protected GridCacheContext<String, Integer> context(final int idx) { if (isRemoteJvm(idx) && !isRemoteJvm()) throw new UnsupportedOperationException("Operation can't be done automatically via proxy. " + "Send task with this logic on remote jvm instead."); return ((IgniteKernal)grid(idx)).<String, Integer>internalCache(cacheName()).context(); } /** * @param cache Cache. * @return {@code True} if cache has OFFHEAP_TIERED memory mode. */ protected static <K, V> boolean offheapTiered(IgniteCache<K, V> cache) { return cache.getConfiguration(CacheConfiguration.class).getMemoryMode() == OFFHEAP_TIERED; } /** * Executes regular peek or peek from swap. * * @param cache Cache projection. * @param key Key. * @return Value. */ @Nullable protected static <K, V> V peek(IgniteCache<K, V> cache, K key) { return offheapTiered(cache) ? cache.localPeek(key, CachePeekMode.SWAP, CachePeekMode.OFFHEAP) : cache.localPeek(key, CachePeekMode.ONHEAP); } /** * @param cache Cache. * @param key Key. * @return {@code True} if cache contains given key. * @throws Exception If failed. */ @SuppressWarnings("unchecked") protected static boolean containsKey(IgniteCache cache, Object key) throws Exception { return offheapTiered(cache) ? 
cache.localPeek(key, CachePeekMode.OFFHEAP) != null : cache.containsKey(key); } }
apache-2.0
lincoln-lil/flink
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/util/collections/binary/BytesMultiMapTestBase.java
5640
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.runtime.util.collections.binary; import org.apache.flink.runtime.memory.MemoryManager; import org.apache.flink.runtime.memory.MemoryManagerBuilder; import org.apache.flink.table.data.RowData; import org.apache.flink.table.data.StringData; import org.apache.flink.table.data.binary.BinaryRowData; import org.apache.flink.table.data.writer.BinaryRowWriter; import org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer; import org.apache.flink.table.runtime.typeutils.PagedTypeSerializer; import org.apache.flink.table.runtime.util.KeyValueIterator; import org.apache.flink.table.types.logical.BigIntType; import org.apache.flink.table.types.logical.BooleanType; import org.apache.flink.table.types.logical.DoubleType; import org.apache.flink.table.types.logical.FloatType; import org.apache.flink.table.types.logical.IntType; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.table.types.logical.SmallIntType; import org.apache.flink.table.types.logical.VarCharType; import org.junit.Assert; import org.junit.Test; import java.util.Iterator; import java.util.Random; /** Base test class for 
{@link BytesMultiMap} and {@link WindowBytesMultiMap}. */ public abstract class BytesMultiMapTestBase<K> extends BytesMapTestBase { protected static final int NUM_VALUE_PER_KEY = 50; static final LogicalType[] KEY_TYPES = new LogicalType[] { new IntType(), VarCharType.STRING_TYPE, new DoubleType(), new BigIntType(), new BooleanType(), new FloatType(), new SmallIntType() }; static final LogicalType[] VALUE_TYPES = new LogicalType[] { VarCharType.STRING_TYPE, new IntType(), }; protected final PagedTypeSerializer<K> keySerializer; protected final BinaryRowDataSerializer valueSerializer; public BytesMultiMapTestBase(PagedTypeSerializer<K> keySerializer) { this.keySerializer = keySerializer; this.valueSerializer = new BinaryRowDataSerializer(VALUE_TYPES.length); } /** * Creates the specific BytesHashMap, either {@link BytesMultiMap} or {@link * WindowBytesMultiMap}. */ public abstract AbstractBytesMultiMap<K> createBytesMultiMap( MemoryManager memoryManager, int memorySize, LogicalType[] keyTypes, LogicalType[] valueTypes); /** * Generates {@code num} random keys, the types of key fields are defined in {@link #KEY_TYPES}. 
*/ public abstract K[] generateRandomKeys(int num); // ------------------------------------------------------------------------------------------ // Tests // ------------------------------------------------------------------------------------------ @Test public void testBuildAndRetrieve() throws Exception { final int numMemSegments = needNumMemSegments( NUM_ENTRIES, rowLength(RowType.of(VALUE_TYPES)), rowLength(RowType.of(KEY_TYPES)), PAGE_SIZE); int memorySize = numMemSegments * PAGE_SIZE; MemoryManager memoryManager = MemoryManagerBuilder.newBuilder().setMemorySize(numMemSegments * PAGE_SIZE).build(); AbstractBytesMultiMap<K> table = createBytesMultiMap(memoryManager, memorySize, KEY_TYPES, VALUE_TYPES); K[] keys = generateRandomKeys(NUM_ENTRIES / 10); BinaryRowData[] values = genValues(NUM_VALUE_PER_KEY); for (K key : keys) { BytesMap.LookupInfo<K, Iterator<RowData>> lookupInfo; for (BinaryRowData value : values) { lookupInfo = table.lookup(key); table.append(lookupInfo, value); } } KeyValueIterator<K, Iterator<RowData>> iter = table.getEntryIterator(false); while (iter.advanceNext()) { int i = 0; Iterator<RowData> valueIter = iter.getValue(); while (valueIter.hasNext()) { Assert.assertEquals(valueIter.next(), values[i++]); } } } private BinaryRowData[] genValues(int num) { BinaryRowData[] values = new BinaryRowData[num]; final Random rnd = new Random(RANDOM_SEED); for (int i = 0; i < num; i++) { values[i] = new BinaryRowData(2); BinaryRowWriter writer = new BinaryRowWriter(values[i]); writer.writeString(0, StringData.fromString("string" + rnd.nextInt())); writer.writeInt(1, rnd.nextInt()); writer.complete(); } return values; } }
apache-2.0
iloveyou416068/CookNIOServer
netty_source_4_0_25/handler/src/main/java/io/netty/handler/timeout/ReadTimeoutHandler.java
7083
/* * Copyright 2012 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package io.netty.handler.timeout; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; /** * Raises a {@link ReadTimeoutException} when no data was read within a certain * period of time. * * <pre> * // The connection is closed when there is no inbound traffic * // for 30 seconds. * * public class MyChannelInitializer extends {@link ChannelInitializer}&lt;{@link Channel}&gt; { * public void initChannel({@link Channel} channel) { * channel.pipeline().addLast("readTimeoutHandler", new {@link ReadTimeoutHandler}(30); * channel.pipeline().addLast("myHandler", new MyHandler()); * } * } * * // Handler should handle the {@link ReadTimeoutException}. * public class MyHandler extends {@link ChannelDuplexHandler} { * {@code @Override} * public void exceptionCaught({@link ChannelHandlerContext} ctx, {@link Throwable} cause) * throws {@link Exception} { * if (cause instanceof {@link ReadTimeoutException}) { * // do something * } else { * super.exceptionCaught(ctx, cause); * } * } * } * * {@link ServerBootstrap} bootstrap = ...; * ... 
* bootstrap.childHandler(new MyChannelInitializer()); * ... * </pre> * @see WriteTimeoutHandler * @see IdleStateHandler */ public class ReadTimeoutHandler extends ChannelInboundHandlerAdapter { private static final long MIN_TIMEOUT_NANOS = TimeUnit.MILLISECONDS.toNanos(1); private final long timeoutNanos; private volatile ScheduledFuture<?> timeout; private volatile long lastReadTime; private volatile int state; // 0 - none, 1 - Initialized, 2 - Destroyed; private boolean closed; /** * Creates a new instance. * * @param timeoutSeconds * read timeout in seconds */ public ReadTimeoutHandler(int timeoutSeconds) { this(timeoutSeconds, TimeUnit.SECONDS); } /** * Creates a new instance. * * @param timeout * read timeout * @param unit * the {@link TimeUnit} of {@code timeout} */ public ReadTimeoutHandler(long timeout, TimeUnit unit) { if (unit == null) { throw new NullPointerException("unit"); } if (timeout <= 0) { timeoutNanos = 0; } else { timeoutNanos = Math.max(unit.toNanos(timeout), MIN_TIMEOUT_NANOS); } } @Override public void handlerAdded(ChannelHandlerContext ctx) throws Exception { if (ctx.channel().isActive() && ctx.channel().isRegistered()) { // channelActvie() event has been fired already, which means this.channelActive() will // not be invoked. We have to initialize here instead. initialize(ctx); } else { // channelActive() event has not been fired yet. this.channelActive() will be invoked // and initialization will occur there. } } @Override public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { destroy(); } @Override public void channelRegistered(ChannelHandlerContext ctx) throws Exception { // Initialize early if channel is active already. if (ctx.channel().isActive()) { initialize(ctx); } super.channelRegistered(ctx); } @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { // This method will be invoked only if this handler was added // before channelActive() event is fired. 
If a user adds this handler // after the channelActive() event, initialize() will be called by beforeAdd(). initialize(ctx); super.channelActive(ctx); } @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { destroy(); super.channelInactive(ctx); } @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { lastReadTime = System.nanoTime(); ctx.fireChannelRead(msg); } private void initialize(ChannelHandlerContext ctx) { // Avoid the case where destroy() is called before scheduling timeouts. // See: https://github.com/netty/netty/issues/143 switch (state) { case 1: case 2: return; } state = 1; lastReadTime = System.nanoTime(); if (timeoutNanos > 0) { timeout = ctx.executor().schedule( new ReadTimeoutTask(ctx), timeoutNanos, TimeUnit.NANOSECONDS); } } private void destroy() { state = 2; if (timeout != null) { timeout.cancel(false); timeout = null; } } /** * Is called when a read timeout was detected. */ protected void readTimedOut(ChannelHandlerContext ctx) throws Exception { if (!closed) { ctx.fireExceptionCaught(ReadTimeoutException.INSTANCE); ctx.close(); closed = true; } } private final class ReadTimeoutTask implements Runnable { private final ChannelHandlerContext ctx; ReadTimeoutTask(ChannelHandlerContext ctx) { this.ctx = ctx; } @Override public void run() { if (!ctx.channel().isOpen()) { return; } long currentTime = System.nanoTime(); long nextDelay = timeoutNanos - (currentTime - lastReadTime); if (nextDelay <= 0) { // Read timed out - set a new timeout and notify the callback. timeout = ctx.executor().schedule(this, timeoutNanos, TimeUnit.NANOSECONDS); try { readTimedOut(ctx); } catch (Throwable t) { ctx.fireExceptionCaught(t); } } else { // Read occurred before the timeout - set a new timeout with shorter delay. timeout = ctx.executor().schedule(this, nextDelay, TimeUnit.NANOSECONDS); } } } }
apache-2.0
koshalt/modules
odk/src/main/java/org/motechproject/odk/tasks/ActionBuilder.java
6771
package org.motechproject.odk.tasks;

import org.motechproject.odk.constant.DisplayNames;
import org.motechproject.odk.constant.EventParameters;
import org.motechproject.odk.constant.EventSubjects;
import org.motechproject.odk.constant.TasksDataTypes;
import org.motechproject.odk.domain.FormDefinition;
import org.motechproject.odk.domain.FormElement;
import org.motechproject.tasks.contract.ActionEventRequest;
import org.motechproject.tasks.contract.builder.ActionEventRequestBuilder;
import org.motechproject.tasks.contract.ActionParameterRequest;
import org.motechproject.tasks.contract.builder.ActionParameterRequestBuilder;

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * Builds a list of {@link ActionEventRequest} from a list of {@link FormDefinition}:
 * one "persist form instance" action per form definition plus a single shared
 * "form failure" action.
 */
public class ActionBuilder {

    private List<FormDefinition> formDefinitions;

    // Running order index handed out to generated action parameters. It is
    // monotonically increasing within one build() pass and reset at the start
    // of every build() call so repeated builds produce identical output.
    private int count;

    /**
     * @param formDefinitions the form definitions to generate actions for
     */
    public ActionBuilder(List<FormDefinition> formDefinitions) {
        this.count = 0;
        this.formDefinitions = formDefinitions;
    }

    /**
     * Generates the action event requests for all form definitions plus the
     * form-failure action.
     *
     * @return the generated action event requests, failure action last
     */
    public List<ActionEventRequest> build() {
        // FIX: reset the order counter so build() is idempotent; previously a
        // second call continued numbering where the first call left off.
        count = 0;
        List<ActionEventRequest> actionEventRequests = new ArrayList<>();

        for (FormDefinition formDefinition : formDefinitions) {
            SortedSet<ActionParameterRequest> actionParameterRequests =
                    createParameterRequestsForFormDef(formDefinition);
            ActionEventRequestBuilder builder = new ActionEventRequestBuilder();
            builder
                    .setDisplayName(DisplayNames.SAVE_FORM_INSTANCE + " [Configuration : "
                            + formDefinition.getConfigurationName() + "]" + "[Title: "
                            + formDefinition.getTitle() + "]")
                    .setActionParameters(actionParameterRequests)
                    .setSubject(EventSubjects.PERSIST_FORM_INSTANCE)
                    // Name must be unique per configuration/title pair.
                    .setName(EventSubjects.PERSIST_FORM_INSTANCE + "."
                            + formDefinition.getConfigurationName() + "."
                            + formDefinition.getTitle());
            actionEventRequests.add(builder.createActionEventRequest());
        }
        actionEventRequests.add(createFormFailureAction());
        return actionEventRequests;
    }

    // Builds the single action fired when a form submission fails.
    private ActionEventRequest createFormFailureAction() {
        SortedSet<ActionParameterRequest> actionParameterRequests = createFormFailureParameters();
        ActionEventRequestBuilder builder = new ActionEventRequestBuilder();
        builder
                .setDisplayName(DisplayNames.SAVE_FORM_FAILURE)
                .setActionParameters(actionParameterRequests)
                .setSubject(EventSubjects.FORM_FAIL)
                .setName(EventSubjects.FORM_FAIL);
        return builder.createActionEventRequest();
    }

    // Parameters of the failure action: title/config plus message, exception
    // and the raw JSON content of the failed submission.
    private SortedSet<ActionParameterRequest> createFormFailureParameters() {
        SortedSet<ActionParameterRequest> actionParameterRequests =
                new TreeSet<ActionParameterRequest>();
        actionParameterRequests.addAll(createTitleAndConfigParameters());

        ActionParameterRequestBuilder builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.MESSAGE)
                .setOrder(count++)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.MESSAGE);
        actionParameterRequests.add(builder.createActionParameterRequest());

        builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.EXCEPTION)
                .setOrder(count++)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.EXCEPTION);
        actionParameterRequests.add(builder.createActionParameterRequest());

        builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.JSON_CONTENT)
                .setOrder(count++)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.JSON_CONTENT);
        actionParameterRequests.add(builder.createActionParameterRequest());

        return actionParameterRequests;
    }

    // Parameters of a persist action: the required fields plus one parameter
    // per top-level form element (repeat-group members and the instance id
    // field are excluded; the instance id is added by createRequiredFields()).
    private SortedSet<ActionParameterRequest> createParameterRequestsForFormDef(
            FormDefinition formDefinition) {
        SortedSet<ActionParameterRequest> actionParameterRequests = createRequiredFields();
        List<FormElement> formElements = formDefinition.getFormElements();
        ActionParameterRequestBuilder builder;

        for (FormElement formElement : formElements) {
            if (!formElement.isPartOfRepeatGroup()
                    && !formElement.getName().equals(EventParameters.INSTANCE_ID)) {
                builder = new ActionParameterRequestBuilder();
                builder
                        .setDisplayName(formElement.getLabel())
                        .setKey(formElement.getName())
                        .setOrder(count++)
                        .setType(TypeMapper.getType(formElement.getType()));
                actionParameterRequests.add(builder.createActionParameterRequest());
            }
        }
        return actionParameterRequests;
    }

    // Required parameters common to every persist action: title, config name
    // and the (mandatory) form instance id.
    private SortedSet<ActionParameterRequest> createRequiredFields() {
        SortedSet<ActionParameterRequest> actionParameterRequests =
                new TreeSet<ActionParameterRequest>();
        actionParameterRequests.addAll(createTitleAndConfigParameters());

        ActionParameterRequestBuilder builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.INSTANCE_ID)
                .setOrder(count++)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.INSTANCE_ID)
                .setRequired(true);
        actionParameterRequests.add(builder.createActionParameterRequest());
        return actionParameterRequests;
    }

    // Required title and configuration-name parameters shared by all actions.
    private SortedSet<ActionParameterRequest> createTitleAndConfigParameters() {
        SortedSet<ActionParameterRequest> actionParameterRequests =
                new TreeSet<ActionParameterRequest>();

        ActionParameterRequestBuilder builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.FORM_TITLE)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.FORM_TITLE)
                .setOrder(count++)
                .setRequired(true);
        actionParameterRequests.add(builder.createActionParameterRequest());

        builder = new ActionParameterRequestBuilder();
        builder
                .setDisplayName(DisplayNames.CONFIG_NAME)
                .setOrder(count++)
                .setType(TasksDataTypes.UNICODE)
                .setKey(EventParameters.CONFIGURATION_NAME)
                .setRequired(true);
        actionParameterRequests.add(builder.createActionParameterRequest());

        return actionParameterRequests;
    }
}
bsd-3-clause
stachon/XChange
xchange-poloniex/src/main/java/org/knowm/xchange/poloniex/dto/marketdata/PoloniexCurrencyInfo.java
1919
package org.knowm.xchange.poloniex.dto.marketdata;

import com.fasterxml.jackson.annotation.JsonProperty;

import java.math.BigDecimal;

/**
 * Immutable value object describing a single currency as returned by the
 * Poloniex public API (id, withdrawal fee, confirmation count, deposit
 * address and status flags).
 */
public class PoloniexCurrencyInfo {

  private final int id;
  private final String name;
  private final BigDecimal txFee;
  private final int minConf;
  private final String depositAddress;
  private final boolean disabled;
  private final boolean frozen;
  private final boolean delisted;

  /**
   * Jackson-bound constructor; each argument is populated from the JSON
   * property of the same name.
   */
  public PoloniexCurrencyInfo(
      @JsonProperty("id") int id,
      @JsonProperty("name") String name,
      @JsonProperty("txFee") BigDecimal txFee,
      @JsonProperty("minConf") int minConf,
      @JsonProperty("depositAddress") String depositAddress,
      @JsonProperty("disabled") boolean disabled,
      @JsonProperty("frozen") boolean frozen,
      @JsonProperty("delisted") boolean delisted) {

    this.id = id;
    this.name = name;
    this.txFee = txFee;
    this.minConf = minConf;
    this.depositAddress = depositAddress;
    this.disabled = disabled;
    this.frozen = frozen;
    this.delisted = delisted;
  }

  public int getId() {
    return id;
  }

  public String getName() {
    return name;
  }

  public BigDecimal getTxFee() {
    return txFee;
  }

  public int getMinConf() {
    return minConf;
  }

  public String getDepositAddress() {
    return depositAddress;
  }

  public boolean isDisabled() {
    return disabled;
  }

  public boolean isFrozen() {
    return frozen;
  }

  public boolean isDelisted() {
    return delisted;
  }

  @Override
  public String toString() {
    // Produces exactly the same string as the previous concatenation-based
    // implementation.
    StringBuilder sb = new StringBuilder("PoloniexCurrencyInfo [id=");
    sb.append(id)
        .append(", name=").append(name)
        .append(", txFee=").append(txFee)
        .append(", minConf=").append(minConf)
        .append(", depositAddress=").append(depositAddress)
        .append(", disabled=").append(disabled)
        .append(", frozen=").append(frozen)
        .append(", delisted=").append(delisted)
        .append("]");
    return sb.toString();
  }
}
mit
syl20bnr/jenkins
core/src/test/java/hudson/model/DisplayNameListenerTest.java
3056
/* * The MIT License * * Copyright (c) 2004-2011, Yahoo!, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ package hudson.model; import junit.framework.Assert; import org.junit.Test; public class DisplayNameListenerTest { @Test public void testOnCopied() throws Exception { DisplayNameListener listener = new DisplayNameListener(); StubJob src = new StubJob(); src.doSetName("src"); StubJob dest = new StubJob(); dest.doSetName("dest"); dest.setDisplayName("this should be cleared"); // make sure the displayname and the name are different at this point Assert.assertFalse(dest.getName().equals(dest.getDisplayName())); listener.onCopied(src, dest); // make sure the displayname is equals to the name as it should be null Assert.assertEquals(dest.getName(), dest.getDisplayName()); } @Test public void testOnRenamedOldNameEqualsDisplayName() throws Exception { DisplayNameListener listener = new DisplayNameListener(); final String oldName = "old job name"; final String newName = "new job name"; StubJob src = new StubJob(); src.doSetName(newName); src.setDisplayName(oldName); listener.onRenamed(src, oldName, newName); Assert.assertEquals(newName, src.getDisplayName()); } @Test public void testOnRenamedOldNameNotEqualDisplayName() throws Exception { DisplayNameListener listener = new DisplayNameListener(); final String oldName = "old job name"; final String newName = "new job name"; final String displayName = "the display name"; StubJob src = new StubJob(); src.doSetName(newName); src.setDisplayName(displayName); listener.onRenamed(src, oldName, oldName); // make sure displayname is still intact Assert.assertEquals(displayName, src.getDisplayName()); } }
mit
Mr-Eskildsen/openhab2-addons
addons/binding/org.openhab.binding.lgwebos/src/main/java/org/openhab/binding/lgwebos/internal/ChannelHandler.java
2697
/**
 * Copyright (c) 2010-2018 by the respective copyright holders.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 */
package org.openhab.binding.lgwebos.internal;

import org.eclipse.smarthome.core.types.Command;
import org.openhab.binding.lgwebos.handler.LGWebOSHandler;

import com.connectsdk.device.ConnectableDevice;

/**
 * Channel Handler mediates between connect sdk device state changes and openHAB channel events.
 *
 * @author Sebastian Prehn - initial contribution
 */
public interface ChannelHandler {

    /**
     * This method will be called whenever a command is received for this handler.
     * All implementations provide custom logic here.
     *
     * @param device may be <code>null</code> in case the device is currently offline
     * @param channelId must not be <code>null</code>
     * @param handler must not be <code>null</code>
     * @param command must not be <code>null</code>
     */
    void onReceiveCommand(ConnectableDevice device, String channelId, LGWebOSHandler handler, Command command);

    /**
     * Handle underlying subscription status if device changes online state, capabilities or channel gets linked or
     * unlinked.
     *
     * Implementation first removes any subscription via refreshSubscription and subsequently establishes any required
     * subscription on this device channel and handler.
     *
     * @param device must not be <code>null</code>
     * @param channelId must not be <code>null</code>
     * @param handler must not be <code>null</code>
     */
    void refreshSubscription(ConnectableDevice device, String channelId, LGWebOSHandler handler);

    /**
     * Removes subscriptions if there are any.
     *
     * @param device must not be <code>null</code>
     */
    void removeAnySubscription(ConnectableDevice device);

    /**
     * Callback method whenever a device disappears.
     *
     * @param device must not be <code>null</code>
     * @param channelId must not be <code>null</code>
     * @param handler must not be <code>null</code>
     */
    void onDeviceRemoved(ConnectableDevice device, String channelId, LGWebOSHandler handler);

    /**
     * Callback method whenever a device is discovered and ready to operate.
     *
     * @param device must not be <code>null</code>
     * @param channelId must not be <code>null</code>
     * @param handler must not be <code>null</code>
     */
    void onDeviceReady(ConnectableDevice device, String channelId, LGWebOSHandler handler);
}
epl-1.0
Mr-Eskildsen/openhab2-addons
addons/binding/org.openhab.binding.samsungtv/src/main/java/org/openhab/binding/samsungtv/internal/config/SamsungTvConfiguration.java
846
/**
 * Copyright (c) 2010-2018 by the respective copyright holders.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 */
package org.openhab.binding.samsungtv.internal.config;

import org.openhab.binding.samsungtv.handler.SamsungTvHandler;

/**
 * Configuration class for {@link SamsungTvHandler}.
 *
 * The String constants are the configuration-parameter keys; the public
 * fields receive the corresponding values when the framework populates
 * this object.
 *
 * @author Pauli Anttila - Initial contribution
 */
public class SamsungTvConfiguration {

    // Configuration parameter keys (must match the binding's config descriptions).
    public static final String HOST_NAME = "hostName";
    public static final String PORT = "port";
    public static final String REFRESH_INTERVAL = "refreshInterval";

    // Host name or IP address of the TV.
    public String hostName;
    // Remote-control port on the TV.
    public int port;
    // Polling interval; unit not specified here — presumably milliseconds,
    // confirm against SamsungTvHandler.
    public int refreshInterval;
}
epl-1.0
md-5/jdk10
test/langtools/tools/javac/lvti/SelfRefTest.java
693
/*
 * @test /nodynamiccopyright/
 * @bug 8177466
 * @summary Add compiler support for local variable type-inference
 * @compile/fail/ref=SelfRefTest.out -XDrawDiagnostics SelfRefTest.java
 */

import java.util.function.Function;

class SelfRefTest {

   int q() { return 42; }
   int m(int t) { return t; }

   void test(boolean cond) {
      var x = cond ? x : x; //error - self reference in own initializer
      var y = (Function<Integer, Integer>)(Integer y) -> y; //error - bad shadowing: lambda param hides 'y'
      var z = (Runnable)() -> { int z2 = m(z); }; //error - self reference from lambda body
      var w = new Object() { int w = 42; void test() { int w2 = w; } }; //ok - inner 'w' is a distinct field
      int u = u; //ok - self-init only restricted for 'var'
      int q = q(); //ok - method q() resolved before local 'q' is in scope
   }
}
gpl-2.0
Gatopo/DemosS3SP1
Demo4S3/src/com/ug/telescopio/data/ImageAdapter.java
3195
package com.ug.telescopio.data; import android.content.Context; import android.content.res.Resources; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.TextView; import com.ug.telescopio.R; public class ImageAdapter extends BaseAdapter { int[] arrayFlags = new int[]{R.drawable.brasil, R.drawable.mexico, R.drawable.colombia, R.drawable.argentina, R.drawable.peru, R.drawable.venezuela, R.drawable.chile, R.drawable.ecuador, R.drawable.guatemala, R.drawable.cuba}; String[] arrayCountries = new String[]{"Brasil", "MŽxico", "Colombia", "Argentina", "Perœ", "Venezuela", "Chile", "Ecuador", "Guatemala", "Cuba"}; private Resources resources; private LayoutInflater inflater; public ImageAdapter(Context context) { this.resources = context.getResources(); this.inflater = LayoutInflater.from(context); } @Override public int getCount() { return arrayFlags.length; } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { ViewHolder holder; if (convertView == null) { convertView = inflater.inflate(R.layout.grid_image, null); holder = new ViewHolder(); holder.imgFlag = (ImageView) convertView.findViewById(R.id.imgFlag); holder.txtName = (TextView) convertView.findViewById(R.id.txtName); convertView.setTag(holder); } else { holder = (ViewHolder) convertView.getTag(); } holder.imgFlag.setImageBitmap( decodeSampledBitmapFromResource(resources, arrayFlags[position], 400, 200)); holder.txtName.setText(arrayCountries[position]); return convertView; } public static Bitmap decodeSampledBitmapFromResource(Resources res, int resId, int reqWidth, int reqHeight) { final BitmapFactory.Options options = new BitmapFactory.Options(); 
options.inJustDecodeBounds = true; BitmapFactory.decodeResource(res, resId, options); options.inSampleSize = calculateInSampleSize(options, reqWidth, reqHeight); options.inJustDecodeBounds = false; return BitmapFactory.decodeResource(res, resId, options); } public static int calculateInSampleSize( BitmapFactory.Options options, int reqWidth, int reqHeight) { final int height = options.outHeight; final int width = options.outWidth; int inSampleSize = 1; if (height > reqHeight || width > reqWidth) { final int heightRatio = Math.round((float) height / (float) reqHeight); final int widthRatio = Math.round((float) width / (float) reqWidth); inSampleSize = heightRatio < widthRatio ? heightRatio : widthRatio; } return inSampleSize; } static class ViewHolder { public ImageView imgFlag; public TextView txtName; } }
gpl-2.0
md-5/jdk10
test/hotspot/jtreg/vmTestbase/vm/share/gc/TriggerUnloadingHelper.java
1233
/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package vm.share.gc;

import nsk.share.test.ExecutionController;

/**
 * Strategy interface for provoking unloading in GC stress tests.
 * NOTE(review): implementations presumably trigger unloading via GC
 * pressure or explicit hooks — confirm against the concrete helpers.
 */
public interface TriggerUnloadingHelper {

    /**
     * Attempts to trigger unloading.
     *
     * @param stresser execution controller supplied by the test harness;
     *        presumably bounds how long the attempt may run — confirm with callers
     */
    public void triggerUnloading(ExecutionController stresser);
}
gpl-2.0
mrmook/libs-for-android
src/com/google/android/accounts/CupcakeContentSyncer.java
8188
/*-
 * Copyright (C) 2010 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.android.accounts;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;

import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.content.pm.ServiceInfo;
import android.content.res.Resources;
import android.content.res.XmlResourceParser;
import android.os.Bundle;
import android.os.SystemClock;
import android.text.format.DateUtils;
import android.util.Log;

import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * A Cupcake-compatible implementation of {@link ContentSyncer} where sync is
 * executed by a per-application {@link AbstractSyncService} and sync
 * preferences are stored in a database managed by a
 * {@link DatabaseAuthenticator} declared in the manifest.
 */
class CupcakeContentSyncer extends ContentSyncer {

    private static final String LOG_TAG = "CupcakeContentSyncer";

    // XML namespace of the android: attributes in sync-adapter meta-data.
    private static final String NAMESPACE = "http://schemas.android.com/apk/res/android";

    // Adapted from com.android.internal.util.XmlUtils
    // Advances the parser to the first START_TAG and verifies its name.
    private static final void beginDocument(XmlPullParser parser, String firstElementName)
            throws XmlPullParserException, IOException {
        int type;
        while ((type = parser.next()) != XmlPullParser.START_TAG
                && type != XmlPullParser.END_DOCUMENT) {
        }
        if (type != XmlPullParser.START_TAG) {
            throw new XmlPullParserException("No start tag found");
        }
        if (!parser.getName().equals(firstElementName)) {
            throw new XmlPullParserException("Unexpected start tag: found " + parser.getName()
                    + ", expected " + firstElementName);
        }
    }

    private Context mContext;

    // Authenticators keyed by account type; built once at construction.
    private Map<String, DatabaseAuthenticator> mAuthenticators;

    public CupcakeContentSyncer(Context context) {
        mContext = context;
        mAuthenticators = DatabaseAuthenticator.createDatabaseAuthenticators(context);
    }

    // Returns the authenticator for the given account type, or null if none.
    private DatabaseAuthenticator getAuthenticator(String type) {
        return mAuthenticators.get(type);
    }

    @Override
    public void setIsSyncable(Account account, String authority, int syncable) {
        if (account == null || authority == null) {
            throw new NullPointerException();
        }
        // All accounts are syncable by default
    }

    @Override
    public int getIsSyncable(Account account, String authority) {
        if (account == null || authority == null) {
            throw new NullPointerException();
        }
        // All accounts are syncable by default
        return 1;
    }

    @Override
    public void setSyncAutomatically(Account account, String authority, boolean sync) {
        if (account == null || authority == null) {
            throw new NullPointerException();
        }
        // Silently a no-op when no authenticator exists for the account type.
        DatabaseAuthenticator authenticator = getAuthenticator(account.type);
        if (authenticator != null) {
            authenticator.setSyncAutomatically(account, authority, sync);
        }
    }

    @Override
    public boolean getSyncAutomatically(Account account, String authority) {
        if (account == null || authority == null) {
            throw new NullPointerException();
        }
        DatabaseAuthenticator authenticator = getAuthenticator(account.type);
        if (authenticator != null) {
            return authenticator.getSyncAutomatically(account, authority);
        } else {
            // No authenticator for this account type: treat as not auto-syncing.
            return false;
        }
    }

    // Finds all services declaring the android.content.SyncAdapter intent,
    // including their meta-data (needed by requestSync()).
    private List<ResolveInfo> querySyncAdapterServices() {
        PackageManager pm = mContext.getPackageManager();
        Intent intent = new Intent("android.content.SyncAdapter");
        int flags = PackageManager.GET_META_DATA;
        return pm.queryIntentServices(intent, flags);
    }

    /**
     * Starts the sync service whose sync-adapter meta-data matches the given
     * account type and authority, forwarding account/authority/extras to it.
     * Parse or lookup failures for one service are logged and skipped so the
     * remaining candidates are still examined.
     */
    @Override
    public void requestSync(Account account, String authority, Bundle extras) {
        if (account == null || authority == null) {
            throw new NullPointerException();
        }
        for (ResolveInfo service : querySyncAdapterServices()) {
            try {
                ServiceInfo info = service.serviceInfo;
                PackageManager pm = mContext.getPackageManager();
                Resources resources = pm.getResourcesForApplication(info.packageName);
                Bundle metaData = info.metaData;
                int resId = metaData.getInt("android.content.SyncAdapter");
                XmlResourceParser xml = resources.getXml(resId);
                try {
                    beginDocument(xml, "sync-adapter");
                    String contentAuthority = xml.getAttributeValue(NAMESPACE, "contentAuthority");
                    String accountType = xml.getAttributeValue(NAMESPACE, "accountType");
                    String supportsUploadingValue = xml.getAttributeValue(NAMESPACE,
                            "supportsUploading");
                    boolean supportsUploading = "true".equals(supportsUploadingValue);
                    if (contentAuthority.equals(authority)) {
                        if (accountType.equals(account.type)) {
                            // Matching adapter: hand the sync request to its service.
                            Intent serviceIntent = new Intent(
                                    AbstractSyncService.ACTION_REQUEST_SYNC);
                            serviceIntent.setClassName(info.packageName, info.name);
                            serviceIntent.putExtra(AbstractSyncService.EXTRA_ACCOUNT_NAME,
                                    account.name);
                            serviceIntent.putExtra(AbstractSyncService.EXTRA_ACCOUNT_TYPE,
                                    account.type);
                            serviceIntent.putExtra(AbstractSyncService.EXTRA_AUTHORITY, authority);
                            serviceIntent.putExtra(AbstractSyncService.EXTRA_BUNDLE, extras);
                            serviceIntent.putExtra(AbstractSyncService.EXTRA_SUPPORTS_UPLOADING,
                                    supportsUploading);
                            mContext.startService(serviceIntent);
                        }
                    }
                } finally {
                    xml.close();
                }
            } catch (Exception e) {
                // Best-effort: a malformed adapter declaration must not abort
                // sync requests for the other installed adapters.
                Log.e(LOG_TAG, "Could not read SyncAdapter meta-data", e);
            }
        }
    }

    private AlarmManager getAlarmManager() {
        return (AlarmManager) mContext.getSystemService(Context.ALARM_SERVICE);
    }

    // Builds the PendingIntent identifying a periodic sync; the same intent is
    // used both to schedule (addPeriodicSync) and cancel (removePeriodicSync).
    private PendingIntent createOperation(Account account, String authority, Bundle extras) {
        return PeriodicSyncReceiver.createPendingIntent(mContext, account, authority, extras);
    }

    /**
     * Schedules an inexact repeating alarm that fires a periodic sync.
     *
     * @param pollFrequency poll interval in seconds (converted to ms here)
     */
    @Override
    public void addPeriodicSync(Account account, String authority, Bundle extras,
            long pollFrequency) {
        long pollFrequencyMsec = pollFrequency * DateUtils.SECOND_IN_MILLIS;
        AlarmManager manager = getAlarmManager();
        int type = AlarmManager.ELAPSED_REALTIME_WAKEUP;
        long triggerAtTime = SystemClock.elapsedRealtime() + pollFrequencyMsec;
        long interval = pollFrequencyMsec;
        PendingIntent operation = createOperation(account, authority, extras);
        manager.setInexactRepeating(type, triggerAtTime, interval, operation);
    }

    /**
     * Cancels the periodic sync previously scheduled with the same
     * account/authority/extras triple.
     */
    @Override
    public void removePeriodicSync(Account account, String authority, Bundle extras) {
        AlarmManager manager = getAlarmManager();
        PendingIntent operation = createOperation(account, authority, extras);
        manager.cancel(operation);
    }
}
apache-2.0
akuznetsov-gridgain/ignite
modules/visor-plugins/src/main/java/org/apache/ignite/visor/plugin/VisorPlugin.java
1673
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.visor.plugin; import ro.fortsoft.pf4j.*; /** * Base class for Visor plugins. */ public abstract class VisorPlugin extends Plugin { /** * Constructor to be used by plugin manager for plugin instantiation. * Your plugins have to provide constructor with this exact signature to * be successfully loaded by manager. * * @param wrapper A wrapper over plugin instance. */ protected VisorPlugin(PluginWrapper wrapper) { super(wrapper); } /** * @return Plugin name. */ public abstract String name(); /** {@inheritDoc} */ @Override public void start() throws PluginException { log.info("Plugin Started: " + name()); } /** {@inheritDoc} */ @Override public void stop() throws PluginException { log.info("Plugin stopped: " + name()); } }
apache-2.0
myroch/drill
exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/BasicPhysicalOpUnitTest.java
13486
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.drill.exec.physical.unit; import com.google.common.collect.Lists; import org.apache.calcite.rel.RelFieldCollation; import org.apache.calcite.rel.core.JoinRelType; import org.apache.drill.exec.physical.MinorFragmentEndpoint; import org.apache.drill.exec.physical.base.GroupScan; import org.apache.drill.exec.physical.base.PhysicalOperator; import org.apache.drill.exec.physical.base.SubScan; import org.apache.drill.exec.physical.config.ComplexToJson; import org.apache.drill.exec.physical.config.ExternalSort; import org.apache.drill.exec.physical.config.Filter; import org.apache.drill.exec.physical.config.HashAggregate; import org.apache.drill.exec.physical.config.HashJoinPOP; import org.apache.drill.exec.physical.config.MergeJoinPOP; import org.apache.drill.exec.physical.config.MergingReceiverPOP; import org.apache.drill.exec.physical.config.Project; import org.apache.drill.exec.physical.config.StreamingAggregate; import org.apache.drill.exec.physical.config.TopN; import org.junit.Ignore; import org.junit.Test; import java.lang.reflect.Constructor; import java.util.IdentityHashMap; import java.util.List; import java.util.Set; import static 
org.apache.drill.TestBuilder.mapOf; public class BasicPhysicalOpUnitTest extends PhysicalOpUnitTestBase { @Test public void testSimpleProject() { Project projectConf = new Project(parseExprs("x+5", "x"), null); List<String> jsonBatches = Lists.newArrayList( "[{\"x\": 5 },{\"x\": 10 }]", "[{\"x\": 20 },{\"x\": 30 },{\"x\": 40 }]"); opTestBuilder() .physicalOperator(projectConf) .inputDataStreamJson(jsonBatches) .baselineColumns("x") .baselineValues(10l) .baselineValues(15l) .baselineValues(25l) .baselineValues(35l) .baselineValues(45l) .go(); } @Test public void testProjectComplexOutput() { Project projectConf = new Project(parseExprs("convert_from(json_col, 'JSON')", "complex_col"), null); List<String> jsonBatches = Lists.newArrayList( "[{\"json_col\": \"{ \\\"a\\\" : 1 }\"}]", "[{\"json_col\": \"{ \\\"a\\\" : 5 }\"}]"); opTestBuilder() .physicalOperator(projectConf) .inputDataStreamJson(jsonBatches) .baselineColumns("complex_col") .baselineValues(mapOf("a", 1l)) .baselineValues(mapOf("a", 5l)) .go(); } @Test public void testSimpleHashJoin() { HashJoinPOP joinConf = new HashJoinPOP(null, null, Lists.newArrayList(joinCond("x", "EQUALS", "x1")), JoinRelType.LEFT); // TODO - figure out where to add validation, column names must be unique, even between the two batches, // for all columns, not just the one in the join condition // TODO - if any are common between the two, it is failing in the generated setup method in HashJoinProbeGen List<String> leftJsonBatches = Lists.newArrayList( "[{\"x\": 5, \"a\" : \"a string\"}]", "[{\"x\": 5, \"a\" : \"a different string\"},{\"x\": 5, \"a\" : \"meh\"}]"); List<String> rightJsonBatches = Lists.newArrayList( "[{\"x1\": 5, \"a2\" : \"asdf\"}]", "[{\"x1\": 6, \"a2\" : \"qwerty\"},{\"x1\": 5, \"a2\" : \"12345\"}]"); opTestBuilder() .physicalOperator(joinConf) .inputDataStreamsJson(Lists.newArrayList(leftJsonBatches, rightJsonBatches)) .baselineColumns("x", "a", "a2", "x1") .baselineValues(5l, "a string", "asdf", 5l) 
.baselineValues(5l, "a string", "12345", 5l) .baselineValues(5l, "a different string", "asdf", 5l) .baselineValues(5l, "a different string", "12345", 5l) .baselineValues(5l, "meh", "asdf", 5l) .baselineValues(5l, "meh", "12345", 5l) .go(); } @Test public void testSimpleMergeJoin() { MergeJoinPOP joinConf = new MergeJoinPOP(null, null, Lists.newArrayList(joinCond("x", "EQUALS", "x1")), JoinRelType.LEFT); // TODO - figure out where to add validation, column names must be unique, even between the two batches, // for all columns, not just the one in the join condition List<String> leftJsonBatches = Lists.newArrayList( "[{\"x\": 5, \"a\" : \"a string\"}]", "[{\"x\": 5, \"a\" : \"a different string\"},{\"x\": 5, \"a\" : \"meh\"}]"); List<String> rightJsonBatches = Lists.newArrayList( "[{\"x1\": 5, \"a2\" : \"asdf\"}]", "[{\"x1\": 5, \"a2\" : \"12345\"}, {\"x1\": 6, \"a2\" : \"qwerty\"}]"); opTestBuilder() .physicalOperator(joinConf) .inputDataStreamsJson(Lists.newArrayList(leftJsonBatches, rightJsonBatches)) .baselineColumns("x", "a", "a2", "x1") .baselineValues(5l, "a string", "asdf", 5l) .baselineValues(5l, "a string", "12345", 5l) .baselineValues(5l, "a different string", "asdf", 5l) .baselineValues(5l, "a different string", "12345", 5l) .baselineValues(5l, "meh", "asdf", 5l) .baselineValues(5l, "meh", "12345", 5l) .go(); } @Test public void testSimpleHashAgg() { HashAggregate aggConf = new HashAggregate(null, parseExprs("a", "a"), parseExprs("sum(b)", "b_sum"), 1.0f); List<String> inputJsonBatches = Lists.newArrayList( "[{\"a\": 5, \"b\" : 1 }]", "[{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8}]"); opTestBuilder() .physicalOperator(aggConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("b_sum", "a") .baselineValues(6l, 5l) .baselineValues(8l, 3l) .go(); } @Test public void testSimpleStreamAgg() { StreamingAggregate aggConf = new StreamingAggregate(null, parseExprs("a", "a"), parseExprs("sum(b)", "b_sum"), 1.0f); List<String> inputJsonBatches = 
Lists.newArrayList( "[{\"a\": 5, \"b\" : 1 }]", "[{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8}]"); opTestBuilder() .physicalOperator(aggConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("b_sum", "a") .baselineValues(6l, 5l) .baselineValues(8l, 3l) .go(); } @Test public void testComplexToJson() { ComplexToJson complexToJson = new ComplexToJson(null); List<String> inputJsonBatches = Lists.newArrayList( "[{\"a\": {\"b\" : 1 }}]", "[{\"a\": {\"b\" : 5}},{\"a\": {\"b\" : 8}}]"); opTestBuilder() .physicalOperator(complexToJson) .inputDataStreamJson(inputJsonBatches) .baselineColumns("a") .baselineValues("{\n \"b\" : 1\n}") .baselineValues("{\n \"b\" : 5\n}") .baselineValues("{\n \"b\" : 8\n}") .go(); } @Test public void testFilter() { Filter filterConf = new Filter(null, parseExpr("a=5"), 1.0f); List<String> inputJsonBatches = Lists.newArrayList( "[{\"a\": 5, \"b\" : 1 }]", "[{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8}]", "[{\"a\": 40, \"b\" : 3},{\"a\": 13, \"b\" : 100}]"); opTestBuilder() .physicalOperator(filterConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("a", "b") .baselineValues(5l, 1l) .baselineValues(5l, 5l) .go(); } @Test public void testExternalSort() { ExternalSort sortConf = new ExternalSort(null, Lists.newArrayList(ordering("b", RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)), false); List<String> inputJsonBatches = Lists.newArrayList( "[{\"a\": 5, \"b\" : 1 }]", "[{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8}]", "[{\"a\": 40, \"b\" : 3},{\"a\": 13, \"b\" : 100}]"); opTestBuilder() .physicalOperator(sortConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("a", "b") .baselineValues(5l, 1l) .baselineValues(40l, 3l) .baselineValues(5l, 5l) .baselineValues(3l, 8l) .baselineValues(13l, 100l) .go(); } private void externalSortLowMemoryHelper(int batchSize, int numberOfBatches, long initReservation, long maxAllocation) { ExternalSort sortConf = new ExternalSort(null, Lists.newArrayList(ordering("b", 
RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)), false); List<String> inputJsonBatches = Lists.newArrayList(); StringBuilder batchString = new StringBuilder(); for (int j = 0; j < numberOfBatches; j++) { batchString.append("["); for (int i = 0; i < batchSize; i++) { batchString.append("{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8},"); } batchString.append("{\"a\": 5, \"b\" : 1 }"); batchString.append("]"); inputJsonBatches.add(batchString.toString()); } OperatorTestBuilder opTestBuilder = opTestBuilder() .initReservation(initReservation) .maxAllocation(maxAllocation) .physicalOperator(sortConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("a", "b"); for (int i = 0; i < numberOfBatches; i++) { opTestBuilder.baselineValues(5l, 1l); } for (int i = 0; i < batchSize * numberOfBatches; i++) { opTestBuilder.baselineValues(5l, 5l); } for (int i = 0; i < batchSize * numberOfBatches; i++) { opTestBuilder.baselineValues(3l, 8l); } opTestBuilder.go(); } // TODO - Failing with - org.apache.drill.exec.exception.OutOfMemoryException: Unable to allocate buffer of size 262144 (rounded from 147456) due to memory limit. Current allocation: 16422656 // look in ExternalSortBatch for this JIRA number, changing this percentage of the allocator limit that is // the threshold for spilling (it worked with 0.65 for me) "fixed" the problem but hurt perf, will want // to find a better solutions to this problem. 
When it is fixed this threshold will likely become unnecessary @Test @Ignore("DRILL-4438") public void testExternalSortLowMemory1() { externalSortLowMemoryHelper(4960, 100, 10000000, 16500000); } // TODO- believe this was failing in the scan not the sort, may not require a fix @Test @Ignore("DRILL-4438") public void testExternalSortLowMemory2() { externalSortLowMemoryHelper(4960, 100, 10000000, 15000000); } // TODO - believe this was failing in the scan not the sort, may not require a fix @Test @Ignore("DRILL-4438") public void testExternalSortLowMemory3() { externalSortLowMemoryHelper(40960, 10, 10000000, 10000000); } // TODO - Failing with - org.apache.drill.exec.exception.OutOfMemoryException: Unable to allocate sv2 buffer after repeated attempts // see comment above testExternalSortLowMemory1 about TODO left in ExternalSortBatch @Test @Ignore("DRILL-4438") public void testExternalSortLowMemory4() { externalSortLowMemoryHelper(15960, 30, 10000000, 14500000); } @Test public void testTopN() { TopN sortConf = new TopN(null, Lists.newArrayList(ordering("b", RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)), false, 3); List<String> inputJsonBatches = Lists.newArrayList( "[{\"a\": 5, \"b\" : 1 }]", "[{\"a\": 5, \"b\" : 5},{\"a\": 3, \"b\" : 8}]", "[{\"a\": 40, \"b\" : 3},{\"a\": 13, \"b\" : 100}]"); opTestBuilder() .physicalOperator(sortConf) .inputDataStreamJson(inputJsonBatches) .baselineColumns("a", "b") .baselineValues(5l, 1l) .baselineValues(40l, 3l) .baselineValues(5l, 5l) .go(); } // TODO(DRILL-4439) - doesn't expect incoming batches, uses instead RawFragmentBatch // need to figure out how to mock these @Ignore @Test public void testSimpleMergingReceiver() { MergingReceiverPOP mergeConf = new MergingReceiverPOP(-1, Lists.<MinorFragmentEndpoint>newArrayList(), Lists.newArrayList(ordering("x", RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)), false); List<String> leftJsonBatches = Lists.newArrayList( 
"[{\"x\": 5, \"a\" : \"a string\"}]", "[{\"x\": 5, \"a\" : \"a different string\"},{\"x\": 5, \"a\" : \"meh\"}]"); List<String> rightJsonBatches = Lists.newArrayList( "[{\"x\": 5, \"a\" : \"asdf\"}]", "[{\"x\": 5, \"a\" : \"12345\"}, {\"x\": 6, \"a\" : \"qwerty\"}]"); opTestBuilder() .physicalOperator(mergeConf) .inputDataStreamsJson(Lists.newArrayList(leftJsonBatches, rightJsonBatches)) .baselineColumns("x", "a") .baselineValues(5l, "a string") .baselineValues(5l, "a different string") .baselineValues(5l, "meh") .baselineValues(5l, "asdf") .baselineValues(5l, "12345") .baselineValues(6l, "qwerty") .go(); } }
apache-2.0
zuoyebushiwo/apache-mina-2.0.9
src/mina-core/src/test/java/org/apache/mina/core/DefaultIoFilterChainBuilderTest.java
4834
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ package org.apache.mina.core; import org.apache.mina.core.filterchain.DefaultIoFilterChainBuilder; import org.apache.mina.core.filterchain.IoFilter; import org.apache.mina.core.filterchain.IoFilterAdapter; import org.apache.mina.core.filterchain.IoFilterChain.Entry; import org.apache.mina.filter.util.NoopFilter; import org.junit.After; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; /** * Tests {@link DefaultIoFilterChainBuilder}. 
* * @author <a href="http://mina.apache.org">Apache MINA Project</a> */ public class DefaultIoFilterChainBuilderTest { @Before public void setUp() throws Exception { // Do nothing } @After public void tearDown() throws Exception { // Do nothing } @Test public void testAdd() throws Exception { DefaultIoFilterChainBuilder builder = new DefaultIoFilterChainBuilder(); builder.addFirst("A", new NoopFilter()); builder.addLast("B", new NoopFilter()); builder.addFirst("C", new NoopFilter()); builder.addLast("D", new NoopFilter()); builder.addBefore("B", "E", new NoopFilter()); builder.addBefore("C", "F", new NoopFilter()); builder.addAfter("B", "G", new NoopFilter()); builder.addAfter("D", "H", new NoopFilter()); String actual = ""; for (Entry e : builder.getAll()) { actual += e.getName(); } assertEquals("FCAEBGDH", actual); } @Test public void testGet() throws Exception { DefaultIoFilterChainBuilder builder = new DefaultIoFilterChainBuilder(); IoFilter filterA = new NoopFilter(); IoFilter filterB = new NoopFilter(); IoFilter filterC = new NoopFilter(); IoFilter filterD = new NoopFilter(); builder.addFirst("A", filterA); builder.addLast("B", filterB); builder.addBefore("B", "C", filterC); builder.addAfter("A", "D", filterD); assertSame(filterA, builder.get("A")); assertSame(filterB, builder.get("B")); assertSame(filterC, builder.get("C")); assertSame(filterD, builder.get("D")); } @Test public void testRemove() throws Exception { DefaultIoFilterChainBuilder builder = new DefaultIoFilterChainBuilder(); builder.addLast("A", new NoopFilter()); builder.addLast("B", new NoopFilter()); builder.addLast("C", new NoopFilter()); builder.addLast("D", new NoopFilter()); builder.addLast("E", new NoopFilter()); builder.remove("A"); builder.remove("E"); builder.remove("C"); builder.remove("B"); builder.remove("D"); assertEquals(0, builder.getAll().size()); } @Test public void testClear() throws Exception { DefaultIoFilterChainBuilder builder = new DefaultIoFilterChainBuilder(); 
builder.addLast("A", new NoopFilter()); builder.addLast("B", new NoopFilter()); builder.addLast("C", new NoopFilter()); builder.addLast("D", new NoopFilter()); builder.addLast("E", new NoopFilter()); builder.clear(); assertEquals(0, builder.getAll().size()); } @Test public void testToString() { DefaultIoFilterChainBuilder builder = new DefaultIoFilterChainBuilder(); // When the chain is empty assertEquals("{ empty }", builder.toString()); // When there's one filter builder.addLast("A", new IoFilterAdapter() { @Override public String toString() { return "B"; } }); assertEquals("{ (A:B) }", builder.toString()); // When there are two builder.addLast("C", new IoFilterAdapter() { @Override public String toString() { return "D"; } }); assertEquals("{ (A:B), (C:D) }", builder.toString()); } }
apache-2.0
flowable/flowable-engine
modules/flowable-dmn-model/src/main/java/org/flowable/dmn/model/InputClause.java
1360
/* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.flowable.dmn.model; /** * @author Yvo Swillens */ public class InputClause extends DmnElement { protected LiteralExpression inputExpression; protected UnaryTests inputValues; protected int inputNumber; public LiteralExpression getInputExpression() { return inputExpression; } public void setInputExpression(LiteralExpression inputExpression) { this.inputExpression = inputExpression; } public UnaryTests getInputValues() { return inputValues; } public void setInputValues(UnaryTests inputValues) { this.inputValues = inputValues; } public int getInputNumber() { return inputNumber; } public void setInputNumber(int inputNumber) { this.inputNumber = inputNumber; } }
apache-2.0
ameybarve15/incubator-geode
gemfire-core/src/main/java/com/gemstone/gemfire/cache/CustomExpiry.java
1316
/*========================================================================= * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved. * This product is protected by U.S. and international copyright * and intellectual property laws. Pivotal products are covered by * more patents listed at http://www.pivotal.io/patents. *========================================================================= */ package com.gemstone.gemfire.cache; /** * This is the contract that a <code>custom-expiry</code> element must honor. * It determines the expiration characteristics for a specific entry in a region. * <p>Note that if you wish to refer to an implementation of this interface in XML, * the implementation must also implement the Declarable interface. * * @author jpenney * */ public interface CustomExpiry<K,V> extends CacheCallback { /** * Calculate the expiration for a given entry. * Returning null indicates that the * default for the region should be used. * <p> * The entry parameter should not be used after this method invocation completes. * @param entry the entry to calculate the expiration for * @return the expiration to be used, null if the region's defaults should be * used. */ public ExpirationAttributes getExpiry(Region.Entry<K,V> entry); }
apache-2.0
OnePaaS/droolsjbpm-integration
droolsjbpm-integration-examples/src/main/java/org/drools/examples/broker/misc/BrokerUtils.java
1233
/* * Copyright 2010 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.examples.broker.misc; import java.text.DecimalFormat; import java.util.Random; import org.drools.examples.broker.model.Action; public class BrokerUtils { private static final Random rand = new Random(System.currentTimeMillis()); public static String percent( double number ) { return new DecimalFormat( "0.00%" ).format( number ); } public static Action selectAction() { int action = rand.nextInt( 3 ); switch (action) { case 1 : return Action.BUY; case 2 : return Action.SELL; default : return Action.NOACTION; } } }
apache-2.0
ern/elasticsearch
x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java
5315
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ package org.elasticsearch.xpack.security.authc; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.BeforeClass; import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; import static org.hamcrest.Matchers.is; public class RunAsIntegTests extends SecurityIntegTestCase { private static final String RUN_AS_USER = "run_as_user"; private static final String CLIENT_USER = "transport_user"; private static final String ROLES = "run_as_role:\n" + " run_as: [ '" + SecuritySettingsSource.TEST_USER_NAME + "', 'idontexist' ]\n"; // indicates whether the RUN_AS_USER that is being authenticated is also a superuser private static boolean runAsHasSuperUserRole; @BeforeClass public static void configureRunAsHasSuperUserRole() { runAsHasSuperUserRole = randomBoolean(); } @Override protected boolean addMockHttpTransport() { return false; // enable http } @Override public String configRoles() { return ROLES + super.configRoles(); } @Override public String configUsers() { return super.configUsers() + RUN_AS_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n" + CLIENT_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n"; } @Override public String configUsersRoles() { String roles = super.configUsersRoles() + "run_as_role:" + RUN_AS_USER + "\n" + "transport_client:" + CLIENT_USER; if (runAsHasSuperUserRole) { roles = 
roles + "\n" + "superuser:" + RUN_AS_USER; } return roles; } @Override protected boolean transportSSLEnabled() { return false; } public void testUserImpersonationUsingHttp() throws Exception { // use the http user and try to run as try { Request request = new Request("GET", "/_nodes"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(CLIENT_USER, TEST_PASSWORD_SECURE_STRING)); options.addHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME); request.setOptions(options); getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } if (runAsHasSuperUserRole == false) { try { //the run as user shouldn't have access to the nodes api Request request = new Request("GET", "/_nodes"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, TEST_PASSWORD_SECURE_STRING)); request.setOptions(options); getRestClient().performRequest(request); fail("request should have failed"); } catch (ResponseException e) { assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } // but when running as a different user it should work getRestClient().performRequest(requestForUserRunAsUser(SecuritySettingsSource.TEST_USER_NAME)); } public void testEmptyHeaderUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("")); fail("request should have failed"); } catch(ResponseException e) { assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); } } public void testNonExistentRunAsUserUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("idontexist")); fail("request should have failed"); } catch (ResponseException e) { 
assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } private static Request requestForUserRunAsUser(String user) { Request request = new Request("GET", "/_nodes"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, TEST_PASSWORD_SECURE_STRING)); options.addHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, user); request.setOptions(options); return request; } }
apache-2.0
renatoathaydes/checker-framework
checker/jdk/nullness/src/java/util/regex/Matcher.java
3047
package java.util.regex; import org.checkerframework.dataflow.qual.Pure; import org.checkerframework.dataflow.qual.SideEffectFree; import org.checkerframework.checker.nullness.qual.Nullable; public final class Matcher implements MatchResult { protected Matcher() {} public Pattern pattern() { throw new RuntimeException("skeleton method"); } public MatchResult toMatchResult() { throw new RuntimeException("skeleton method"); } public Matcher usePattern(Pattern a1) { throw new RuntimeException("skeleton method"); } public Matcher reset() { throw new RuntimeException("skeleton method"); } public Matcher reset(CharSequence a1) { throw new RuntimeException("skeleton method"); } @Pure public int start() { throw new RuntimeException("skeleton method"); } @Pure public int start(int a1) { throw new RuntimeException("skeleton method"); } @Pure public int end() { throw new RuntimeException("skeleton method"); } @Pure public int end(int a1) { throw new RuntimeException("skeleton method"); } @SideEffectFree public String group() { throw new RuntimeException("skeleton method"); } @SideEffectFree public @Nullable String group(int a1) { throw new RuntimeException("skeleton method"); } @Pure public int groupCount() { throw new RuntimeException("skeleton method"); } public boolean matches() { throw new RuntimeException("skeleton method"); } public boolean find() { throw new RuntimeException("skeleton method"); } public boolean find(int a1) { throw new RuntimeException("skeleton method"); } public boolean lookingAt() { throw new RuntimeException("skeleton method"); } public static String quoteReplacement(String a1) { throw new RuntimeException("skeleton method"); } public Matcher appendReplacement(StringBuffer a1, String a2) { throw new RuntimeException("skeleton method"); } public StringBuffer appendTail(StringBuffer a1) { throw new RuntimeException("skeleton method"); } public String replaceAll(String a1) { throw new RuntimeException("skeleton method"); } public String 
replaceFirst(String a1) { throw new RuntimeException("skeleton method"); } public Matcher region(int a1, int a2) { throw new RuntimeException("skeleton method"); } @Pure public int regionStart() { throw new RuntimeException("skeleton method"); } @Pure public int regionEnd() { throw new RuntimeException("skeleton method"); } @Pure public boolean hasTransparentBounds() { throw new RuntimeException("skeleton method"); } public Matcher useTransparentBounds(boolean a1) { throw new RuntimeException("skeleton method"); } @Pure public boolean hasAnchoringBounds() { throw new RuntimeException("skeleton method"); } public Matcher useAnchoringBounds(boolean a1) { throw new RuntimeException("skeleton method"); } @SideEffectFree public String toString() { throw new RuntimeException("skeleton method"); } @Pure public boolean hitEnd() { throw new RuntimeException("skeleton method"); } @Pure public boolean requireEnd() { throw new RuntimeException("skeleton method"); } }
gpl-2.0
2q1w2007/BlackLight
blacklight-base/src/main/java/info/papdt/blacklight/ui/main/MultiUserFragment.java
3343
/* * Copyright (C) 2015 Peter Cai * * This file is part of BlackLight * * BlackLight is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * BlackLight is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with BlackLight. If not, see <http://www.gnu.org/licenses/>. */ package info.papdt.blacklight.ui.main; import android.app.Activity; import android.app.AlarmManager; import android.app.Fragment; import android.app.PendingIntent; import android.app.ProgressDialog; import android.content.Context; import android.content.Intent; import android.os.Bundle; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.ListView; import android.widget.Toast; import info.papdt.blacklight.R; import info.papdt.blacklight.cache.login.LoginApiCache; import info.papdt.blacklight.support.AsyncTask; import info.papdt.blacklight.support.Utility; public class MultiUserFragment extends Fragment implements AdapterView.OnItemClickListener { private ListView mList; private ArrayAdapter<String> mAdapter; private LoginApiCache mCache; @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View v = inflater.inflate(R.layout.main_drawer_group, container, false); mList = Utility.findViewById(v, R.id.drawer_group_list); if (mAdapter != null) { mList.setAdapter(mAdapter); } mList.setOnItemClickListener(this); return v; } @Override public void onAttach(Activity activity) { 
super.onAttach(activity); mCache = new LoginApiCache(activity); reload(activity); } @Override public void onItemClick(AdapterView<?> parent, View view, int pos, long id) { new SwitchTask().execute(pos); } public void reload() { mCache.reloadMultiUser(); reload(getActivity()); } private void reload(Activity activity) { mAdapter = new ArrayAdapter<String>(activity, R.layout.main_drawer_group_item, R.id.group_title, mCache.getUserNames()); if (mList != null) mList.setAdapter(mAdapter); } private class SwitchTask extends AsyncTask<Integer, Void, Void> { ProgressDialog prog; @Override protected void onPreExecute() { mMuCallBack.closeDrawer(); prog = new ProgressDialog(getActivity()); prog.setMessage(getResources().getString(R.string.plz_wait)); prog.setCancelable(false); prog.show(); } @Override protected Void doInBackground(Integer... params) { mCache.switchToUser(params[0]); return null; } @Override protected void onPostExecute(Void result) { super.onPostExecute(result); prog.dismiss(); mMuCallBack.syncAccount(); } } interface MuCallBack{ void syncAccount(); void closeDrawer(); } static MuCallBack mMuCallBack; static void setMuCallBack(MuCallBack muCallBack){ mMuCallBack=muCallBack; } }
gpl-3.0
feilaoda/runnerup
app/src/org/runnerup/tracker/filter/PersistentGpsLoggerListener.java
3844
/* * Copyright (C) 2012 jonas.oreland@gmail.com * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.runnerup.tracker.filter; import android.annotation.TargetApi; import android.content.ContentValues; import android.database.sqlite.SQLiteDatabase; import android.location.Location; import android.os.Build; import android.os.Bundle; import org.runnerup.common.util.Constants; import org.runnerup.tracker.LocationListenerBase; @TargetApi(Build.VERSION_CODES.FROYO) public class PersistentGpsLoggerListener extends LocationListenerBase implements Constants { private final java.lang.Object mLock; private SQLiteDatabase mDB; private java.lang.String mTable; private ContentValues mKey; public PersistentGpsLoggerListener(SQLiteDatabase _db, String _table, ContentValues _key) { this.mLock = new java.lang.Object(); this.mDB = _db; this.mTable = _table; setKey(_key); } public SQLiteDatabase getDB() { return mDB; } public void setDB(SQLiteDatabase _db) { mDB = _db; } public String getTable() { return mTable; } public void setTable(String _tab) { mTable = _tab; } public ContentValues getKey() { synchronized (mLock) { if (mKey == null) return null; return new ContentValues(mKey); } } public void setKey(ContentValues key) { synchronized (mLock) { if (key == null) mKey = null; else mKey = new ContentValues(key); } } @Override public void onLocationChanged(Location arg0) { 
super.onLocationChanged(arg0); onLocationChanged(arg0, null); } public void onLocationChanged(Location arg0, Integer hrValue) { ContentValues values; synchronized (mLock) { if (mKey == null) values = new ContentValues(); else values = new ContentValues(mKey); } values.put(DB.LOCATION.TIME, arg0.getTime()); values.put(DB.LOCATION.LATITUDE, (float) arg0.getLatitude()); values.put(DB.LOCATION.LONGITUDE, (float) arg0.getLongitude()); if (arg0.hasAccuracy()) { values.put(DB.LOCATION.ACCURANCY, arg0.getAccuracy()); } if (arg0.hasSpeed()) { values.put(DB.LOCATION.SPEED, arg0.getSpeed()); } if (arg0.hasAltitude()) { values.put(DB.LOCATION.ALTITUDE, (float) arg0.getAltitude()); } if (arg0.hasBearing()) { values.put(DB.LOCATION.BEARING, arg0.getBearing()); } if (hrValue != null) { values.put(DB.LOCATION.HR, hrValue); } if (mDB != null) { mDB.insert(mTable, null, values); } } @Override public void onProviderDisabled(String arg0) { super.onProviderDisabled(arg0); } @Override public void onProviderEnabled(String arg0) { super.onProviderEnabled(arg0); } @Override public void onStatusChanged(String arg0, int arg1, Bundle arg2) { super.onStatusChanged(arg0, arg1, arg2); } }
gpl-3.0
r3gis3r/CSipSimple
src/com/csipsimple/pjsip/PjSipService.java
97755
/** * Copyright (C) 2010-2015 Regis Montoya (aka r3gis - www.r3gis.fr) * Copyright (C) 2012-2013 Dennis Guse (http://dennisguse.de) * Copyright (C) 2015 Antonio Eugenio Burriel * This file is part of CSipSimple. * * CSipSimple is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * If you own a pjsip commercial license you can also redistribute it * and/or modify it under the terms of the GNU Lesser General Public License * as an android library. * * CSipSimple is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CSipSimple. If not, see <http://www.gnu.org/licenses/>. 
*/ package com.csipsimple.pjsip; import android.content.ContentUris; import android.content.ContentValues; import android.content.Context; import android.content.Intent; import android.database.Cursor; import android.media.AudioManager; import android.net.ConnectivityManager; import android.net.NetworkInfo; import android.os.Bundle; import android.telephony.TelephonyManager; import android.text.TextUtils; import android.util.SparseArray; import android.view.KeyCharacterMap; import android.view.KeyEvent; import android.view.SurfaceView; import com.csipsimple.R; import com.csipsimple.api.SipCallSession; import com.csipsimple.api.SipConfigManager; import com.csipsimple.api.SipManager; import com.csipsimple.api.SipManager.PresenceStatus; import com.csipsimple.api.SipProfile; import com.csipsimple.api.SipProfileState; import com.csipsimple.api.SipUri.ParsedSipContactInfos; import com.csipsimple.pjsip.earlylock.EarlyLockModule; import com.csipsimple.pjsip.player.IPlayerHandler; import com.csipsimple.pjsip.player.impl.SimpleWavPlayerHandler; import com.csipsimple.pjsip.recorder.IRecorderHandler; import com.csipsimple.pjsip.recorder.impl.SimpleWavRecorderHandler; import com.csipsimple.pjsip.reghandler.RegHandlerModule; import com.csipsimple.pjsip.sipclf.SipClfModule; import com.csipsimple.service.MediaManager; import com.csipsimple.service.SipService; import com.csipsimple.service.SipService.SameThreadException; import com.csipsimple.service.SipService.SipRunnable; import com.csipsimple.service.SipService.ToCall; import com.csipsimple.utils.ExtraPlugins; import com.csipsimple.utils.ExtraPlugins.DynCodecInfos; import com.csipsimple.utils.Log; import com.csipsimple.utils.PreferencesProviderWrapper; import com.csipsimple.utils.PreferencesWrapper; import com.csipsimple.utils.TimerWrapper; import com.csipsimple.utils.video.VideoUtilsWrapper; import com.csipsimple.utils.video.VideoUtilsWrapper.VideoCaptureCapability; import 
com.csipsimple.utils.video.VideoUtilsWrapper.VideoCaptureDeviceInfo;

import com.csipsimple.wizards.WizardUtils;

import org.pjsip.pjsua.SWIGTYPE_p_pj_stun_auth_cred;
import org.pjsip.pjsua.csipsimple_config;
import org.pjsip.pjsua.dynamic_factory;
import org.pjsip.pjsua.pj_ice_sess_options;
import org.pjsip.pjsua.pj_pool_t;
import org.pjsip.pjsua.pj_qos_params;
import org.pjsip.pjsua.pj_str_t;
import org.pjsip.pjsua.pj_turn_tp_type;
import org.pjsip.pjsua.pjmedia_srtp_use;
import org.pjsip.pjsua.pjsip_ssl_method;
import org.pjsip.pjsua.pjsip_timer_setting;
import org.pjsip.pjsua.pjsip_tls_setting;
import org.pjsip.pjsua.pjsip_transport_type_e;
import org.pjsip.pjsua.pjsua;
import org.pjsip.pjsua.pjsuaConstants;
import org.pjsip.pjsua.pjsua_acc_info;
import org.pjsip.pjsua.pjsua_buddy_config;
import org.pjsip.pjsua.pjsua_call_flag;
import org.pjsip.pjsua.pjsua_call_setting;
import org.pjsip.pjsua.pjsua_call_vid_strm_op;
import org.pjsip.pjsua.pjsua_config;
import org.pjsip.pjsua.pjsua_logging_config;
import org.pjsip.pjsua.pjsua_media_config;
import org.pjsip.pjsua.pjsua_msg_data;
import org.pjsip.pjsua.pjsua_transport_config;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Timer;
import java.util.TimerTask;

/**
 * Java-side wrapper around the native pjsip (pjsua) SIP stack.
 * <p>
 * Owns the stack lifecycle (load / start / stop), local transports and
 * accounts, codec configuration and per-call operations. All pjsua calls that
 * start/stop/query the stack must happen on the same executor thread; that
 * contract is enforced by callers via {@code SameThreadException} signatures.
 */
public class PjSipService {
    private static final String THIS_FILE = "PjService";

    // Defaults for DTMF pause (',') and wait (';') durations in ms;
    // both are overwritten from preferences in sipStart().
    private static int DTMF_TONE_PAUSE_LENGTH = 300;
    private static int DTMF_TONE_WAIT_LENGTH = 2000;

    // Owning Android service; injected via setService().
    public SipService service;

    // true once pjsua has been created and started successfully.
    private boolean created = false;
    // true once the native libraries have been loaded.
    private boolean hasSipStack = false;
    // set when System.loadLibrary failed (wrong CPU arch); never retried.
    private boolean sipStackIsCorrupted = false;

    // pjsua account ids of the "local" (transport-bound) accounts, one per
    // transport flavor; null while not created.
    private Integer localUdpAccPjId, localUdp6AccPjId, localTcpAccPjId, localTcp6AccPjId,
            localTlsAccPjId, localTls6AccPjId;
    public PreferencesProviderWrapper prefsWrapper;

    private Integer hasBeenHoldByGSM = null;
    private Integer hasBeenChangedRingerMode = null;

    // Callbacks registered into the native layer.
    public UAStateReceiver userAgentReceiver;
    public ZrtpStateReceiver zrtpReceiver;
    public MediaManager mediaManager;

    private Timer tasksTimer;
    // Keyed by pjsua call id.
    private SparseArray<String> dtmfToAutoSend = new SparseArray<String>(5);
    private SparseArray<TimerTask> dtmfTasks = new SparseArray<TimerTask>(5);
    private SparseArray<PjStreamDialtoneGenerator> dtmfDialtoneGenerators = new SparseArray<PjStreamDialtoneGenerator>(5);
    private SparseArray<PjStreamDialtoneGenerator> waittoneGenerators = new SparseArray<PjStreamDialtoneGenerator>(5);

    private String mNatDetected = "";

    // -------
    // Locks
    // -------

    public PjSipService() {
    }

    /** Attach the owning service and grab its preferences wrapper. */
    public void setService(SipService aService) {
        service = aService;
        prefsWrapper = service.getPrefs();
    }

    /** @return true when pjsua is created and started. */
    public boolean isCreated() {
        return created;
    }

    /**
     * Load the native sip stack libraries if not already done.
     *
     * @return true when the stack is available; false when loading failed.
     *         A load failure is remembered (sipStackIsCorrupted) and never
     *         retried for the lifetime of the process.
     */
    public boolean tryToLoadStack() {
        if (hasSipStack) {
            return true;
        }
        // File stackFile = NativeLibManager.getStackLibFile(service);
        if (!sipStackIsCorrupted) {
            try {
                // Try to load the stack
                // System.load(NativeLibManager.getBundledStackLibFile(service,
                // "libcrypto.so").getAbsolutePath());
                // System.load(NativeLibManager.getBundledStackLibFile(service,
                // "libssl.so").getAbsolutePath());
                // System.loadLibrary("crypto");
                // System.loadLibrary("ssl");
                System.loadLibrary(NativeLibManager.STD_LIB_NAME);
                System.loadLibrary(NativeLibManager.STACK_NAME);
                hasSipStack = true;
                return true;
            } catch (UnsatisfiedLinkError e) {
                // If it fails we probably are running on a special hardware
                Log.e(THIS_FILE,
                        "We have a problem with the current stack.... NOT YET Implemented", e);
                hasSipStack = false;
                sipStackIsCorrupted = true;

                service.notifyUserOfMessage("Can't load native library. CPU arch invalid for this build");
                return false;
            } catch (Exception e) {
                Log.e(THIS_FILE, "We have a problem with the current stack....", e);
            }
        }
        return false;
    }

    // Start the sip stack according to current settings
    /**
     * Start the sip stack Thread safing of this method must be ensured by upper
     * layer Every calls from pjsip that require start/stop/getInfos from the
     * underlying stack must be done on the same thread
     */
    public boolean sipStart() throws SameThreadException {

        Log.setLogLevel(prefsWrapper.getLogLevel());

        if (!hasSipStack) {
            Log.e(THIS_FILE, "We have no sip stack, we can't start");
            return false;
        }

        // Ensure the stack is not already created or is being created
        if (!created) {
            Log.d(THIS_FILE, "Starting sip stack");

            // Pj timer
            TimerWrapper.create(service);

            int status;
            status = pjsua.create();

            Log.i(THIS_FILE, "Created " + status);
            // General config
            {
                pj_str_t[] stunServers = null;
                int stunServersCount = 0;
                pjsua_config cfg = new pjsua_config();
                pjsua_logging_config logCfg = new pjsua_logging_config();
                pjsua_media_config mediaCfg = new pjsua_media_config();
                csipsimple_config cssCfg = new csipsimple_config();

                // SERVICE CONFIG
                if (userAgentReceiver == null) {
                    Log.d(THIS_FILE, "create ua receiver");
                    userAgentReceiver = new UAStateReceiver();
                    userAgentReceiver.initService(this);
                }
                userAgentReceiver.reconfigure(service);
                if (zrtpReceiver == null) {
                    Log.d(THIS_FILE, "create zrtp receiver");
                    zrtpReceiver = new ZrtpStateReceiver(this);
                }
                if (mediaManager == null) {
                    mediaManager = new MediaManager(service);
                }
                mediaManager.startService();

                initModules();

                DTMF_TONE_PAUSE_LENGTH = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.DTMF_PAUSE_TIME);
                DTMF_TONE_WAIT_LENGTH = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.DTMF_WAIT_TIME);

                // Register the Java callbacks into the native layer before
                // anything can fire events.
                pjsua.setCallbackObject(userAgentReceiver);
                pjsua.setZrtpCallbackObject(zrtpReceiver);

                Log.d(THIS_FILE, "Attach is done to callback");

                // CSS CONFIG
                pjsua.csipsimple_config_default(cssCfg);
                // CSipSimple-specific flags, all driven by user preferences.
                cssCfg.setUse_compact_form_headers(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.USE_COMPACT_FORM) ? pjsua.PJ_TRUE
                        : pjsua.PJ_FALSE);
                cssCfg.setUse_compact_form_sdp(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.USE_COMPACT_FORM) ? pjsua.PJ_TRUE
                        : pjsua.PJ_FALSE);
                cssCfg.setUse_no_update(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.FORCE_NO_UPDATE) ? pjsua.PJ_TRUE
                        : pjsua.PJ_FALSE);
                cssCfg.setUse_noise_suppressor(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ENABLE_NOISE_SUPPRESSION) ? pjsua.PJ_TRUE
                        : pjsua.PJ_FALSE);

                cssCfg.setTcp_keep_alive_interval(prefsWrapper.getTcpKeepAliveInterval());
                cssCfg.setTls_keep_alive_interval(prefsWrapper.getTlsKeepAliveInterval());

                cssCfg.setDisable_tcp_switch(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.DISABLE_TCP_SWITCH) ? pjsuaConstants.PJ_TRUE
                        : pjsuaConstants.PJ_FALSE);
                cssCfg.setDisable_rport(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.DISABLE_RPORT) ? pjsuaConstants.PJ_TRUE
                        : pjsuaConstants.PJ_FALSE);
                cssCfg.setAdd_bandwidth_tias_in_sdp(prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ADD_BANDWIDTH_TIAS_IN_SDP) ? pjsuaConstants.PJ_TRUE
                        : pjsuaConstants.PJ_FALSE);

                // Transaction timeouts -- only override the pjsip defaults
                // when the preference holds a strictly positive value.
                int tsx_to = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.TSX_T1_TIMEOUT);
                if (tsx_to > 0) {
                    cssCfg.setTsx_t1_timeout(tsx_to);
                }
                tsx_to = prefsWrapper.getPreferenceIntegerValue(SipConfigManager.TSX_T2_TIMEOUT);
                if (tsx_to > 0) {
                    cssCfg.setTsx_t2_timeout(tsx_to);
                }
                tsx_to = prefsWrapper.getPreferenceIntegerValue(SipConfigManager.TSX_T4_TIMEOUT);
                if (tsx_to > 0) {
                    cssCfg.setTsx_t4_timeout(tsx_to);
                }
                tsx_to = prefsWrapper.getPreferenceIntegerValue(SipConfigManager.TSX_TD_TIMEOUT);
                if (tsx_to > 0) {
                    cssCfg.setTsx_td_timeout(tsx_to);
                }

                // -- USE_ZRTP 1 is no_zrtp, 2 is create_zrtp
                File zrtpFolder = PreferencesWrapper.getZrtpFolder(service);
                if (zrtpFolder != null) {
                    cssCfg.setUse_zrtp((prefsWrapper
                            .getPreferenceIntegerValue(SipConfigManager.USE_ZRTP) > 1) ? pjsua.PJ_TRUE
                            : pjsua.PJ_FALSE);
                    cssCfg.setStorage_folder(pjsua.pj_str_copy(zrtpFolder.getAbsolutePath()));
                } else {
                    // No storage available : force ZRTP off.
                    cssCfg.setUse_zrtp(pjsua.PJ_FALSE);
                    cssCfg.setStorage_folder(pjsua.pj_str_copy(""));
                }

                // Plug in extra audio codec factories provided by plugin apks.
                Map<String, DynCodecInfos> availableCodecs = ExtraPlugins.getDynCodecPlugins(
                        service, SipManager.ACTION_GET_EXTRA_CODECS);
                dynamic_factory[] cssCodecs = cssCfg.getExtra_aud_codecs();
                int i = 0;
                for (Entry<String, DynCodecInfos> availableCodec : availableCodecs.entrySet()) {
                    DynCodecInfos dyn = availableCodec.getValue();
                    if (!TextUtils.isEmpty(dyn.libraryPath)) {
                        cssCodecs[i].setShared_lib_path(pjsua.pj_str_copy(dyn.libraryPath));
                        cssCodecs[i++].setInit_factory_name(pjsua
                                .pj_str_copy(dyn.factoryInitFunction));
                    }
                }
                cssCfg.setExtra_aud_codecs_cnt(i);

                // Audio implementation
                int implementation = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.AUDIO_IMPLEMENTATION);
                if (implementation == SipConfigManager.AUDIO_IMPLEMENTATION_OPENSLES) {
                    dynamic_factory audImp = cssCfg.getAudio_implementation();
                    audImp.setInit_factory_name(pjsua.pj_str_copy("pjmedia_opensl_factory"));
                    File openslLib = NativeLibManager.getBundledStackLibFile(service,
                            "libpj_opensl_dev.so");
                    audImp.setShared_lib_path(pjsua.pj_str_copy(openslLib.getAbsolutePath()));
                    cssCfg.setAudio_implementation(audImp);
                    Log.d(THIS_FILE, "Use OpenSL-ES implementation");
                }

                // Video implementation
                if (prefsWrapper.getPreferenceBooleanValue(SipConfigManager.USE_VIDEO)) {
                    // TODO :: Have plugins per capture / render / video codec /
                    // converter
                    Map<String, DynCodecInfos> videoPlugins = ExtraPlugins.getDynCodecPlugins(
                            service, SipManager.ACTION_GET_VIDEO_PLUGIN);
                    if (videoPlugins.size() > 0) {
                        // Only the first discovered video plugin is used.
                        DynCodecInfos videoPlugin = videoPlugins.values().iterator().next();
                        pj_str_t pjVideoFile = pjsua.pj_str_copy(videoPlugin.libraryPath);
                        Log.d(THIS_FILE, "Load video plugin at " + videoPlugin.libraryPath);
                        // Render
                        {
                            dynamic_factory vidImpl = cssCfg.getVideo_render_implementation();
                            vidImpl.setInit_factory_name(pjsua
                                    .pj_str_copy("pjmedia_webrtc_vid_render_factory"));
                            vidImpl.setShared_lib_path(pjVideoFile);
                        }
                        // Capture
                        {
                            dynamic_factory vidImpl = cssCfg.getVideo_capture_implementation();
                            vidImpl.setInit_factory_name(pjsua
                                    .pj_str_copy("pjmedia_webrtc_vid_capture_factory"));
                            vidImpl.setShared_lib_path(pjVideoFile);
                            /*
                             * -- For testing video screen -- Not yet released
                             * try { ComponentName cmp = new
                             * ComponentName("com.csipsimple.plugins.video",
                             * "com.csipsimple.plugins.video.CaptureReceiver");
                             * DynCodecInfos screenCapt = new
                             * ExtraPlugins.DynCodecInfos(service, cmp);
                             * vidImpl.setInit_factory_name(pjsua
                             * .pj_str_copy(screenCapt.factoryInitFunction));
                             * vidImpl.setShared_lib_path(pjsua
                             * .pj_str_copy(screenCapt.libraryPath)); } catch
                             * (NameNotFoundException e) { Log.e(THIS_FILE,
                             * "Not found capture plugin"); }
                             */
                        }
                        // Video codecs
                        availableCodecs = ExtraPlugins.getDynCodecPlugins(service,
                                SipManager.ACTION_GET_EXTRA_VIDEO_CODECS);
                        cssCodecs = cssCfg.getExtra_vid_codecs();
                        dynamic_factory[] cssCodecsDestroy = cssCfg.getExtra_vid_codecs_destroy();
                        i = 0;
                        for (Entry<String, DynCodecInfos> availableCodec : availableCodecs
                                .entrySet()) {
                            DynCodecInfos dyn = availableCodec.getValue();
                            if (!TextUtils.isEmpty(dyn.libraryPath)) {
                                // Create
                                cssCodecs[i].setShared_lib_path(pjsua.pj_str_copy(dyn.libraryPath));
                                cssCodecs[i].setInit_factory_name(pjsua
                                        .pj_str_copy(dyn.factoryInitFunction));
                                // Destroy
                                cssCodecsDestroy[i].setShared_lib_path(pjsua
                                        .pj_str_copy(dyn.libraryPath));
                                cssCodecsDestroy[i].setInit_factory_name(pjsua
                                        .pj_str_copy(dyn.factoryDeinitFunction));
                            }
                            i++;
                        }
                        cssCfg.setExtra_vid_codecs_cnt(i);

                        // Converter
                        dynamic_factory convertImpl = cssCfg.getVid_converter();
                        convertImpl.setShared_lib_path(pjVideoFile);
                        convertImpl.setInit_factory_name(pjsua
                                .pj_str_copy("pjmedia_libswscale_converter_init"));
                    }
                }

                // MAIN CONFIG
                pjsua.config_default(cfg);
                cfg.setCb(pjsuaConstants.WRAPPER_CALLBACK_STRUCT);
                cfg.setUser_agent(pjsua.pj_str_copy(prefsWrapper.getUserAgent(service)));

                // We need at least one thread
                int threadCount = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.THREAD_COUNT);
                if (threadCount <= 0) {
                    threadCount = 1;
                }
                cfg.setThread_cnt(threadCount);
                cfg.setUse_srtp(getUseSrtp());
                cfg.setSrtp_secure_signaling(0);
                cfg.setNat_type_in_sdp(0);

                // Session timers (RFC 4028): only apply when the preference
                // pair is coherent (min_se <= expires and min_se >= 90s).
                pjsip_timer_setting timerSetting = cfg.getTimer_setting();
                int minSe = prefsWrapper.getPreferenceIntegerValue(SipConfigManager.TIMER_MIN_SE);
                int sessExp = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.TIMER_SESS_EXPIRES);
                if (minSe <= sessExp && minSe >= 90) {
                    timerSetting.setMin_se(minSe);
                    timerSetting.setSess_expires(sessExp);
                    cfg.setTimer_setting(timerSetting);
                }

                // DNS
                if (prefsWrapper.enableDNSSRV() && !prefsWrapper.useIPv6()) {
                    pj_str_t[] nameservers = getNameservers();
                    if (nameservers != null) {
                        cfg.setNameserver_count(nameservers.length);
                        cfg.setNameserver(nameservers);
                    } else {
                        cfg.setNameserver_count(0);
                    }
                }

                // STUN
                boolean isStunEnabled = prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ENABLE_STUN);
                if (isStunEnabled) {
                    String[] servers = prefsWrapper.getPreferenceStringValue(
                            SipConfigManager.STUN_SERVER).split(",");
                    cfg.setStun_srv_cnt(servers.length);
                    stunServers = cfg.getStun_srv();
                    for (String server : servers) {
                        Log.d(THIS_FILE, "add server " + server.trim());
                        stunServers[stunServersCount] = pjsua.pj_str_copy(server.trim());
                        stunServersCount++;
                    }
                    cfg.setStun_srv(stunServers);
                    cfg.setStun_map_use_stun2(boolToPjsuaConstant(prefsWrapper
                            .getPreferenceBooleanValue(SipConfigManager.ENABLE_STUN2)));
                }

                // LOGGING CONFIG
                pjsua.logging_config_default(logCfg);
                logCfg.setConsole_level(prefsWrapper.getLogLevel());
                logCfg.setLevel(prefsWrapper.getLogLevel());
                logCfg.setMsg_logging(pjsuaConstants.PJ_TRUE);
                if (prefsWrapper.getPreferenceBooleanValue(SipConfigManager.LOG_USE_DIRECT_FILE,
                        false)) {
                    File outFile = PreferencesWrapper.getLogsFile(service, true);
                    if (outFile != null) {
                        logCfg.setLog_filename(pjsua.pj_str_copy(outFile.getAbsolutePath()));
                        logCfg.setLog_file_flags(0x1108 /* PJ_O_APPEND */);
                    }
                }

                // MEDIA CONFIG
                pjsua.media_config_default(mediaCfg);

                // For now only this cfg is supported
                mediaCfg.setChannel_count(1);
                mediaCfg.setSnd_auto_close_time(prefsWrapper.getAutoCloseTime());
                // Echo cancellation
                mediaCfg.setEc_tail_len(prefsWrapper.getEchoCancellationTail());
                int echoMode = prefsWrapper.getPreferenceIntegerValue(SipConfigManager.ECHO_MODE);
                long clockRate = prefsWrapper.getClockRate(mediaManager);
                if (clockRate > 16000 && echoMode == SipConfigManager.ECHO_MODE_WEBRTC_M) {
                    // WebRTC mobile does not allow higher that 16kHz for now
                    // TODO : warn user about this point
                    echoMode = SipConfigManager.ECHO_MODE_SIMPLE;
                }
                mediaCfg.setEc_options(echoMode);
                // Note the negation : the pref enables VAD, pjsip flag disables it.
                mediaCfg.setNo_vad(boolToPjsuaConstant(!prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ENABLE_VAD)));
                mediaCfg.setQuality(prefsWrapper.getMediaQuality());
                mediaCfg.setClock_rate(clockRate);
                mediaCfg.setAudio_frame_ptime(prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.SND_PTIME));

                // Disabled ? because only one thread enabled now for battery
                // perfs on normal state
                int mediaThreadCount = prefsWrapper
                        .getPreferenceIntegerValue(SipConfigManager.MEDIA_THREAD_COUNT);
                mediaCfg.setThread_cnt(mediaThreadCount);

                boolean hasOwnIoQueue = prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.HAS_IO_QUEUE);
                if (threadCount <= 0) {
                    // Global thread count is 0, so don't use sip one anyway
                    hasOwnIoQueue = false;
                }
                mediaCfg.setHas_ioqueue(boolToPjsuaConstant(hasOwnIoQueue));

                // ICE
                boolean iceEnabled = prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ENABLE_ICE);
                mediaCfg.setEnable_ice(boolToPjsuaConstant(iceEnabled));
                if (iceEnabled) {
                    pj_ice_sess_options iceOpts = mediaCfg.getIce_opt();
                    boolean aggressiveIce = prefsWrapper
                            .getPreferenceBooleanValue(SipConfigManager.ICE_AGGRESSIVE);
                    iceOpts.setAggressive(boolToPjsuaConstant(aggressiveIce));
                }

                // TURN
                boolean isTurnEnabled = prefsWrapper
                        .getPreferenceBooleanValue(SipConfigManager.ENABLE_TURN);
                if (isTurnEnabled) {
                    SWIGTYPE_p_pj_stun_auth_cred creds = mediaCfg.getTurn_auth_cred();
                    mediaCfg.setEnable_turn(boolToPjsuaConstant(isTurnEnabled));
                    mediaCfg.setTurn_server(pjsua.pj_str_copy(prefsWrapper.getTurnServer()));
                    pjsua.set_turn_credentials(
                            pjsua.pj_str_copy(prefsWrapper
                                    .getPreferenceStringValue(SipConfigManager.TURN_USERNAME)),
                            pjsua.pj_str_copy(prefsWrapper
                                    .getPreferenceStringValue(SipConfigManager.TURN_PASSWORD)),
                            pjsua.pj_str_copy("*"),
                            creds);
                    // Normally this step is useless as manipulating a pointer in C memory at this point, but in case this changes reassign
                    mediaCfg.setTurn_auth_cred(creds);

                    // Map the preference integer onto the pjsip TURN transport
                    // enum; 0 keeps the pjsip default.
                    int turnTransport = prefsWrapper
                            .getPreferenceIntegerValue(SipConfigManager.TURN_TRANSPORT);
                    if (turnTransport != 0) {
                        switch (turnTransport) {
                            case 1:
                                mediaCfg.setTurn_conn_type(pj_turn_tp_type.PJ_TURN_TP_UDP);
                                break;
                            case 2:
                                mediaCfg.setTurn_conn_type(pj_turn_tp_type.PJ_TURN_TP_TCP);
                                break;
                            case 3:
                                mediaCfg.setTurn_conn_type(pj_turn_tp_type.PJ_TURN_TP_TLS);
                                break;
                            default:
                                break;
                        }
                    }
                    //mediaCfg.setTurn_conn_type(value);
                } else {
                    mediaCfg.setEnable_turn(pjsua.PJ_FALSE);
                }

                // INITIALIZE
                status = pjsua.csipsimple_init(cfg, logCfg, mediaCfg, cssCfg, service);
                if (status != pjsuaConstants.PJ_SUCCESS) {
                    String msg = "Fail to init pjsua "
                            + pjStrToString(pjsua.get_error_message(status));
                    Log.e(THIS_FILE, msg);
                    service.notifyUserOfMessage(msg);
                    cleanPjsua();
                    return false;
                }
            }

            // Add transports
            {
                // TODO : allow to configure local accounts.
                // We need a local account for each transport
                // to not have the
                // application lost when direct call to the IP

                // UDP
                if (prefsWrapper.isUDPEnabled()) {
                    int udpPort = prefsWrapper.getUDPTransportPort();
                    localUdpAccPjId = createLocalTransportAndAccount(
                            pjsip_transport_type_e.PJSIP_TRANSPORT_UDP,
                            udpPort);
                    if (localUdpAccPjId == null) {
                        cleanPjsua();
                        return false;
                    }
                    // UDP v6 -- offset the port by 10 unless the v4 port is 0 (= any).
                    if (prefsWrapper.useIPv6()) {
                        localUdp6AccPjId = createLocalTransportAndAccount(
                                pjsip_transport_type_e.PJSIP_TRANSPORT_UDP6,
                                udpPort == 0 ? udpPort : udpPort + 10);
                    }
                }
                // TCP
                if (prefsWrapper.isTCPEnabled()) {
                    int tcpPort = prefsWrapper.getTCPTransportPort();
                    localTcpAccPjId = createLocalTransportAndAccount(
                            pjsip_transport_type_e.PJSIP_TRANSPORT_TCP,
                            tcpPort);
                    if (localTcpAccPjId == null) {
                        cleanPjsua();
                        return false;
                    }
                    // TCP v6
                    if (prefsWrapper.useIPv6()) {
                        localTcp6AccPjId = createLocalTransportAndAccount(
                                pjsip_transport_type_e.PJSIP_TRANSPORT_TCP6,
                                tcpPort == 0 ? tcpPort : tcpPort + 10);
                    }
                }
                // TLS
                if (prefsWrapper.isTLSEnabled()) {
                    int tlsPort = prefsWrapper.getTLSTransportPort();
                    localTlsAccPjId = createLocalTransportAndAccount(
                            pjsip_transport_type_e.PJSIP_TRANSPORT_TLS,
                            tlsPort);
                    if (localTlsAccPjId == null) {
                        cleanPjsua();
                        return false;
                    }
                    // TLS v6
                    if (prefsWrapper.useIPv6()) {
                        localTls6AccPjId = createLocalTransportAndAccount(
                                pjsip_transport_type_e.PJSIP_TRANSPORT_TLS6,
                                tlsPort == 0 ? tlsPort : tlsPort + 10);
                    }
                }
            }

            // Add pjsip modules
            for (PjsipModule mod : pjsipModules.values()) {
                mod.onBeforeStartPjsip();
            }

            // Initialization is done, now start pjsua
            status = pjsua.start();

            if (status != pjsua.PJ_SUCCESS) {
                String msg = "Fail to start pjsip  "
                        + pjStrToString(pjsua.get_error_message(status));
                Log.e(THIS_FILE, msg);
                service.notifyUserOfMessage(msg);
                cleanPjsua();
                return false;
            }

            // Init media codecs
            initCodecs();
            setCodecsPriorities();

            created = true;

            return true;
        }

        return false;
    }

    /**
     * Stop sip service
     *
     * @return true if stop has been performed
     */
    public boolean sipStop() throws SameThreadException {
        Log.d(THIS_FILE, ">> SIP STOP <<");

        if (getActiveCallInProgress() != null) {
            Log.e(THIS_FILE, "We have a call in progress... DO NOT STOP !!!");
            // TODO : queue quit on end call;
            return false;
        }

        if (service.notificationManager != null) {
            service.notificationManager.cancelRegisters();
        }

        if (created) {
            cleanPjsua();
        }
        if (tasksTimer != null) {
            tasksTimer.cancel();
            tasksTimer.purge();
            tasksTimer = null;
        }
        return true;
    }

    /**
     * Destroy the pjsua stack and release everything attached to it :
     * account status rows, receivers, media manager and the pj timer.
     * Resets {@code created} so the stack may be started again later.
     */
    private void cleanPjsua() throws SameThreadException {
        Log.d(THIS_FILE, "Detroying...");
        // This will destroy all accounts so synchronize with accounts
        // management lock
        // long flags = 1; /*< Lazy disconnect : only RX */
        // Try with TX & RX if network is considered as available
        long flags = 0;
        if (!prefsWrapper.isValidConnectionForOutgoing(false)) {
            // If we are current not valid for outgoing,
            // it means that we don't want the network for SIP now
            // so don't use RX | TX to not consume data at all
            flags = 3;
        }
        pjsua.csipsimple_destroy(flags);
        service.getContentResolver().delete(SipProfile.ACCOUNT_STATUS_URI, null, null);
        if (userAgentReceiver != null) {
            userAgentReceiver.stopService();
            userAgentReceiver = null;
        }

        if (mediaManager != null) {
            mediaManager.stopService();
            mediaManager = null;
        }

        TimerWrapper.destroy();

        created = false;
    }

    /**
     * Utility to create a transport
     *
     * @return transport id or -1 if failed
     */
    private Integer createTransport(pjsip_transport_type_e type, int port)
            throws SameThreadException {
        pjsua_transport_config cfg = new pjsua_transport_config();
        int[] tId = new int[1];
        int status;
        pjsua.transport_config_default(cfg);
        cfg.setPort(port);

        if (type.equals(pjsip_transport_type_e.PJSIP_TRANSPORT_TLS)) {
            // Fill the pjsip TLS settings from preferences; empty prefs keep
            // the pjsip defaults.
            pjsip_tls_setting tlsSetting = cfg.getTls_setting();

            /*
             * TODO : THIS IS OBSOLETE -- remove from UI String serverName =
             * prefsWrapper
             * .getPreferenceStringValue(SipConfigManager.TLS_SERVER_NAME); if
             * (!TextUtils.isEmpty(serverName)) {
             * tlsSetting.setServer_name(pjsua.pj_str_copy(serverName)); }
             */

            String caListFile = prefsWrapper
                    .getPreferenceStringValue(SipConfigManager.CA_LIST_FILE);
            if (!TextUtils.isEmpty(caListFile)) {
                tlsSetting.setCa_list_file(pjsua.pj_str_copy(caListFile));
            }

            String certFile = prefsWrapper.getPreferenceStringValue(SipConfigManager.CERT_FILE);
            if (!TextUtils.isEmpty(certFile)) {
                tlsSetting.setCert_file(pjsua.pj_str_copy(certFile));
            }

            String privKey = prefsWrapper.getPreferenceStringValue(SipConfigManager.PRIVKEY_FILE);
            if (!TextUtils.isEmpty(privKey)) {
                tlsSetting.setPrivkey_file(pjsua.pj_str_copy(privKey));
            }

            String tlsPwd = prefsWrapper.getPreferenceStringValue(SipConfigManager.TLS_PASSWORD);
            if (!TextUtils.isEmpty(tlsPwd)) {
                tlsSetting.setPassword(pjsua.pj_str_copy(tlsPwd));
            }

            boolean checkClient = prefsWrapper
                    .getPreferenceBooleanValue(SipConfigManager.TLS_VERIFY_CLIENT);
            tlsSetting.setVerify_client(checkClient ? 1 : 0);

            tlsSetting.setMethod(pjsip_ssl_method.swigToEnum(prefsWrapper.getTLSMethod()));
            tlsSetting.setProto(0);
            boolean checkServer = prefsWrapper
                    .getPreferenceBooleanValue(SipConfigManager.TLS_VERIFY_SERVER);
            tlsSetting.setVerify_server(checkServer ? 1 : 0);

            cfg.setTls_setting(tlsSetting);
        }

        if (prefsWrapper.getPreferenceBooleanValue(SipConfigManager.ENABLE_QOS)) {
            Log.d(THIS_FILE, "Activate qos for this transport");
            pj_qos_params qosParam = cfg.getQos_params();
            qosParam.setDscp_val((short) prefsWrapper
                    .getPreferenceIntegerValue(SipConfigManager.DSCP_VAL));
            qosParam.setFlags((short) 1); // DSCP
            cfg.setQos_params(qosParam);
        }

        status = pjsua.transport_create(type, cfg, tId);
        if (status != pjsuaConstants.PJ_SUCCESS) {
            String errorMsg = pjStrToString(pjsua.get_error_message(status));
            String msg = "Fail to create transport " + errorMsg + " (" + status + ")";
            Log.e(THIS_FILE, msg);
            if (status == 120098) { /* Already binded */
                msg = service.getString(R.string.another_application_use_sip_port);
            }
            service.notifyUserOfMessage(msg);
            return null;
        }
        return tId[0];
    }

    /**
     * Add a "local" pjsua account bound to the given transport; used so
     * direct-IP calls still work without a registered account.
     *
     * @return pjsua account id, or null when transportId is null.
     */
    private Integer createLocalAccount(Integer transportId) throws SameThreadException {
        if (transportId == null) {
            return null;
        }
        int[] p_acc_id = new int[1];
        pjsua.acc_add_local(transportId, pjsua.PJ_FALSE, p_acc_id);
        return p_acc_id[0];
    }

    /** Convenience : create a transport then its bound local account. */
    private Integer createLocalTransportAndAccount(pjsip_transport_type_e type, int port)
            throws SameThreadException {
        Integer transportId = createTransport(type, port);
        return createLocalAccount(transportId);
    }

    /**
     * Add or update a SIP account in the pjsip stack from its database profile,
     * mirror the resulting state into the account-status content provider and
     * trigger registration for non-local accounts.
     *
     * @param profile the database profile to (re)register
     * @return true when the pjsip operation returned PJ_SUCCESS
     */
    public boolean addAccount(SipProfile profile) throws SameThreadException {
        int status = pjsuaConstants.PJ_FALSE;
        if (!created) {
            Log.e(THIS_FILE, "PJSIP is not started here, nothing can be done");
            return status == pjsuaConstants.PJ_SUCCESS;
        }
        PjSipAccount account = new PjSipAccount(profile);

        account.applyExtraParams(service);

        // Force the use of a transport
        /*
         * switch (account.transport) { case SipProfile.TRANSPORT_UDP: if
         * (udpTranportId != null) {
         * //account.cfg.setTransport_id(udpTranportId); } break; case
         * SipProfile.TRANSPORT_TCP: if (tcpTranportId != null) { //
         * account.cfg.setTransport_id(tcpTranportId); } break; case
         * SipProfile.TRANSPORT_TLS: if (tlsTransportId != null) { //
         * account.cfg.setTransport_id(tlsTransportId); } break; default: break;
         * }
         */

        SipProfileState currentAccountStatus = getProfileState(profile);
        account.cfg.setRegister_on_acc_add(pjsuaConstants.PJ_FALSE);

        if (currentAccountStatus.isAddedToStack()) {
            // Account already known to pjsip : modify in place and re-register.
            pjsua.csipsimple_set_acc_user_data(currentAccountStatus.getPjsuaId(), account.css_cfg);
            status = pjsua.acc_modify(currentAccountStatus.getPjsuaId(), account.cfg);
            beforeAccountRegistration(currentAccountStatus.getPjsuaId(), profile);
            ContentValues cv = new ContentValues();
            cv.put(SipProfileState.ADDED_STATUS, status);
            service.getContentResolver().update(
                    ContentUris.withAppendedId(SipProfile.ACCOUNT_STATUS_ID_URI_BASE, profile.id),
                    cv, null, null);
            // Re register
            if (!account.wizard.equalsIgnoreCase(WizardUtils.LOCAL_WIZARD_TAG)) {
                if (status == pjsuaConstants.PJ_SUCCESS) {
                    status = pjsua.acc_set_registration(currentAccountStatus.getPjsuaId(), 1);
                    if (status == pjsuaConstants.PJ_SUCCESS) {
                        pjsua.acc_set_online_status(currentAccountStatus.getPjsuaId(), 1);
                    }
                }
            }
        } else {
            int[] accId = new int[1];
            if (account.wizard.equalsIgnoreCase(WizardUtils.LOCAL_WIZARD_TAG)) {
                // We already have local account by default
                // For now consider we are talking about UDP one
                // In the future local account should be set per transport
                switch (account.transport) {
                    case SipProfile.TRANSPORT_UDP:
                        accId[0] = prefsWrapper.useIPv6() ? localUdp6AccPjId : localUdpAccPjId;
                        break;
                    case SipProfile.TRANSPORT_TCP:
                        accId[0] = prefsWrapper.useIPv6() ? localTcp6AccPjId : localTcpAccPjId;
                        break;
                    case SipProfile.TRANSPORT_TLS:
                        accId[0] = prefsWrapper.useIPv6() ? localTls6AccPjId : localTlsAccPjId;
                        break;
                    default:
                        // By default use UDP
                        accId[0] = localUdpAccPjId;
                        break;
                }
                pjsua.csipsimple_set_acc_user_data(accId[0], account.css_cfg);
                // TODO : use video cfg here
                // nCfg.setVid_in_auto_show(pjsuaConstants.PJ_TRUE);
                // nCfg.setVid_out_auto_transmit(pjsuaConstants.PJ_TRUE);
                // status = pjsua.acc_modify(accId[0], nCfg);
            } else {
                // Cause of standard account different from local account :)
                status = pjsua.acc_add(account.cfg, pjsuaConstants.PJ_FALSE, accId);
                pjsua.csipsimple_set_acc_user_data(accId[0], account.css_cfg);
                beforeAccountRegistration(accId[0], profile);
                pjsua.acc_set_registration(accId[0], 1);
            }

            if (status == pjsuaConstants.PJ_SUCCESS) {
                SipProfileState ps = new SipProfileState(profile);
                ps.setAddedStatus(status);
                ps.setPjsuaId(accId[0]);
                service.getContentResolver().insert(
                        ContentUris.withAppendedId(SipProfile.ACCOUNT_STATUS_ID_URI_BASE,
                                account.id), ps.getAsContentValue());
                pjsua.acc_set_online_status(accId[0], 1);
            }
        }

        return status == pjsuaConstants.PJ_SUCCESS;
    }

    /** Give registered pjsip modules a hook just before account registration. */
    void beforeAccountRegistration(int pjId, SipProfile profile) {
        for (PjsipModule mod : pjsipModules.values()) {
            mod.onBeforeAccountStartRegistration(pjId, profile);
        }
    }

    /**
     * Synchronize content provider backend from pjsip stack
     *
     * @param pjsuaId the pjsua id of the account to synchronize
     * @throws SameThreadException
     */
    public void updateProfileStateFromService(int pjsuaId) throws SameThreadException {
        if (!created) {
            return;
        }
        long accId = getAccountIdForPjsipId(service, pjsuaId);
        Log.d(THIS_FILE, "Update profile from service for " + pjsuaId + " aka in db " + accId);
        if (accId != SipProfile.INVALID_ID) {
            int success = pjsuaConstants.PJ_FALSE;
            pjsua_acc_info pjAccountInfo;
            pjAccountInfo = new pjsua_acc_info();
            success = pjsua.acc_get_info(pjsuaId, pjAccountInfo);
            if (success == pjsuaConstants.PJ_SUCCESS && pjAccountInfo != null) {
                ContentValues cv = new ContentValues();

                try {
                    // Should be fine : status code are coherent with RFC
                    // status codes
                    cv.put(SipProfileState.STATUS_CODE, pjAccountInfo.getStatus().swigValue());
                } catch (IllegalArgumentException e) {
                    // Unknown enum value coming back from the native layer :
                    // record a 500 instead of crashing.
                    cv.put(SipProfileState.STATUS_CODE,
                            SipCallSession.StatusCode.INTERNAL_SERVER_ERROR);
                }
                cv.put(SipProfileState.STATUS_TEXT, pjStrToString(pjAccountInfo.getStatus_text()));
                cv.put(SipProfileState.EXPIRES, pjAccountInfo.getExpires());

                service.getContentResolver().update(
                        ContentUris.withAppendedId(SipProfile.ACCOUNT_STATUS_ID_URI_BASE, accId),
                        cv, null, null);

                Log.d(THIS_FILE, "Profile state UP : " + cv);
            }
        } else {
            Log.e(THIS_FILE, "Trying to update not added account " + pjsuaId);
        }
    }

    /**
     * Get the dynamic state of the profile
     *
     * @param account the sip profile from database. Important field is id.
     * @return the dynamic sip profile state
     */
    public SipProfileState getProfileState(SipProfile account) {
        if (!created || account == null) {
            return null;
        }
        if (account.id == SipProfile.INVALID_ID) {
            return null;
        }
        SipProfileState accountInfo = new SipProfileState(account);
        Cursor c = service.getContentResolver().query(
                ContentUris.withAppendedId(SipProfile.ACCOUNT_STATUS_ID_URI_BASE, account.id),
                null, null, null, null);
        if (c != null) {
            try {
                if (c.getCount() > 0) {
                    c.moveToFirst();
                    accountInfo.createFromDb(c);
                }
            } catch (Exception e) {
                Log.e(THIS_FILE, "Error on looping over sip profiles states", e);
            } finally {
                c.close();
            }
        }
        return accountInfo;
    }

    // Codec names cached from the stack; guarded by synchronized(codecs).
    private static ArrayList<String> codecs = new ArrayList<String>();
    private static ArrayList<String> video_codecs = new ArrayList<String>();
    private static boolean codecs_initialized = false;

    /**
     * Reset the list of codecs stored
     */
    public static void resetCodecs() {
        synchronized (codecs) {
            if (codecs_initialized) {
                codecs.clear();
                video_codecs.clear();
                codecs_initialized = false;
            }
        }
    }

    /**
     * Retrieve codecs from pjsip stack and store it inside preference storage
     * so that it can be retrieved in the interface view
     *
     * @throws SameThreadException
     */
    private void initCodecs() throws SameThreadException {
        synchronized (codecs) {
            if (!codecs_initialized) {
                int nbrCodecs, i;
                // Audio codecs
                nbrCodecs = pjsua.codecs_get_nbr();
                for (i = 0; i < nbrCodecs; i++) {
                    String codecId = pjStrToString(pjsua.codecs_get_id(i));
                    codecs.add(codecId);
                    // Log.d(THIS_FILE, "Added codec " + codecId);
                }
                // Set it in prefs if not already set correctly
                prefsWrapper.setCodecList(codecs);

                // Video codecs
                nbrCodecs = pjsua.codecs_vid_get_nbr();
                for (i = 0; i < nbrCodecs; i++) {
                    String codecId = pjStrToString(pjsua.codecs_vid_get_id(i));
                    video_codecs.add(codecId);
                    Log.d(THIS_FILE, "Added video codec " + codecId);
                }
                // Set it in prefs if not already set correctly
                prefsWrapper.setVideoCodecList(video_codecs);

                codecs_initialized = true;
                // We are now always capable of tls and srtp !
                prefsWrapper.setLibCapability(PreferencesProviderWrapper.LIB_CAP_TLS, true);
                prefsWrapper.setLibCapability(PreferencesProviderWrapper.LIB_CAP_SRTP, true);
            }
        }
    }

    /**
     * Append log for the codec in String builder
     *
     * @param sb the buffer to be appended with the codec info
     * @param codec the codec name
     * @param prio the priority of the codec
     */
    private void buffCodecLog(StringBuilder sb, String codec, short prio) {
        if (prio > 0 && Log.getLogLevel() >= 4) {
            sb.append(codec);
            sb.append(" (");
            sb.append(prio);
            sb.append(") - ");
        }
    }

    /**
     * Set the codec priority in pjsip stack layer based on preference store
     *
     * @throws SameThreadException
     */
    private void setCodecsPriorities() throws SameThreadException {
        ConnectivityManager cm = ((ConnectivityManager) service
                .getSystemService(Context.CONNECTIVITY_SERVICE));
        synchronized (codecs) {
            if (codecs_initialized) {
                NetworkInfo ni = cm.getActiveNetworkInfo();
                if (ni != null) {
                    StringBuilder audioSb = new StringBuilder();
                    StringBuilder videoSb = new StringBuilder();
                    audioSb.append("Audio codecs : ");
                    videoSb.append("Video codecs : ");

                    // Band type (narrow/wide) depends on the current network.
                    String currentBandType = prefsWrapper.getPreferenceStringValue(
                            SipConfigManager.getBandTypeKey(ni.getType(), ni.getSubtype()),
                            SipConfigManager.CODEC_WB);
synchronized (codecs) { for (String codec : codecs) { short aPrio = prefsWrapper.getCodecPriority(codec, currentBandType, "-1"); buffCodecLog(audioSb, codec, aPrio); pj_str_t codecStr = pjsua.pj_str_copy(codec); if (aPrio >= 0) { pjsua.codec_set_priority(codecStr, aPrio); } String codecKey = SipConfigManager.getCodecKey(codec, SipConfigManager.FRAMES_PER_PACKET_SUFFIX); Integer frmPerPacket = SipConfigManager.getPreferenceIntegerValue( service, codecKey); if (frmPerPacket != null && frmPerPacket > 0) { Log.v(THIS_FILE, "Set codec " + codec + " fpp : " + frmPerPacket); pjsua.codec_set_frames_per_packet(codecStr, frmPerPacket); } } for (String codec : video_codecs) { short aPrio = prefsWrapper.getCodecPriority(codec, currentBandType, "-1"); buffCodecLog(videoSb, codec, aPrio); if (aPrio >= 0) { pjsua.vid_codec_set_priority(pjsua.pj_str_copy(codec), aPrio); } String videoSize = SipConfigManager.getPreferenceStringValue(service, SipConfigManager.VIDEO_CAPTURE_SIZE, ""); if (TextUtils.isEmpty(videoSize) || videoSize.equalsIgnoreCase("0x0@0")) { List<VideoCaptureDeviceInfo> cps = VideoUtilsWrapper.getInstance() .getVideoCaptureDevices(service); if (cps.size() > 0) { videoSize = cps.get(cps.size() - 1).bestCapability .toPreferenceValue(); } } VideoCaptureCapability videoCap = new VideoUtilsWrapper.VideoCaptureCapability( videoSize); if (codec.startsWith("H264")) { int h264profile = SipConfigManager.getPreferenceIntegerValue( service, SipConfigManager.H264_PROFILE, 66); int h264level = SipConfigManager.getPreferenceIntegerValue(service, SipConfigManager.H264_LEVEL, 30); int h264bitrate = SipConfigManager.getPreferenceIntegerValue( service, SipConfigManager.H264_BITRATE, 0); if (h264profile > 0) { pjsua.codec_h264_set_profile(h264profile, h264level, videoCap.width, videoCap.height, videoCap.fps, h264bitrate, 0); // pjsua.codec_h264_set_profile(h264profile, // h264level, 352, 480, 15, h264bitrate, 0); // // 352×480 Log.d(THIS_FILE, "Set h264 profile : " + h264profile + ", " 
+ h264level + ", " + h264bitrate); } } } } Log.d(THIS_FILE, audioSb.toString()); Log.d(THIS_FILE, videoSb.toString()); } } } } // Call related /** * Answer a call * * @param callId the id of the call to answer to * @param code the status code to send in the response * @return */ public int callAnswer(int callId, int code) throws SameThreadException { if (created) { pjsua_call_setting cs = new pjsua_call_setting(); pjsua.call_setting_default(cs); cs.setAud_cnt(1); cs.setVid_cnt(prefsWrapper.getPreferenceBooleanValue(SipConfigManager.USE_VIDEO) ? 1 : 0); cs.setFlag(0); return pjsua.call_answer2(callId, cs, code, null, null); // return pjsua.call_answer(callId, code, null, null); } return -1; } /** * Hangup a call * * @param callId the id of the call to hangup * @param code the status code to send in the response * @return */ public int callHangup(int callId, int code) throws SameThreadException { if (created) { return pjsua.call_hangup(callId, code, null, null); } return -1; } public int callXfer(int callId, String callee) throws SameThreadException { if (created) { return pjsua.call_xfer(callId, pjsua.pj_str_copy(callee), null); } return -1; } public int callXferReplace(int callId, int otherCallId, int options) throws SameThreadException { if (created) { return pjsua.call_xfer_replaces(callId, otherCallId, options, null); } return -1; } /** * Make a call * * @param callee remote contact ot call If not well formated we try to add * domain name of the default account */ public int makeCall(String callee, int accountId, Bundle b) throws SameThreadException { if (!created) { return -1; } final ToCall toCall = sanitizeSipUri(callee, accountId); if (toCall != null) { pj_str_t uri = pjsua.pj_str_copy(toCall.getCallee()); // Nothing to do with this values byte[] userData = new byte[1]; int[] callId = new int[1]; pjsua_call_setting cs = new pjsua_call_setting(); pjsua_msg_data msgData = new pjsua_msg_data(); int pjsuaAccId = toCall.getPjsipAccountId(); // Call settings to 
add video pjsua.call_setting_default(cs); cs.setAud_cnt(1); cs.setVid_cnt(0); if (b != null && b.getBoolean(SipCallSession.OPT_CALL_VIDEO, false)) { cs.setVid_cnt(1); } cs.setFlag(0); pj_pool_t pool = pjsua.pool_create("call_tmp", 512, 512); // Msg data to add headers pjsua.msg_data_init(msgData); pjsua.csipsimple_init_acc_msg_data(pool, pjsuaAccId, msgData); if (b != null) { Bundle extraHeaders = b.getBundle(SipCallSession.OPT_CALL_EXTRA_HEADERS); if (extraHeaders != null) { for (String key : extraHeaders.keySet()) { try { String value = extraHeaders.getString(key); if (!TextUtils.isEmpty(value)) { int res = pjsua.csipsimple_msg_data_add_string_hdr(pool, msgData, pjsua.pj_str_copy(key), pjsua.pj_str_copy(value)); if (res == pjsuaConstants.PJ_SUCCESS) { Log.e(THIS_FILE, "Failed to add Xtra hdr (" + key + " : " + value + ") probably not X- header"); } } } catch (Exception e) { Log.e(THIS_FILE, "Invalid header value for key : " + key); } } } } int status = pjsua.call_make_call(pjsuaAccId, uri, cs, userData, msgData, callId); if (status == pjsuaConstants.PJ_SUCCESS) { dtmfToAutoSend.put(callId[0], toCall.getDtmf()); Log.d(THIS_FILE, "DTMF - Store for " + callId[0] + " - " + toCall.getDtmf()); } pjsua.pj_pool_release(pool); return status; } else { service.notifyUserOfMessage(service.getString(R.string.invalid_sip_uri) + " : " + callee); } return -1; } public int updateCallOptions(int callId, Bundle options) { // TODO : if more options we should redesign this part. if (options.containsKey(SipCallSession.OPT_CALL_VIDEO)) { boolean add = options.getBoolean(SipCallSession.OPT_CALL_VIDEO); SipCallSession ci = getCallInfo(callId); if (add && ci.mediaHasVideo()) { // We already have one video running -- refuse to send another return -1; } else if (!add && !ci.mediaHasVideo()) { // We have no current video, no way to remove. return -1; } pjsua_call_vid_strm_op op = add ? 
pjsua_call_vid_strm_op.PJSUA_CALL_VID_STRM_ADD : pjsua_call_vid_strm_op.PJSUA_CALL_VID_STRM_REMOVE; if (!add) { // TODO : manage remove case } return pjsua.call_set_vid_strm(callId, op, null); } return -1; } /** * Send a dtmf signal to a call * * @param callId the call to send the signal * @param keyCode the keyCode to send (android style) * @return */ public int sendDtmf(int callId, int keyCode) throws SameThreadException { if (!created) { return -1; } String keyPressed = ""; // Since some device (xoom...) are apparently buggy with key character // map loading... // we have to do crappy thing here if (keyCode >= KeyEvent.KEYCODE_0 && keyCode <= KeyEvent.KEYCODE_9) { keyPressed = Integer.toString(keyCode - KeyEvent.KEYCODE_0); } else if (keyCode == KeyEvent.KEYCODE_POUND) { keyPressed = "#"; } else if (keyCode == KeyEvent.KEYCODE_STAR) { keyPressed = "*"; } else { // Fallback... should never be there if using visible dialpad, but // possible using keyboard KeyCharacterMap km = KeyCharacterMap.load(KeyCharacterMap.NUMERIC); keyPressed = Integer.toString(km.getNumber(keyCode)); } return sendDtmf(callId, keyPressed); } private int sendDtmf(final int callId, String keyPressed) throws SameThreadException { if (TextUtils.isEmpty(keyPressed)) { return pjsua.PJ_SUCCESS; } if (pjsua.call_is_active(callId) != pjsuaConstants.PJ_TRUE) { return -1; } if(pjsua.call_has_media(callId) != pjsuaConstants.PJ_TRUE) { return -1; } String dtmfToDial = keyPressed; String remainingDtmf = ""; int pauseBeforeRemaining = 0; boolean foundSeparator = false; if (keyPressed.contains(",") || keyPressed.contains(";")) { dtmfToDial = ""; for (int i = 0; i < keyPressed.length(); i++) { char c = keyPressed.charAt(i); if (!foundSeparator) { if (c == ',' || c == ';') { pauseBeforeRemaining += (c == ',') ? 
DTMF_TONE_PAUSE_LENGTH : DTMF_TONE_WAIT_LENGTH; foundSeparator = true; } else { dtmfToDial += c; } } else { if ((c == ',' || c == ';') && TextUtils.isEmpty(remainingDtmf)) { pauseBeforeRemaining += (c == ',') ? DTMF_TONE_PAUSE_LENGTH : DTMF_TONE_WAIT_LENGTH; } else { remainingDtmf += c; } } } } int res = 0; if (!TextUtils.isEmpty(dtmfToDial)) { pj_str_t pjKeyPressed = pjsua.pj_str_copy(dtmfToDial); res = -1; if (prefsWrapper.useSipInfoDtmf()) { res = pjsua.send_dtmf_info(callId, pjKeyPressed); Log.d(THIS_FILE, "Has been sent DTMF INFO : " + res); } else { if (!prefsWrapper.forceDtmfInBand()) { // Generate using RTP res = pjsua.call_dial_dtmf(callId, pjKeyPressed); Log.d(THIS_FILE, "Has been sent in RTP DTMF : " + res); } if (res != pjsua.PJ_SUCCESS && !prefsWrapper.forceDtmfRTP()) { // Generate using analogic inband if (dtmfDialtoneGenerators.get(callId) == null) { dtmfDialtoneGenerators.put(callId, new PjStreamDialtoneGenerator(callId)); } res = dtmfDialtoneGenerators.get(callId).sendPjMediaDialTone(dtmfToDial); Log.d(THIS_FILE, "Has been sent DTMF analogic : " + res); } } } // Finally, push remaining DTMF in the future if (!TextUtils.isEmpty(remainingDtmf)) { dtmfToAutoSend.put(callId, remainingDtmf); if (tasksTimer == null) { tasksTimer = new Timer("com.csipsimple.PjSipServiceTasks"); } TimerTask tt = new TimerTask() { @Override public void run() { service.getExecutor().execute(new SipRunnable() { @Override protected void doRun() throws SameThreadException { Log.d(THIS_FILE, "Running pending DTMF send"); sendPendingDtmf(callId); } }); } }; dtmfTasks.put(callId, tt); Log.d(THIS_FILE, "Schedule DTMF " + remainingDtmf + " in " + pauseBeforeRemaining); tasksTimer.schedule(tt, pauseBeforeRemaining); } else { if (dtmfToAutoSend.get(callId) != null) { dtmfToAutoSend.put(callId, null); } if (dtmfTasks.get(callId) != null) { dtmfTasks.put(callId, null); } } return res; } /** * Send sms/message using SIP server */ public ToCall sendMessage(String callee, String message, 
long accountId) throws SameThreadException { if (!created) { return null; } ToCall toCall = sanitizeSipUri(callee, accountId); if (toCall != null) { pj_str_t uri = pjsua.pj_str_copy(toCall.getCallee()); pj_str_t text = pjsua.pj_str_copy(message); /* * Log.d(THIS_FILE, "get for outgoing"); int finalAccountId = * accountId; if (accountId == -1) { finalAccountId = * pjsua.acc_find_for_outgoing(uri); } */ // Nothing to do with this values byte[] userData = new byte[1]; int status = pjsua.im_send(toCall.getPjsipAccountId(), uri, null, text, null, userData); return (status == pjsuaConstants.PJ_SUCCESS) ? toCall : null; } return toCall; } /** * Add a buddy to buddies list * * @param buddyUri the uri to register to * @throws SameThreadException */ public int addBuddy(String buddyUri) throws SameThreadException { if (!created) { return -1; } int[] p_buddy_id = new int[1]; pjsua_buddy_config buddy_cfg = new pjsua_buddy_config(); pjsua.buddy_config_default(buddy_cfg); buddy_cfg.setSubscribe(1); buddy_cfg.setUri(pjsua.pj_str_copy(buddyUri)); pjsua.buddy_add(buddy_cfg, p_buddy_id); return p_buddy_id[0]; } /** * Remove one buddy from the buddy list managed by pjsip * * @param buddyUri he uri to unregister * @throws SameThreadException */ public void removeBuddy(String buddyUri) throws SameThreadException { if (!created) { return; } int buddyId = pjsua.buddy_find(pjsua.pj_str_copy(buddyUri)); if (buddyId >= 0) { pjsua.buddy_del(buddyId); } } public void sendPendingDtmf(int callId) throws SameThreadException { if (dtmfToAutoSend.get(callId) != null) { Log.d(THIS_FILE, "DTMF - Send pending dtmf " + dtmfToAutoSend.get(callId) + " for " + callId); sendDtmf(callId, dtmfToAutoSend.get(callId)); } } public void stopDialtoneGenerator(int callId) { if (dtmfDialtoneGenerators.get(callId) != null) { dtmfDialtoneGenerators.get(callId).stopDialtoneGenerator(); dtmfDialtoneGenerators.put(callId, null); } if (dtmfToAutoSend.get(callId) != null) { dtmfToAutoSend.put(callId, null); } if 
(dtmfTasks.get(callId) != null) { dtmfTasks.get(callId).cancel(); dtmfTasks.put(callId, null); } } public void startWaittoneGenerator(int callId) { if (waittoneGenerators.get(callId) == null) { waittoneGenerators.put(callId, new PjStreamDialtoneGenerator(callId, false)); } waittoneGenerators.get(callId).startPjMediaWaitingTone(); } public void stopWaittoneGenerator(int callId) { if (waittoneGenerators.get(callId) != null) { waittoneGenerators.get(callId).stopDialtoneGenerator(); waittoneGenerators.put(callId, null); } } public int callHold(int callId) throws SameThreadException { if (created) { return pjsua.call_set_hold(callId, null); } return -1; } public int callReinvite(int callId, boolean unhold) throws SameThreadException { if (created) { return pjsua.call_reinvite(callId, unhold ? pjsua_call_flag.PJSUA_CALL_UNHOLD.swigValue() : 0, null); } return -1; } public SipCallSession getCallInfo(int callId) { if (created/* && !creating */&& userAgentReceiver != null) { SipCallSession callInfo = userAgentReceiver.getCallInfo(callId); return callInfo; } return null; } public SipCallSession getPublicCallInfo(int callId) { SipCallSession internalCallSession = getCallInfo(callId); if( internalCallSession == null) { return null; } return new SipCallSession(internalCallSession); } public void setBluetoothOn(boolean on) throws SameThreadException { if (created && mediaManager != null) { mediaManager.setBluetoothOn(on); } } /** * Mute microphone * * @param on true if microphone has to be muted * @throws SameThreadException */ public void setMicrophoneMute(boolean on) throws SameThreadException { if (created && mediaManager != null) { mediaManager.setMicrophoneMute(on); } } /** * Change speaker phone mode * * @param on true if the speaker mode has to be on. 
* @throws SameThreadException */ public void setSpeakerphoneOn(boolean on) throws SameThreadException { if (created && mediaManager != null) { mediaManager.setSpeakerphoneOn(on); } } public SipCallSession[] getCalls() { if (created && userAgentReceiver != null) { SipCallSession[] callsInfo = userAgentReceiver.getCalls(); return callsInfo; } return new SipCallSession[0]; } public void confAdjustTxLevel(int port, float value) throws SameThreadException { if (created && userAgentReceiver != null) { pjsua.conf_adjust_tx_level(port, value); } } public void confAdjustRxLevel(int port, float value) throws SameThreadException { if (created && userAgentReceiver != null) { pjsua.conf_adjust_rx_level(port, value); } } public void setEchoCancellation(boolean on) throws SameThreadException { if (created && userAgentReceiver != null) { Log.d(THIS_FILE, "set echo cancelation " + on); pjsua.set_ec(on ? prefsWrapper.getEchoCancellationTail() : 0, prefsWrapper.getPreferenceIntegerValue(SipConfigManager.ECHO_MODE)); } } public void adjustStreamVolume(int stream, int direction, int flags) { if (mediaManager != null) { mediaManager.adjustStreamVolume(stream, direction, AudioManager.FLAG_SHOW_UI); } } public void silenceRinger() { if (mediaManager != null) { mediaManager.stopRingAndUnfocus(); } } /** * Change account registration / adding state * * @param account The account to modify registration * @param renew if 0 we ask for deletion of this account; if 1 we ask for * registration of this account (and add if necessary) * @param forceReAdd if true, we will first remove the account and then * re-add it * @return true if the operation get completed without problem * @throws SameThreadException */ public boolean setAccountRegistration(SipProfile account, int renew, boolean forceReAdd) throws SameThreadException { int status = -1; if (!created || account == null) { Log.e(THIS_FILE, "PJSIP is not started here, nothing can be done"); return false; } if (account.id == SipProfile.INVALID_ID) 
{ Log.w(THIS_FILE, "Trying to set registration on a deleted account"); return false; } SipProfileState profileState = getProfileState(account); // If local account -- Ensure we are not deleting, because this would be // invalid if (profileState.getWizard().equalsIgnoreCase(WizardUtils.LOCAL_WIZARD_TAG)) { if (renew == 0) { return false; } } // In case of already added, we have to act finely // If it's local we can just consider that we have to re-add account // since it will actually just touch the account with a modify if (profileState != null && profileState.isAddedToStack() && !profileState.getWizard().equalsIgnoreCase(WizardUtils.LOCAL_WIZARD_TAG)) { // The account is already there in accounts list service.getContentResolver().delete( ContentUris.withAppendedId(SipProfile.ACCOUNT_STATUS_URI, account.id), null, null); Log.d(THIS_FILE, "Account already added to stack, remove and re-load or delete"); if (renew == 1) { if (forceReAdd) { status = pjsua.acc_del(profileState.getPjsuaId()); addAccount(account); } else { pjsua.acc_set_online_status(profileState.getPjsuaId(), getOnlineForStatus(service.getPresence())); status = pjsua.acc_set_registration(profileState.getPjsuaId(), renew); } } else { // if(status == pjsuaConstants.PJ_SUCCESS && renew == 0) { Log.d(THIS_FILE, "Delete account !!"); status = pjsua.acc_del(profileState.getPjsuaId()); } } else { if (renew == 1) { addAccount(account); } else { Log.w(THIS_FILE, "Ask to unregister an unexisting account !!" 
+ account.id);
            }
        }
        // PJ_SUCCESS = 0
        return status == 0;
    }

    /**
     * Set self presence state for an account.
     *
     * @param presence the SipManager.SipPresence to publish
     * @param statusText the text of the presence
     *            (NOTE(review): not used by the visible implementation — confirm intended)
     * @param accountId the database id of the account to publish presence for
     * @throws SameThreadException virtual exception to be sure we are calling
     *             this from the correct (pjsip) thread
     */
    public void setPresence(PresenceStatus presence, String statusText, long accountId) throws SameThreadException {
        if (!created) {
            // Stack not started: nothing can be published.
            Log.e(THIS_FILE, "PJSIP is not started here, nothing can be done");
            return;
        }

        // Fake a profile carrying only the id; used as a key to look up the live state.
        SipProfile account = new SipProfile();
        account.id = accountId;
        SipProfileState profileState = getProfileState(account);

        // In case of already added, we have to act finely
        // If it's local we can just consider that we have to re-add account
        // since it will actually just touch the account with a modify
        if (profileState != null && profileState.isAddedToStack()) {
            // The account is already there in accounts list
            pjsua.acc_set_online_status(profileState.getPjsuaId(), getOnlineForStatus(presence));
        }
    }

    // Maps the app-level presence enum onto pjsua's online flag (1 = online, 0 = offline).
    private int getOnlineForStatus(PresenceStatus presence) {
        return presence == PresenceStatus.ONLINE ?
1 : 0; } public static long getAccountIdForPjsipId(Context ctxt, int pjId) { long accId = SipProfile.INVALID_ID; Cursor c = ctxt.getContentResolver().query(SipProfile.ACCOUNT_STATUS_URI, null, null, null, null); if (c != null) { try { c.moveToFirst(); do { int pjsuaId = c.getInt(c.getColumnIndex(SipProfileState.PJSUA_ID)); Log.d(THIS_FILE, "Found pjsua " + pjsuaId + " searching " + pjId); if (pjsuaId == pjId) { accId = c.getInt(c.getColumnIndex(SipProfileState.ACCOUNT_ID)); break; } } while (c.moveToNext()); } catch (Exception e) { Log.e(THIS_FILE, "Error on looping over sip profiles", e); } finally { c.close(); } } return accId; } public SipProfile getAccountForPjsipId(int pjId) { long accId = getAccountIdForPjsipId(service, pjId); if (accId == SipProfile.INVALID_ID) { return null; } else { return service.getAccount(accId); } } public int validateAudioClockRate(int aClockRate) { if (mediaManager != null) { return mediaManager.validateAudioClockRate(aClockRate); } return -1; } public void setAudioInCall(int beforeInit) { if (mediaManager != null) { mediaManager.setAudioInCall(beforeInit == pjsuaConstants.PJ_TRUE); } } public void unsetAudioInCall() { if (mediaManager != null) { mediaManager.unsetAudioInCall(); } } public SipCallSession getActiveCallInProgress() { if (created && userAgentReceiver != null) { return userAgentReceiver.getActiveCallInProgress(); } return null; } public void refreshCallMediaState(final int callId) { service.getExecutor().execute(new SipRunnable() { @Override public void doRun() throws SameThreadException { if (created && userAgentReceiver != null) { userAgentReceiver.updateCallMediaState(callId); } } }); } /** * Transform a string callee into a valid sip uri in the context of an * account * * @param callee the callee string to call * @param accountId the context account * @return ToCall object representing what to call and using which account */ private ToCall sanitizeSipUri(String callee, long accountId) throws SameThreadException { // 
accountId is the id in term of csipsimple database // pjsipAccountId is the account id in term of pjsip adding int pjsipAccountId = (int) SipProfile.INVALID_ID; // Fake a sip profile empty to get it's profile state // Real get from db will be done later SipProfile account = new SipProfile(); account.id = accountId; SipProfileState profileState = getProfileState(account); long finalAccountId = accountId; // If this is an invalid account id if (accountId == SipProfile.INVALID_ID || !profileState.isAddedToStack()) { int defaultPjsipAccount = pjsua.acc_get_default(); boolean valid = false; account = getAccountForPjsipId(defaultPjsipAccount); if (account != null) { profileState = getProfileState(account); valid = profileState.isAddedToStack(); } // If default account is not active if (!valid) { Cursor c = service.getContentResolver().query(SipProfile.ACCOUNT_STATUS_URI, null, null, null, null); if (c != null) { try { if (c.getCount() > 0) { c.moveToFirst(); do { SipProfileState ps = new SipProfileState(c); if (ps.isValidForCall()) { finalAccountId = ps.getAccountId(); pjsipAccountId = ps.getPjsuaId(); break; } } while (c.moveToNext()); } } catch (Exception e) { Log.e(THIS_FILE, "Error on looping over sip profiles state", e); } finally { c.close(); } } } else { // Use the default account finalAccountId = profileState.getAccountId(); pjsipAccountId = profileState.getPjsuaId(); } } else { // If the account is valid pjsipAccountId = profileState.getPjsuaId(); } if (pjsipAccountId == SipProfile.INVALID_ID) { Log.e(THIS_FILE, "Unable to find a valid account for this call"); return null; } // Check integrity of callee field // Get real account information now account = service.getAccount((int) finalAccountId); ParsedSipContactInfos finalCallee = account.formatCalleeNumber(callee); String digitsToAdd = null; if (!TextUtils.isEmpty(finalCallee.userName) && (finalCallee.userName.contains(",") || finalCallee.userName.contains(";"))) { int commaIndex = 
finalCallee.userName.indexOf(","); int semiColumnIndex = finalCallee.userName.indexOf(";"); if (semiColumnIndex > 0 && semiColumnIndex < commaIndex) { commaIndex = semiColumnIndex; } digitsToAdd = finalCallee.userName.substring(commaIndex); finalCallee.userName = finalCallee.userName.substring(0, commaIndex); } Log.d(THIS_FILE, "will call " + finalCallee); if (pjsua.verify_sip_url(finalCallee.toString(false)) == 0) { // In worse worse case, find back the account id for uri.. but // probably useless case if (pjsipAccountId == SipProfile.INVALID_ID) { pjsipAccountId = pjsua.acc_find_for_outgoing(pjsua.pj_str_copy(finalCallee .toString(false))); } return new ToCall(pjsipAccountId, finalCallee.toString(true), digitsToAdd); } return null; } public void onGSMStateChanged(int state, String incomingNumber) throws SameThreadException { // Avoid ringing if new GSM state is not idle if (state != TelephonyManager.CALL_STATE_IDLE && mediaManager != null) { mediaManager.stopRingAndUnfocus(); } // If new call state is not idle if (state != TelephonyManager.CALL_STATE_IDLE && userAgentReceiver != null) { SipCallSession currentActiveCall = userAgentReceiver.getActiveCallOngoing(); // If we have a sip call on our side if (currentActiveCall != null) { AudioManager am = (AudioManager) service.getSystemService(Context.AUDIO_SERVICE); if (state == TelephonyManager.CALL_STATE_OFFHOOK) { // GSM is now off hook => hold current sip call hasBeenHoldByGSM = currentActiveCall.getCallId(); callHold(hasBeenHoldByGSM); pjsua.set_no_snd_dev(); am.setMode(AudioManager.MODE_IN_CALL); } else { // We have a ringing incoming call. 
// Avoid ringing hasBeenChangedRingerMode = am.getRingerMode(); am.setRingerMode(AudioManager.RINGER_MODE_SILENT); // And try to notify with tone if (mediaManager != null) { mediaManager.playInCallTone(MediaManager.TONE_CALL_WAITING); } } } } else { // GSM is now back to an IDLE state, resume previously stopped SIP // calls if (hasBeenHoldByGSM != null && isCreated()) { pjsua.set_snd_dev(0, 0); callReinvite(hasBeenHoldByGSM, true); hasBeenHoldByGSM = null; } // GSM is now back to an IDLE state, reset ringerMode if was // changed. if (hasBeenChangedRingerMode != null) { AudioManager am = (AudioManager) service.getSystemService(Context.AUDIO_SERVICE); am.setRingerMode(hasBeenChangedRingerMode); hasBeenChangedRingerMode = null; } } } /* * public void sendKeepAlivePackets() throws SameThreadException { * ArrayList<SipProfileState> accounts = getActiveProfilesState(); for * (SipProfileState acc : accounts) { * pjsua.send_keep_alive(acc.getPjsuaId()); } } */ public void zrtpSASVerified(int callId) throws SameThreadException { if (!created) { return; } pjsua.jzrtp_SASVerified(callId); } public void zrtpSASRevoke(int callId) throws SameThreadException { if (!created) { return; } pjsua.jzrtp_SASRevoked(callId); } protected void setDetectedNatType(String natName, int status) { // Maybe we will need to treat status to eliminate some set (depending of unknown string fine for 3rd part dev) mNatDetected = natName; } /** * @return nat type name detected by pjsip. 
Empty string if nothing detected */ public String getDetectedNatType() { return mNatDetected; } // Config subwrapper private pj_str_t[] getNameservers() { pj_str_t[] nameservers = null; if (prefsWrapper.enableDNSSRV()) { String prefsDNS = prefsWrapper .getPreferenceStringValue(SipConfigManager.OVERRIDE_NAMESERVER); if (TextUtils.isEmpty(prefsDNS)) { String ipv6Escape = "[ \\[\\]]"; String ipv4Matcher = "^\\d+(\\.\\d+){3}$"; String ipv6Matcher = "^[0-9a-f]+(:[0-9a-f]*)+:[0-9a-f]+$"; List<String> dnsServers; List<String> dnsServersAll = new ArrayList<String>(); List<String> dnsServersIpv4 = new ArrayList<String>(); for (int i = 1; i <= 2; i++) { String dnsName = prefsWrapper.getSystemProp("net.dns" + i); if (!TextUtils.isEmpty(dnsName)) { dnsName = dnsName.replaceAll(ipv6Escape, ""); if (!TextUtils.isEmpty(dnsName) && !dnsServersAll.contains(dnsName)) { if (dnsName.matches(ipv4Matcher) || dnsName.matches(ipv6Matcher)) { dnsServersAll.add(dnsName); } if (dnsName.matches(ipv4Matcher)) { dnsServersIpv4.add(dnsName); } } } } if (dnsServersIpv4.size() > 0) { // Prefer pure ipv4 list since pjsua doesn't manage ipv6 // resolution yet dnsServers = dnsServersIpv4; } else { dnsServers = dnsServersAll; } if (dnsServers.size() == 0) { // This is the ultimate fallback... we should never be there // ! 
nameservers = new pj_str_t[] { pjsua.pj_str_copy("127.0.0.1") }; } else if (dnsServers.size() == 1) { nameservers = new pj_str_t[] { pjsua.pj_str_copy(dnsServers.get(0)) }; } else { nameservers = new pj_str_t[] { pjsua.pj_str_copy(dnsServers.get(0)), pjsua.pj_str_copy(dnsServers.get(1)) }; } } else { nameservers = new pj_str_t[] { pjsua.pj_str_copy(prefsDNS) }; } } return nameservers; } private pjmedia_srtp_use getUseSrtp() { try { int use_srtp = Integer.parseInt(prefsWrapper .getPreferenceStringValue(SipConfigManager.USE_SRTP)); if (use_srtp >= 0) { return pjmedia_srtp_use.swigToEnum(use_srtp); } } catch (NumberFormatException e) { Log.e(THIS_FILE, "Transport port not well formated"); } return pjmedia_srtp_use.PJMEDIA_SRTP_DISABLED; } public void setNoSnd() throws SameThreadException { if (!created) { return; } pjsua.set_no_snd_dev(); } public void setSnd() throws SameThreadException { if (!created) { return; } pjsua.set_snd_dev(0, 0); } // Recorder private SparseArray<List<IRecorderHandler>> callRecorders = new SparseArray<List<IRecorderHandler>>(); /** * Start recording of a call. 
* * @param callId the call id of the call to record * @throws SameThreadException virtual exception to be sure we are calling * this from correct thread */ public void startRecording(int callId, int way) throws SameThreadException { // Make sure we are in a valid state for recording if (!canRecord(callId)) { return; } // Sanitize call way : if 0 assume all if (way == 0) { way = SipManager.BITMASK_ALL; } try { File recFolder = PreferencesProviderWrapper.getRecordsFolder(service); IRecorderHandler recoder = new SimpleWavRecorderHandler(getCallInfo(callId), recFolder, way); List<IRecorderHandler> recordersList = callRecorders.get(callId, new ArrayList<IRecorderHandler>()); recordersList.add(recoder); callRecorders.put(callId, recordersList); recoder.startRecording(); userAgentReceiver.updateRecordingStatus(callId, false, true); } catch (IOException e) { service.notifyUserOfMessage(R.string.cant_write_file); } catch (RuntimeException e) { Log.e(THIS_FILE, "Impossible to record ", e); } } /** * Stop recording of a call. * * @param callId the call to stop record for. * @throws SameThreadException virtual exception to be sure we are calling * this from correct thread */ public void stopRecording(int callId) throws SameThreadException { if (!created) { return; } List<IRecorderHandler> recoders = callRecorders.get(callId, null); if (recoders != null) { for (IRecorderHandler recoder : recoders) { recoder.stopRecording(); // Broadcast to other apps the a new sip record has been done SipCallSession callInfo = getPublicCallInfo(callId); Intent it = new Intent(SipManager.ACTION_SIP_CALL_RECORDED); it.putExtra(SipManager.EXTRA_CALL_INFO, callInfo); recoder.fillBroadcastWithInfo(it); service.sendBroadcast(it, SipManager.PERMISSION_USE_SIP); } // In first case we drop everything callRecorders.delete(callId); userAgentReceiver.updateRecordingStatus(callId, true, false); } } /** * Can we record for this call id ? 
* * @param callId The call id to record to a file * @return true if seems to be possible to record this call. */ public boolean canRecord(int callId) { if (!created) { // Not possible to record if service not here return false; } SipCallSession callInfo = getCallInfo(callId); if (callInfo == null) { // Not possible to record if no call info for given call id return false; } int ms = callInfo.getMediaStatus(); if (ms != SipCallSession.MediaState.ACTIVE && ms != SipCallSession.MediaState.REMOTE_HOLD) { // We can't record if media state not running on our side return false; } return true; } /** * Are we currently recording the call? * * @param callId The call id to test for a recorder presence * @return true if recording this call */ public boolean isRecording(int callId) throws SameThreadException { List<IRecorderHandler> recorders = callRecorders.get(callId, null); if (recorders == null) { return false; } return recorders.size() > 0; } // Stream players // We use a list for future possible extensions. For now api only manages // one private SparseArray<List<IPlayerHandler>> callPlayers = new SparseArray<List<IPlayerHandler>>(); /** * Play one wave file in call stream. * * @param filePath The path to the file we'd like to play * @param callId The call id we want to play to. Even if we only use * {@link SipManager#BITMASK_IN} this must correspond to some * call since it's used to identify internally created player. * @param way The way we want to play this file to. 
Bitmasked value that * could be compounded of {@link SipManager#BITMASK_IN} (read * local) and {@link SipManager#BITMASK_OUT} (read to remote * party of the call) * @throws SameThreadException virtual exception to be sure we are calling * this from correct thread */ public void playWaveFile(String filePath, int callId, int way) throws SameThreadException { if (!created) { return; } // Stop any current player stopPlaying(callId); if (TextUtils.isEmpty(filePath)) { // Nothing to do if we have not file path return; } if (way == 0) { way = SipManager.BITMASK_ALL; } // We create a new player conf port. try { IPlayerHandler player = new SimpleWavPlayerHandler(getCallInfo(callId), filePath, way); List<IPlayerHandler> playersList = callPlayers.get(callId, new ArrayList<IPlayerHandler>()); playersList.add(player); callPlayers.put(callId, playersList); player.startPlaying(); } catch (IOException e) { // TODO : add a can't read file txt service.notifyUserOfMessage(R.string.cant_write_file); } catch (RuntimeException e) { Log.e(THIS_FILE, "Impossible to play file", e); } } /** * Stop eventual player for a given call. 
* * @param callId the call id corresponding to player previously created with * {@link #playWaveFile(String, int, int)} * @throws SameThreadException virtual exception to be sure we are calling * this from correct thread */ public void stopPlaying(int callId) throws SameThreadException { List<IPlayerHandler> players = callPlayers.get(callId, null); if (players != null) { for (IPlayerHandler player : players) { player.stopPlaying(); } callPlayers.delete(callId); } } public void updateTransportIp(String oldIPAddress) throws SameThreadException { if (!created) { return; } Log.d(THIS_FILE, "Trying to update my address in the current call to " + oldIPAddress); pjsua.update_transport(pjsua.pj_str_copy(oldIPAddress)); } public static String pjStrToString(pj_str_t pjStr) { try { if (pjStr != null) { // If there's utf-8 ptr length is possibly lower than slen int len = pjStr.getSlen(); if (len > 0 && pjStr.getPtr() != null) { // Be robust to smaller length detected if (pjStr.getPtr().length() < len) { len = pjStr.getPtr().length(); } if (len > 0) { return pjStr.getPtr().substring(0, len); } } } } catch (StringIndexOutOfBoundsException e) { Log.e(THIS_FILE, "Impossible to retrieve string from pjsip ", e); } return ""; } /** * Get the signal level * @param port The pjsip port to get signal from * @return an encoded long with rx level on higher byte and tx level on lower byte */ public long getRxTxLevel(int port) { long[] rx_level = new long[1]; long[] tx_level = new long[1]; pjsua.conf_get_signal_level(port, tx_level, rx_level); return (rx_level[0] << 8 | tx_level[0]); } /** * Connect mic source to speaker output. * Usefull for tests. */ public void startLoopbackTest() { pjsua.conf_connect(0, 0); } /** * Stop connection between mic source to speaker output. 
* @see startLoopbackTest */ public void stopLoopbackTest() { pjsua.conf_disconnect(0, 0); } private Map<String, PjsipModule> pjsipModules = new HashMap<String, PjsipModule>(); private void initModules() { // TODO : this should be more modular and done from outside PjsipModule rModule = new RegHandlerModule(); pjsipModules.put(RegHandlerModule.class.getCanonicalName(), rModule); rModule = new SipClfModule(); pjsipModules.put(SipClfModule.class.getCanonicalName(), rModule); rModule = new EarlyLockModule(); pjsipModules.put(EarlyLockModule.class.getCanonicalName(), rModule); for (PjsipModule mod : pjsipModules.values()) { mod.setContext(service); } } public interface PjsipModule { /** * Set the android context for the module. Could be usefull to get * preferences for examples. * * @param ctxt android context */ void setContext(Context ctxt); /** * Here pjsip endpoint should have this module added. */ void onBeforeStartPjsip(); /** * This is fired just after account was added to pjsip and before will * be registered. Modules does not necessarily implement something here. * * @param pjId the pjsip id of the added account. * @param acc the profile account. */ void onBeforeAccountStartRegistration(int pjId, SipProfile acc); } /** * Provide video render surface to native code. * @param callId The call id for this video surface * @param window The video surface object */ public void setVideoAndroidRenderer(int callId, SurfaceView window) { pjsua.vid_set_android_renderer(callId, (Object) window); } /** * Provide video capturer surface view (the one binded to camera). * @param window The surface view object */ public void setVideoAndroidCapturer(SurfaceView window) { pjsua.vid_set_android_capturer((Object) window); } private static int boolToPjsuaConstant(boolean v) { return v ? pjsuaConstants.PJ_TRUE : pjsuaConstants.PJ_FALSE; } }
lgpl-3.0
siosio/intellij-community
python/src/com/jetbrains/python/console/ConsoleVisitorFilter.java
2023
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jetbrains.python.console; import com.intellij.psi.PsiFile; import com.jetbrains.python.inspections.*; import com.jetbrains.python.inspections.unusedLocal.PyUnusedLocalInspection; import com.jetbrains.python.psi.PythonVisitorFilter; import com.jetbrains.python.validation.DocStringAnnotator; import org.jetbrains.annotations.NotNull; /** * User : catherine * * filter out some python inspections and annotations if we're in console */ public class ConsoleVisitorFilter implements PythonVisitorFilter { @Override public boolean isSupported(@NotNull final Class visitorClass, @NotNull final PsiFile file) { //if we're in console if (PydevConsoleRunner.isInPydevConsole(file)) { //inspections if (visitorClass == PyUnusedLocalInspection.class || visitorClass == PyUnboundLocalVariableInspection.class || visitorClass == PyStatementEffectInspection.class || visitorClass == PySingleQuotedDocstringInspection.class || visitorClass == PyIncorrectDocstringInspection.class || visitorClass == PyMissingOrEmptyDocstringInspection.class || visitorClass == PyMandatoryEncodingInspection.class || visitorClass == PyPep8Inspection.class || visitorClass == PyCompatibilityInspection.class) { return false; } //annotators if (visitorClass == DocStringAnnotator.class) { return false; } } return true; } }
apache-2.0
romartin/kie-wb-common
kie-wb-common-widgets/kie-wb-common-ui/src/test/java/org/kie/workbench/common/widgets/client/datamodel/AsyncPackageDataModelOracleUtilitiesTest.java
5591
/* * Copyright 2014 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.workbench.common.widgets.client.datamodel; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import org.junit.Test; import org.kie.soup.project.datamodel.imports.Import; import org.kie.soup.project.datamodel.imports.Imports; import org.kie.soup.project.datamodel.oracle.FieldAccessorsAndMutators; import org.kie.soup.project.datamodel.oracle.ModelField; import static org.junit.Assert.*; import static org.kie.workbench.common.widgets.client.datamodel.PackageDataModelOracleTestUtils.*; public class AsyncPackageDataModelOracleUtilitiesTest { @Test public void testFilterModelFieldsFactsShareNameInDifferentPackagesAnotherOneIsInCurrentPackage() throws Exception { Map<String, ModelField[]> projectModelFields = new TreeMap<String, ModelField[]>(); projectModelFields.put("org.test.Person", new ModelField[]{getModelField("Person", "org.test.Person")}); projectModelFields.put("org.test.sub.Person", new ModelField[]{getModelField("Person", "org.test.sub.Person")}); projectModelFields.put("org.test.sub.Address", new ModelField[]{getModelField("Address", "org.test.sub.Address")}); projectModelFields.put("org.test.Address", new ModelField[]{getModelField("Address", "org.test.Address")}); FactNameToFQCNHandleRegistry registry = new FactNameToFQCNHandleRegistry(); 
AsyncPackageDataModelOracleUtilities.filterModelFields( "org.test.sub", new Imports(), projectModelFields, registry); assertEquals("org.test.sub.Person", registry.get("Person")); assertEquals("org.test.sub.Address", registry.get("Address")); } @Test public void testFilterModelFieldsFactsShareNameInDifferentPackagesAnotherOneIsImported() throws Exception { Map<String, ModelField[]> projectModelFields = new TreeMap<String, ModelField[]>(); projectModelFields.put("org.test.Person", new ModelField[]{getModelField("Person", "org.test.Person")}); projectModelFields.put("org.test.sub.Person", new ModelField[]{getModelField("Person", "org.test.sub.Person")}); projectModelFields.put("org.test.sub.Address", new ModelField[]{getModelField("Address", "org.test.sub.Address")}); projectModelFields.put("org.test.Address", new ModelField[]{getModelField("Address", "org.test.Address")}); FactNameToFQCNHandleRegistry registry = new FactNameToFQCNHandleRegistry(); Imports imports = new Imports(); imports.addImport(new Import("org.test.sub.Person")); imports.addImport(new Import("org.test.sub.Address")); AsyncPackageDataModelOracleUtilities.filterModelFields( "org.another", imports, projectModelFields, registry); assertEquals("org.test.sub.Person", registry.get("Person")); assertEquals("org.test.sub.Address", registry.get("Address")); } @Test public void testFilterSuperTypes() { Map<String, List<String>> projectSuperTypes = new HashMap<String, List<String>>(); projectSuperTypes.put("org.test.Person", Arrays.asList(new String[]{"org.test.GrandParent", "org.test.Parent"})); projectSuperTypes.put("org.test.sub.Person", Arrays.asList(new String[]{"org.test.sub.GrandParent", "org.test.sub.Parent"})); projectSuperTypes.put("org.test.sub.Address", Arrays.asList(new String[]{"org.test.sub.Location"})); projectSuperTypes.put("org.test.Address", Arrays.asList(new String[]{"org.test.Location"})); Imports imports = new Imports(); imports.addImport(new Import("org.test.sub.Person")); 
imports.addImport(new Import("org.test.sub.Address")); Map<String, List<String>> filterSuperTypes = AsyncPackageDataModelOracleUtilities.filterSuperTypes( "org.another", imports, projectSuperTypes); assertEquals(2, filterSuperTypes.size()); assertContains("Person", filterSuperTypes.keySet()); assertContains("Address", filterSuperTypes.keySet()); final List<String> personSuperTypes = filterSuperTypes.get("Person"); assertEquals(2, personSuperTypes.size()); assertEquals("org.test.sub.GrandParent", personSuperTypes.get(0)); assertEquals("org.test.sub.Parent", personSuperTypes.get(1)); final List<String> addressSuperTypes = filterSuperTypes.get("Address"); assertEquals(1, addressSuperTypes.size()); assertEquals("org.test.sub.Location", addressSuperTypes.get(0)); } private ModelField getModelField(String type, String className) { return new ModelField("field", className, ModelField.FIELD_CLASS_TYPE.REGULAR_CLASS, ModelField.FIELD_ORIGIN.DELEGATED, FieldAccessorsAndMutators.BOTH, type); } // check imports }
apache-2.0
jhshin9/scouter
scouter.agent/src/scouter/xtra/tools/JVM.java
1758
/* * Copyright 2015 LG CNS. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package scouter.xtra.tools; import java.io.IOException; import java.util.Properties; import com.sun.tools.attach.AttachNotSupportedException; import com.sun.tools.attach.VirtualMachine; public class JVM { public JVM(String pid) { this.pid = pid; } private String pid; private VirtualMachine vm = null; private String desc; public boolean isConnected() { return vm != null; } public String getPid() { return pid; } public boolean connect() throws AttachNotSupportedException, IOException { this.vm = VirtualMachine.attach(pid); this.desc = vm.getSystemProperties().getProperty("sun.java.command"); return true; } public void close() { if (this.vm != null) { try { this.vm.detach(); } catch (Exception e) { e.printStackTrace(); } } this.pid = null; this.vm = null; } public Properties getSystemProperties() throws IOException { if (vm == null) throw new RuntimeException("Not connected to jvm"); return vm.getSystemProperties(); } public VirtualMachine getVM() { return vm; } public String getDesc() { return this.desc; } }
apache-2.0
mgherghe/gateway
management/src/main/java/org/kaazing/gateway/management/config/ServiceConfigurationBeanImpl.java
19147
/** * Copyright 2007-2016, Kaazing Corporation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kaazing.gateway.management.config; import static java.util.Arrays.asList; import static org.kaazing.gateway.service.TransportOptionNames.HTTP_KEEP_ALIVE; import static org.kaazing.gateway.service.TransportOptionNames.HTTP_KEEP_ALIVE_TIMEOUT_KEY; import static org.kaazing.gateway.service.TransportOptionNames.INACTIVITY_TIMEOUT; import static org.kaazing.gateway.service.TransportOptionNames.SSL_ENCRYPTION_ENABLED; import static org.kaazing.gateway.service.TransportOptionNames.SUPPORTED_PROTOCOLS; import static org.kaazing.gateway.service.TransportOptionNames.WS_PROTOCOL_VERSION; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import org.kaazing.gateway.management.context.DefaultManagementContext; import org.kaazing.gateway.management.gateway.GatewayManagementBean; import org.kaazing.gateway.security.CrossSiteConstraintContext; import org.kaazing.gateway.security.RealmContext; import org.kaazing.gateway.service.AcceptOptionsContext; import org.kaazing.gateway.service.ConnectOptionsContext; import org.kaazing.gateway.service.ServiceContext; import org.kaazing.gateway.service.ServiceProperties; import org.kaazing.gateway.util.Utils; @SuppressWarnings("deprecation") public class 
ServiceConfigurationBeanImpl implements ServiceConfigurationBean { // private static final AtomicInteger serviceConfigurationIds = new AtomicInteger(0); private final ServiceContext serviceContext; private final GatewayManagementBean gatewayBean; private final int id; public ServiceConfigurationBeanImpl(ServiceContext serviceContext, GatewayManagementBean gatewayBean) { this.serviceContext = serviceContext; this.gatewayBean = gatewayBean; this.id = DefaultManagementContext.getNextServiceIndex(serviceContext); } @Override public GatewayManagementBean getGatewayManagementBean() { return gatewayBean; } @Override public int getId() { return id; } @Override public String getType() { return serviceContext.getServiceType(); } @Override public String getServiceName() { String name = serviceContext.getServiceName(); return name == null ? "" : name; } @Override public String getServiceDescription() { String desc = serviceContext.getServiceDescription(); return desc == null ? "" : desc; } @Override public String getAccepts() { if (serviceContext.supportsAccepts()) { Collection<String> accepts = serviceContext.getAccepts(); return accepts == null ? 
"" : new JSONArray(accepts).toString(); } else { return null; } } @Override public String getAcceptOptions() { if (serviceContext.supportsAccepts()) { AcceptOptionsContext context = serviceContext.getAcceptOptionsContext(); JSONObject jsonOptions = new JSONObject(); JSONObject jsonObj; try { if (context != null) { Map<String, Object> acceptOptions = context.asOptionsMap(); Map<String, String> binds = context.getBinds(); if ((binds != null) && !binds.isEmpty()) { jsonObj = new JSONObject(); for (String key : binds.keySet()) { jsonObj.put(key, binds.get(key)); } jsonOptions.put("binds", jsonObj); } String[] sslCiphers = (String[]) acceptOptions.remove("ssl.ciphers"); if (sslCiphers != null) { String cipherString = Utils.asCommaSeparatedString(asList(sslCiphers)); if (cipherString != null && cipherString.length() > 0) { jsonOptions.put("ssl.ciphers", cipherString); } } boolean isSslEncryptionEnabled = (Boolean) acceptOptions.remove("ssl.encryptionEnabled"); jsonOptions.put("ssl.encryption", isSslEncryptionEnabled ? "enabled" : "disabled"); boolean wantClientAuth = (Boolean) acceptOptions.remove("ssl.wantClientAuth"); boolean needClientAuth = (Boolean) acceptOptions.remove("ssl.needClientAuth"); if (needClientAuth) { jsonOptions.put("ssl.verify-client", "required"); } else if (wantClientAuth) { jsonOptions.put("ssl.verify-client", "optional"); } else { jsonOptions.put("ssl.verify-client", "none"); } // NOTE: we do NOT (at least in 4.0) show the WS extensions // or WS protocols to users (Command Center or otherwise), so don't send them out. 
// List<String> wsExtensions = context.getWsExtensions(); // if ((wsExtensions != null) && !wsExtensions.isEmpty()) { // jsonArray = new JSONArray(); // for (String wsExtension : wsExtensions) { // jsonArray.put(wsExtension); // } // jsonOptions.put("ws-extensions", jsonArray); // } // List<String> wsProtocols = context.getWsProtocols(); // if ((wsProtocols != null) && !wsProtocols.isEmpty()) { // jsonArray = new JSONArray(); // for (String wsProtocol : wsProtocols) { // jsonArray.put(wsProtocol); // } // jsonOptions.put("ws-protocols", jsonArray); // } acceptOptions.remove(SUPPORTED_PROTOCOLS); jsonOptions.put("ws.maximum.message.size", acceptOptions.remove("ws.maxMessageSize")); Long wsInactivityTimeout = (Long) acceptOptions.remove("ws.inactivityTimeout"); if (wsInactivityTimeout != null) { jsonOptions.put("ws.inactivity.timeout", wsInactivityTimeout); } Integer httpKeepAlive = (Integer) acceptOptions.remove("http[http/1.1].keepAliveTimeout"); if (httpKeepAlive != null) { jsonOptions.put("http.keepalive.timeout", httpKeepAlive); } String pipeTransport = (String) acceptOptions.remove("pipe.transport"); if (pipeTransport != null) { jsonOptions.put("pipe.transport", pipeTransport); } String tcpTransport = (String) acceptOptions.remove("tcp.transport"); if (tcpTransport != null) { jsonOptions.put("tcp.transport", tcpTransport); } String sslTransport = (String) acceptOptions.remove("ssl.transport"); if (sslTransport != null) { jsonOptions.put("ssl.transport", sslTransport); } String httpTransport = (String) acceptOptions.remove("http[http/1.1].transport"); if (httpTransport != null) { jsonOptions.put("http.transport", httpTransport); } long tcpMaxOutboundRate = (Long) acceptOptions.remove("tcp.maximumOutboundRate"); jsonOptions.put("tcp.maximum.outbound.rate", tcpMaxOutboundRate); for (Entry<String, Object> entry : acceptOptions.entrySet()) { String key = entry.getKey(); if (key.startsWith("ws") && (key.endsWith("maxMessageSize") || key.endsWith("inactivityTimeout") 
|| key.endsWith("extensions"))) { // skip over options already seen with the base ws.* set of options continue; } Object value = entry.getValue(); if (value instanceof String[]) { jsonOptions.put(key, Utils.asCommaSeparatedString(asList((String[]) value))); } else { jsonOptions.put(key, value); } } } } catch (Exception ex) { // This is only for JSON exceptions, but there should be no way to // hit this. } return jsonOptions.toString(); } else { return null; } } @Override public String getBalances() { Collection<String> balances = serviceContext.getBalances(); return balances == null ? "" : new JSONArray(balances).toString(); } @Override public String getConnects() { if (serviceContext.supportsConnects()) { Collection<String> connects = serviceContext.getConnects(); return connects == null ? "" : new JSONArray(connects).toString(); } else { return null; } } @Override public String getConnectOptions() { if (serviceContext.supportsConnects()) { ConnectOptionsContext context = serviceContext.getConnectOptionsContext(); JSONObject jsonOptions = new JSONObject(); try { if (context != null) { Map<String, Object> connectOptions = context.asOptionsMap(); String[] sslCiphersArray = (String[]) connectOptions.remove("ssl.ciphers"); if (sslCiphersArray != null) { List<String> sslCiphers = Arrays.asList(sslCiphersArray); if (sslCiphers.size() > 0) { jsonOptions.put("ssl.ciphers", sslCiphers); } } String[] sslProtocolsArray = (String[]) connectOptions.remove("ssl.protocols"); if (sslProtocolsArray != null) { List<String> sslProtocols = Arrays.asList(sslProtocolsArray); if (sslProtocols.size() > 0) { jsonOptions.put("ssl.protocols", sslProtocols); } } // NOTE: we do NOT (at least in 4.0) show the WS extensions // or WS protocols to users (Command Center or otherwise), so don't send them out. 
//WebSocketWireProtocol protocol = connectOptions.getWebSocketWireProtocol(); //sb.append("websocket-wire-protocol=" + protocol); connectOptions.remove(WS_PROTOCOL_VERSION); String wsVersion = (String) connectOptions.remove("ws.version"); if (wsVersion != null) { jsonOptions.put("ws.version", wsVersion); } String pipeTransport = (String) connectOptions.remove("pipe.transport"); if (pipeTransport != null) { jsonOptions.put("pipe.transport", pipeTransport); } String tcpTransport = (String) connectOptions.remove("tcp.transport"); if (tcpTransport != null) { jsonOptions.put("tcp.transport", tcpTransport); } String sslTransport = (String) connectOptions.remove("ssl.transport"); if (sslTransport != null) { jsonOptions.put("ssl.transport", sslTransport); } String httpTransport = (String) connectOptions.remove("http[http/1.1].transport"); if (httpTransport != null) { jsonOptions.put("http.transport", httpTransport); } Long inactivityTimeout = (Long) connectOptions.remove(INACTIVITY_TIMEOUT); if (inactivityTimeout != null) { jsonOptions.put("ws.inactivity.timeout", inactivityTimeout); } Boolean sslEncryptionEnabled = (Boolean) connectOptions.remove(SSL_ENCRYPTION_ENABLED); if ((sslEncryptionEnabled != null) && Boolean.FALSE.equals(sslEncryptionEnabled)) { jsonOptions.put("ssl.encryption", "disabled"); } else { jsonOptions.put("ssl.encryption", "enabled"); } String udpInterface = (String) connectOptions.remove("udp.interface"); if (udpInterface != null) { jsonOptions.put("udp.interface", udpInterface); } Integer httpKeepaliveTimeout = (Integer) connectOptions.remove(HTTP_KEEP_ALIVE_TIMEOUT_KEY); if (httpKeepaliveTimeout != null) { jsonOptions.put("http.keepalive.timeout", httpKeepaliveTimeout); } Boolean httpKeepalive = (Boolean) connectOptions.remove(HTTP_KEEP_ALIVE); if (httpKeepalive != null) { if (Boolean.FALSE.equals(httpKeepalive)) { jsonOptions.put("http.keepalive", "disabled"); } else { jsonOptions.put("http.keepalive", "enabled"); } } for (Entry<String, Object> 
entry : connectOptions.entrySet()) { String key = entry.getKey(); Object value = entry.getValue(); if (value instanceof String[]) { jsonOptions.put(key, Utils.asCommaSeparatedString(asList((String[]) value))); } else { jsonOptions.put(key, value); } } } } catch (Exception ex) { // This is only for JSON exceptions, but there should be no way to // hit this. } return jsonOptions.toString(); } else { return null; } } @Override public String getCrossSiteConstraints() { Map<String, ? extends Map<String, ? extends CrossSiteConstraintContext>> crossSiteConstraints = serviceContext.getCrossSiteConstraints(); JSONArray jsonConstraints = new JSONArray(); if ((crossSiteConstraints != null) && !crossSiteConstraints.isEmpty()) { Collection<? extends Map<String, ? extends CrossSiteConstraintContext>> crossSiteConstraintsValues = crossSiteConstraints.values(); if ((crossSiteConstraintsValues != null) && !crossSiteConstraintsValues.isEmpty()) { Map<String, ? extends CrossSiteConstraintContext> constraintMap = crossSiteConstraintsValues.iterator().next(); Collection<? extends CrossSiteConstraintContext> constraints = constraintMap.values(); for (CrossSiteConstraintContext constraint : constraints) { JSONObject jsonObj = new JSONObject(); String allowHeaders = constraint.getAllowHeaders(); String allowMethods = constraint.getAllowMethods(); String allowOrigin = constraint.getAllowOrigin(); Integer maxAge = constraint.getMaximumAge(); try { jsonObj.put("allow-origin", allowOrigin); jsonObj.put("allow-methods", allowMethods); if (allowHeaders != null) { jsonObj.put("allow-headers", allowHeaders); } if (maxAge != null) { jsonObj.put("maximum-age", maxAge); } jsonConstraints.put(jsonObj); } catch (Exception ex) { // It is a programming error to get to here. We should never // get here, because we're just adding strings above. 
} } } } return jsonConstraints.toString(); } @Override public String getMimeMappings() { if (serviceContext.supportsMimeMappings()) { Map<String, String> mimeMappings = serviceContext.getMimeMappings(); return mimeMappings == null ? "" : new JSONObject(mimeMappings).toString(); } else { return null; } } @Override public String getProperties() { ServiceProperties properties = serviceContext.getProperties(); return properties == null ? "" : asJSONObject(properties).toString(); } @Override public String getRequiredRoles() { Collection<String> roles = asList(serviceContext.getRequireRoles()); return roles == null ? "" : new JSONArray(roles).toString(); } @Override public String getServiceRealm() { RealmContext realm = serviceContext.getServiceRealm(); if (realm != null) { return realm.getName(); } return ""; } private static JSONObject asJSONObject(ServiceProperties properties) { JSONObject result = new JSONObject(); try { for (String name : properties.simplePropertyNames()) { result.put(name, properties.get(name)); } for (String name : properties.nestedPropertyNames()) { for (ServiceProperties nested : properties.getNested(name)) { result.append(name, asJSONObject(nested)); } } } catch (JSONException e) { // can't happen (unless ServiceProperties has a bug and incorrectly returns a null property name) throw new RuntimeException(e); } return result; } }
apache-2.0
venkateshamurthy/java-quantiles
src/test/java/org/apache/commons/math3/stat/descriptive/rank/MinTest.java
2640
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.math3.stat.descriptive.rank; import org.apache.commons.math3.stat.descriptive.StorelessUnivariateStatisticAbstractTest; import org.apache.commons.math3.stat.descriptive.UnivariateStatistic; import org.junit.Assert; import org.junit.Test; /** * Test cases for the {@link UnivariateStatistic} class. 
*/ public class MinTest extends StorelessUnivariateStatisticAbstractTest{ protected Min stat; /** * {@inheritDoc} */ @Override public UnivariateStatistic getUnivariateStatistic() { return new Min(); } /** * {@inheritDoc} */ @Override public double expectedValue() { return this.min; } @Test public void testSpecialValues() { double[] testArray = {0d, Double.NaN, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY}; Min min = new Min(); Assert.assertTrue(Double.isNaN(min.getResult())); min.increment(testArray[0]); Assert.assertEquals(0d, min.getResult(), 0); min.increment(testArray[1]); Assert.assertEquals(0d, min.getResult(), 0); min.increment(testArray[2]); Assert.assertEquals(0d, min.getResult(), 0); min.increment(testArray[3]); Assert.assertEquals(Double.NEGATIVE_INFINITY, min.getResult(), 0); Assert.assertEquals(Double.NEGATIVE_INFINITY, min.evaluate(testArray), 0); } @Test public void testNaNs() { Min min = new Min(); double nan = Double.NaN; Assert.assertEquals(2d, min.evaluate(new double[]{nan, 2d, 3d}), 0); Assert.assertEquals(1d, min.evaluate(new double[]{1d, nan, 3d}), 0); Assert.assertEquals(1d, min.evaluate(new double[]{1d, 2d, nan}), 0); Assert.assertTrue(Double.isNaN(min.evaluate(new double[]{nan, nan, nan}))); } }
apache-2.0
moujian/config-toolkit
config-toolkit/src/main/java/com/dangdang/config/service/file/protocol/ProtocolNames.java
336
package com.dangdang.config.service.file.protocol; /** * @author <a href="mailto:wangyuxuan@dangdang.com">Yuxuan Wang</a> * */ public final class ProtocolNames { public static String FILE = "file"; public static String CLASSPATH = "classpath"; public static String HTTP = "http"; public static String HTTPS = "https"; }
apache-2.0
MichaelEvans/assertj-android
assertj-android/src/main/java/org/assertj/android/api/graphics/drawable/AbstractDrawableAssert.java
5242
// Copyright 2013 Square, Inc. package org.assertj.android.api.graphics.drawable; import android.annotation.TargetApi; import android.graphics.Rect; import android.graphics.drawable.Drawable; import org.assertj.core.api.AbstractAssert; import static android.os.Build.VERSION_CODES.HONEYCOMB; import static android.os.Build.VERSION_CODES.KITKAT; import static org.assertj.core.api.Assertions.assertThat; public abstract class AbstractDrawableAssert<S extends AbstractDrawableAssert<S, A>, A extends Drawable> extends AbstractAssert<S, A> { protected AbstractDrawableAssert(A actual, Class<S> selfType) { super(actual, selfType); } public S hasBounds(Rect bounds) { isNotNull(); Rect actualBounds = actual.getBounds(); assertThat(actualBounds) // .overridingErrorMessage("Expected bounds <%s> but was <%s>.", bounds, actualBounds) // .isEqualTo(bounds); return myself; } @TargetApi(HONEYCOMB) public S hasCallback(Drawable.Callback callback) { isNotNull(); Drawable.Callback actualCallback = actual.getCallback(); assertThat(actualCallback) // .overridingErrorMessage("Expected callback <%s> but was <%s>.", callback, actualCallback) // .isSameAs(callback); return myself; } public S hasChangingConfigurations(int mask) { isNotNull(); int actualMask = actual.getChangingConfigurations(); assertThat(actualMask) // .overridingErrorMessage("Expected changing configurations <%s> but was <%s>.", mask, actualMask) // .isEqualTo(mask); return myself; } public S hasConstantState(Drawable.ConstantState state) { isNotNull(); Drawable.ConstantState actualState = actual.getConstantState(); assertThat(actualState) // .overridingErrorMessage("Expected constant state <%s> but was <%s>.", state, actualState) // .isEqualTo(state); return myself; } public S hasIntrinsicHeight(int height) { isNotNull(); int actualHeight = actual.getIntrinsicHeight(); assertThat(actualHeight) // .overridingErrorMessage("Expected intrinsic height <%s> but was <%s>.", height, actualHeight) // .isEqualTo(height); return 
myself; } public S hasIntrinsicWidth(int width) { isNotNull(); int actualWidth = actual.getIntrinsicWidth(); assertThat(actualWidth) // .overridingErrorMessage("Expected intrinsic width <%s> but was <%s>.", width, actualWidth) // .isEqualTo(width); return myself; } public S hasLevel(int level) { isNotNull(); int actualLevel = actual.getLevel(); assertThat(actualLevel) // .overridingErrorMessage("Expected level <%s> but was <%s>.", level, actualLevel) // .isEqualTo(level); return myself; } public S hasMinimumHeight(int height) { isNotNull(); int actualHeight = actual.getMinimumHeight(); assertThat(actualHeight) // .overridingErrorMessage("Expected minimum height <%s> but was <%s>.", height, actualHeight) // .isEqualTo(height); return myself; } public S hasMinimumWidth(int width) { isNotNull(); int actualHeight = actual.getMinimumWidth(); assertThat(actualHeight) // .overridingErrorMessage("Expected minimum width <%s> but was <%s>.", width, actualHeight) // .isEqualTo(width); return myself; } public S hasOpacity(int opacity) { isNotNull(); int actualOpacity = actual.getOpacity(); assertThat(actualOpacity) // .overridingErrorMessage("Expected opacity <%s> but was <%s>.", opacity, actualOpacity) // .isEqualTo(opacity); return myself; } public S isStateful() { isNotNull(); assertThat(actual.isStateful()) // .overridingErrorMessage("Expected to be stateful but was not.") // .isTrue(); return myself; } public S isNotStateful() { isNotNull(); assertThat(actual.isStateful()) // .overridingErrorMessage("Expected to not be stateful but was.") // .isFalse(); return myself; } public S isVisible() { isNotNull(); assertThat(actual.isVisible()) // .overridingErrorMessage("Expected to be visible but was not.") // .isTrue(); return myself; } public S isNotVisible() { isNotNull(); assertThat(actual.isVisible()) // .overridingErrorMessage("Expected to not be visible but was.") // .isFalse(); return myself; } @TargetApi(KITKAT) public S hasAlpha(int alpha) { isNotNull(); int 
actualAlpha = actual.getAlpha(); assertThat(actualAlpha) // .overridingErrorMessage("Expected alpha <%s> but was <%s>.", alpha, actualAlpha) // .isEqualTo(alpha); return myself; } @TargetApi(KITKAT) public S isAutoMirrored() { isNotNull(); assertThat(actual.isAutoMirrored()) // .overridingErrorMessage("Expected to be auto mirrored but was not.") // .isTrue(); return myself; } @TargetApi(KITKAT) public S isNotAutoMirrored() { isNotNull(); assertThat(actual.isAutoMirrored()) // .overridingErrorMessage("Expected to not be auto mirrored but was.") // .isFalse(); return myself; } }
apache-2.0
deroneriksson/incubator-systemml
src/main/java/org/apache/sysml/runtime/controlprogram/parfor/Task.java
4652
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sysml.runtime.controlprogram.parfor; import java.io.Serializable; import java.util.LinkedList; import java.util.List; import java.util.StringTokenizer; import org.apache.sysml.runtime.instructions.cp.IntObject; /** * A task is a logical group of one or multiple iterations (each iteration is assigned to exactly one task). * There, each single task is executed sequentially. See TaskPartitioner for how tasks are created and * ParWorker for how those tasks are eventually executed. 
* * NOTE: (Extension possibility: group of statements) * */ public class Task implements Serializable { private static final long serialVersionUID = 2815832451487164284L; public enum TaskType { RANGE, SET } public static final int MAX_VARNAME_SIZE = 256; public static final int MAX_TASK_SIZE = Integer.MAX_VALUE-1; private String _iterVar; private TaskType _type; private LinkedList<IntObject> _iterations; //each iteration is specified as an ordered set of index values public Task() { //default constructor for serialize } public Task( String iterVar, TaskType type ) { if( iterVar.length() > MAX_VARNAME_SIZE ) throw new RuntimeException("Cannot create task, MAX_VARNAME_SIZE exceeded."); _iterVar = iterVar; _type = type; _iterations = new LinkedList<>(); } public void addIteration( IntObject indexVal ) { if( size() >= MAX_TASK_SIZE ) throw new RuntimeException("Cannot add iteration, MAX_TASK_SIZE reached."); _iterations.addLast( indexVal ); } public List<IntObject> getIterations() { return _iterations; } public TaskType getType() { return _type; } public String getVarName() { return _iterVar; } public int size() { return _iterations.size(); } @Override public String toString() { return toFormatedString(); } public String toFormatedString() { StringBuilder sb = new StringBuilder(); sb.append("task (type="); sb.append(_type); sb.append(", iterations={"); int count=0; for( IntObject dat : _iterations ) { if( count!=0 ) sb.append(";"); sb.append("["); sb.append(_iterVar); sb.append("="); sb.append(dat.getLongValue()); sb.append("]"); count++; } sb.append("})"); return sb.toString(); } public String toCompactString() { StringBuilder sb = new StringBuilder( ); sb.append(_type); if( size() > 0 ) { sb.append("."); sb.append(_iterVar); sb.append(".{"); int count = 0; for( IntObject dat : _iterations ) { if( count!=0 ) sb.append(","); sb.append(dat.getLongValue()); count++; } sb.append("}"); } return sb.toString(); } public String toCompactString( int maxDigits ) { StringBuilder 
sb = new StringBuilder( ); sb.append(_type); if( size() > 0 ) { sb.append("."); sb.append(_iterVar); sb.append(".{"); int count = 0; for( IntObject dat : _iterations ) { if( count!=0 ) sb.append(","); String tmp = String.valueOf(dat.getLongValue()); for( int k=tmp.length(); k<maxDigits; k++ ) sb.append("0"); sb.append(tmp); count++; } sb.append("}"); } return sb.toString(); } public static Task parseCompactString( String stask ) { StringTokenizer st = new StringTokenizer( stask.trim(), "." ); TaskType type = TaskType.valueOf(st.nextToken()); String meta = st.nextToken(); Task newTask = new Task(meta, type); //iteration data String sdata = st.nextToken(); sdata = sdata.substring(1,sdata.length()-1); // remove brackets StringTokenizer st2 = new StringTokenizer(sdata, ","); while( st2.hasMoreTokens() ) { //create new iteration String lsdata = st2.nextToken(); IntObject ldata = new IntObject(Integer.parseInt(lsdata)); newTask.addIteration(ldata); } return newTask; } }
apache-2.0
shaotuanchen/sunflower_exp
tools/source/gcc-4.2.4/libjava/testsuite/libjava.compile/pr15656.java
194
// This used to cause a gcj crash in error_if_numeric_overflow. public class pr15656 { public static void defineClass () { Object ctor = new Object; } }
bsd-3-clause
dsibournemouth/autoweka
weka-3.7.7/src/main/java/weka/gui/beans/TextListener.java
1237
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TextListener.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.gui.beans; import java.util.EventListener; /** * Interface to something that can process a TextEvent * * @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a> * @version $Revision: 8034 $ * @since 1.0 * @see EventListener */ public interface TextListener extends EventListener { /** * Accept and process a text event * * @param e a <code>TextEvent</code> value */ void acceptText(TextEvent e); }
gpl-3.0
1fechner/FeatureExtractor
sources/FeatureExtractor/lib/mysql-connector-java-5.1.38/src/com/mysql/jdbc/JDBC4ServerPreparedStatement.java
5374
/* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. The MySQL Connector/J is licensed under the terms of the GPLv2 <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most MySQL Connectors. There are special exceptions to the terms and conditions of the GPLv2 as it is applied to this software, see the FOSS License Exception <http://www.mysql.com/about/legal/licensing/foss-exception.html>. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ package com.mysql.jdbc; import java.io.Reader; import java.sql.NClob; import java.sql.RowId; import java.sql.SQLXML; import java.sql.SQLException; import com.mysql.jdbc.Connection; import com.mysql.jdbc.MysqlDefs; import com.mysql.jdbc.SQLError; import com.mysql.jdbc.ServerPreparedStatement; import com.mysql.jdbc.ServerPreparedStatement.BindValue; public class JDBC4ServerPreparedStatement extends ServerPreparedStatement { public JDBC4ServerPreparedStatement(MySQLConnection conn, String sql, String catalog, int resultSetType, int resultSetConcurrency) throws SQLException { super(conn, sql, catalog, resultSetType, resultSetConcurrency); } /** * @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader, long) */ public void setNCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { // can't take if characterEncoding isn't utf8 if (!this.charEncoding.equalsIgnoreCase("UTF-8") && 
!this.charEncoding.equalsIgnoreCase("utf8")) { throw SQLError.createSQLException("Can not call setNCharacterStream() when connection character set isn't UTF-8", getExceptionInterceptor()); } checkClosed(); if (reader == null) { setNull(parameterIndex, java.sql.Types.BINARY); } else { BindValue binding = getBinding(parameterIndex, true); setType(binding, MysqlDefs.FIELD_TYPE_BLOB); binding.value = reader; binding.isNull = false; binding.isLongData = true; if (this.connection.getUseStreamLengthsInPrepStmts()) { binding.bindLength = length; } else { binding.bindLength = -1; } } } /** * @see java.sql.PreparedStatement#setNClob(int, java.sql.NClob) */ public void setNClob(int parameterIndex, NClob x) throws SQLException { setNClob(parameterIndex, x.getCharacterStream(), this.connection.getUseStreamLengthsInPrepStmts() ? x.length() : -1); } /** * JDBC 4.0 Set a NCLOB parameter. * * @param parameterIndex * the first parameter is 1, the second is 2, ... * @param reader * the java reader which contains the UNICODE data * @param length * the number of characters in the stream * * @throws SQLException * if a database error occurs */ public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { // can't take if characterEncoding isn't utf8 if (!this.charEncoding.equalsIgnoreCase("UTF-8") && !this.charEncoding.equalsIgnoreCase("utf8")) { throw SQLError.createSQLException("Can not call setNClob() when connection character set isn't UTF-8", getExceptionInterceptor()); } checkClosed(); if (reader == null) { setNull(parameterIndex, java.sql.Types.NCLOB); } else { BindValue binding = getBinding(parameterIndex, true); setType(binding, MysqlDefs.FIELD_TYPE_BLOB); binding.value = reader; binding.isNull = false; binding.isLongData = true; if (this.connection.getUseStreamLengthsInPrepStmts()) { binding.bindLength = length; } else { binding.bindLength = -1; } } } /** * @see java.sql.PreparedStatement#setNString(int, java.lang.String) */ public void 
setNString(int parameterIndex, String x) throws SQLException { if (this.charEncoding.equalsIgnoreCase("UTF-8") || this.charEncoding.equalsIgnoreCase("utf8")) { setString(parameterIndex, x); } else { throw SQLError.createSQLException("Can not call setNString() when connection character set isn't UTF-8", getExceptionInterceptor()); } } public void setRowId(int parameterIndex, RowId x) throws SQLException { JDBC4PreparedStatementHelper.setRowId(this, parameterIndex, x); } public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { JDBC4PreparedStatementHelper.setSQLXML(this, parameterIndex, xmlObject); } }
lgpl-2.1
nikhilvibhav/camel
components/camel-beanstalk/src/main/java/org/apache/camel/component/beanstalk/ConnectionSettings.java
4811
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.beanstalk;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Scanner;

import com.surftools.BeanstalkClient.Client;
import com.surftools.BeanstalkClientImpl.ClientImpl;

/**
 * Represents the connection to Beanstalk.
 * <p/>
 * Along with the list of tubes it may watch.
 * <p/>
 * The tube specification is a '+'-separated list of tube names; each name
 * may be URL-encoded and is decoded as UTF-8.
 */
public class ConnectionSettings {
    final String host;
    final int port;
    // Decoded tube names, in the order they appeared in the spec.
    final String[] tubes;

    public ConnectionSettings(final String tube) {
        this(Client.DEFAULT_HOST, Client.DEFAULT_PORT, tube);
    }

    public ConnectionSettings(final String host, final String tube) {
        this(host, Client.DEFAULT_PORT, tube);
    }

    /**
     * @param host Beanstalk host
     * @param port Beanstalk port
     * @param tube '+'-separated, possibly URL-encoded tube name list
     */
    public ConnectionSettings(final String host, final int port, final String tube) {
        this.host = host;
        this.port = port;

        // Split on '+' and URL-decode each tube name.
        final Scanner scanner = new Scanner(tube);
        scanner.useDelimiter("\\+");
        final ArrayList<String> buffer = new ArrayList<>();
        while (scanner.hasNext()) {
            final String tubeRaw = scanner.next();
            try {
                buffer.add(URLDecoder.decode(tubeRaw, "UTF-8"));
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is guaranteed by the JVM; keep the raw name as a best-effort fallback.
                buffer.add(tubeRaw);
            }
        }
        this.tubes = buffer.toArray(new String[buffer.size()]);
        scanner.close();
    }

    /**
     * Returns the {@link Client} instance ready for writing operations, e.g. "put".
     * <p/>
     * <code>use(tube)</code> is applied during this call.
     *
     * @return {@link Client} instance
     * @throws IllegalArgumentException the exception is raised when this ConnectionSettings has more than one tube.
     */
    public Client newWritingClient() throws IllegalArgumentException {
        if (tubes.length > 1) {
            throw new IllegalArgumentException("There must be only one tube specified for Beanstalk producer");
        }

        // No tube given: fall back to the component-wide default tube.
        final String tube = tubes.length > 0 ? tubes[0] : BeanstalkComponent.DEFAULT_TUBE;

        final ClientImpl client = new ClientImpl(host, port);

        /* FIXME: There is a problem in JavaBeanstalkClient 1.4.4 (at least in 1.4.4),
           when using uniqueConnectionPerThread=false. The symptom is that ProtocolHandler
           breaks the protocol, reading incomplete messages. To be investigated. */
        //client.setUniqueConnectionPerThread(false);

        client.useTube(tube);

        return client;
    }

    /**
     * Returns the {@link Client} instance for reading operations with all the tubes aleady watched
     * <p/>
     * <code>watch(tube)</code> is applied for every tube during this call.
     *
     * @param useBlockIO configuration param to {@link Client}
     * @return {@link Client} instance
     */
    public Client newReadingClient(boolean useBlockIO) {
        final ClientImpl client = new ClientImpl(host, port, useBlockIO);

        /* FIXME: There is a problem in JavaBeanstalkClient 1.4.4 (at least in 1.4.4),
           when using uniqueConnectionPerThread=false. The symptom is that ProtocolHandler
           breaks the protocol, reading incomplete messages. To be investigated. */
        //client.setUniqueConnectionPerThread(false);

        for (String tube : tubes) {
            client.watch(tube);
        }

        return client;
    }

    @Override
    public boolean equals(final Object obj) {
        // Equal iff host, port and the (ordered) tube list all match.
        if (obj instanceof ConnectionSettings) {
            final ConnectionSettings other = (ConnectionSettings) obj;
            return other.host.equals(host) && other.port == port && Arrays.equals(other.tubes, tubes);
        }
        return false;
    }

    @Override
    public int hashCode() {
        // Consistent with equals(): combines host, port and tubes.
        return 41 * (41 * (41 + host.hashCode()) + port) + Arrays.hashCode(tubes);
    }

    @Override
    public String toString() {
        return "beanstalk://" + host + ":" + port + "/" + Arrays.toString(tubes);
    }
}
apache-2.0
wangcy6/storm_app
frame/kafka-0.11.0/kafka-0.11.0.1-src/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java
4430
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.connect.runtime; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.connect.errors.IllegalWorkerStateException; import org.apache.kafka.connect.sink.SinkTaskContext; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; public class WorkerSinkTaskContext implements SinkTaskContext { private Map<TopicPartition, Long> offsets; private long timeoutMs; private KafkaConsumer<byte[], byte[]> consumer; private final Set<TopicPartition> pausedPartitions; private boolean commitRequested; public WorkerSinkTaskContext(KafkaConsumer<byte[], byte[]> consumer) { this.offsets = new HashMap<>(); this.timeoutMs = -1L; this.consumer = consumer; this.pausedPartitions = new HashSet<>(); } @Override public void offset(Map<TopicPartition, Long> offsets) { this.offsets.putAll(offsets); } @Override public void offset(TopicPartition tp, long offset) { offsets.put(tp, offset); } public void clearOffsets() { offsets.clear(); } /** * Get offsets that the SinkTask has submitted to be reset. Used by the Kafka Connect framework. 
* @return the map of offsets */ public Map<TopicPartition, Long> offsets() { return offsets; } @Override public void timeout(long timeoutMs) { this.timeoutMs = timeoutMs; } /** * Get the timeout in milliseconds set by SinkTasks. Used by the Kafka Connect framework. * @return the backoff timeout in milliseconds. */ public long timeout() { return timeoutMs; } @Override public Set<TopicPartition> assignment() { if (consumer == null) { throw new IllegalWorkerStateException("SinkTaskContext may not be used to look up partition assignment until the task is initialized"); } return consumer.assignment(); } @Override public void pause(TopicPartition... partitions) { if (consumer == null) { throw new IllegalWorkerStateException("SinkTaskContext may not be used to pause consumption until the task is initialized"); } try { for (TopicPartition partition : partitions) pausedPartitions.add(partition); consumer.pause(Arrays.asList(partitions)); } catch (IllegalStateException e) { throw new IllegalWorkerStateException("SinkTasks may not pause partitions that are not currently assigned to them.", e); } } @Override public void resume(TopicPartition... partitions) { if (consumer == null) { throw new IllegalWorkerStateException("SinkTaskContext may not be used to resume consumption until the task is initialized"); } try { for (TopicPartition partition : partitions) pausedPartitions.remove(partition); consumer.resume(Arrays.asList(partitions)); } catch (IllegalStateException e) { throw new IllegalWorkerStateException("SinkTasks may not resume partitions that are not currently assigned to them.", e); } } public Set<TopicPartition> pausedPartitions() { return pausedPartitions; } @Override public void requestCommit() { commitRequested = true; } public boolean isCommitRequested() { return commitRequested; } public void clearCommitRequest() { commitRequested = false; } }
apache-2.0
bclozel/spring-boot
spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/liquibase/LiquibaseDataSource.java
1403
/* * Copyright 2012-2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.autoconfigure.liquibase; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.springframework.beans.factory.annotation.Qualifier; /** * Qualifier annotation for a DataSource to be injected in to Liquibase. If used for a * second data source, the other (main) one would normally be marked as {@code @Primary}. * * @author Eddú Meléndez * @since 1.4.1 */ @Target({ ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER, ElementType.TYPE, ElementType.ANNOTATION_TYPE }) @Retention(RetentionPolicy.RUNTIME) @Documented @Qualifier public @interface LiquibaseDataSource { }
apache-2.0
tkpanther/ignite
modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectContinuousProcessorTest.java
13187
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal;

import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.events.Event;
import org.apache.ignite.events.EventType;
import org.apache.ignite.internal.util.typedef.P2;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteRunnable;
import org.apache.ignite.resources.IgniteInstanceResource;

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_RECONNECTED;

/**
 * Verifies that continuous-processor subscriptions (remote event listeners,
 * remote message listeners and cache continuous queries) survive a client
 * reconnect, and that stopped/closed subscriptions stay stopped afterwards.
 */
public class IgniteClientReconnectContinuousProcessorTest extends IgniteClientReconnectAbstractTest {
    /** Counted down by remote message listeners; static because those listeners run as deserialized copies. */
    private static volatile CountDownLatch latch;

    /** {@inheritDoc} */
    @Override protected int serverCount() {
        return 3;
    }

    /** {@inheritDoc} */
    @Override protected int clientCount() {
        return 1;
    }

    /**
     * @throws Exception If failed.
     */
    public void testEventListenerReconnect() throws Exception {
        // grid(serverCount()) is the single client node (servers occupy indexes 0..serverCount()-1).
        Ignite client = grid(serverCount());

        assertTrue(client.cluster().localNode().isClient());

        Ignite srv = clientRouter(client);

        TestTcpDiscoverySpi srvSpi = spi(srv);

        EventListener lsnr = new EventListener();

        UUID opId = client.events().remoteListen(lsnr, null, EventType.EVT_JOB_STARTED);

        lsnr.latch = new CountDownLatch(1);

        log.info("Created remote listener: " + opId);

        final CountDownLatch reconnectLatch = new CountDownLatch(1);

        client.events().localListen(new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) {
                    info("Reconnected: " + evt);

                    reconnectLatch.countDown();
                }

                return true;
            }
        }, EVT_CLIENT_NODE_RECONNECTED);

        // Force a disconnect/reconnect cycle by failing the client on its router server.
        srvSpi.failNode(client.cluster().localNode().id(), null);

        waitReconnectEvent(reconnectLatch);

        // Listener must still fire after reconnect, for jobs started from both sides.
        client.compute().run(new DummyJob());

        assertTrue(lsnr.latch.await(5000, MILLISECONDS));

        lsnr.latch = new CountDownLatch(1);

        srv.compute().run(new DummyJob());

        assertTrue(lsnr.latch.await(5000, MILLISECONDS));

        lsnr.latch = new CountDownLatch(1);

        log.info("Stop listen, should not get events anymore.");

        client.events().stopRemoteListen(opId);

        assertFalse(lsnr.latch.await(3000, MILLISECONDS));
    }

    /**
     * @throws Exception If failed.
     */
    public void testMessageListenerReconnectAndStopFromServer() throws Exception {
        testMessageListenerReconnect(false);
    }

    /**
     * @throws Exception If failed.
     */
    public void testMessageListenerReconnectAndStopFromClient() throws Exception {
        testMessageListenerReconnect(true);
    }

    /**
     * @param stopFromClient If {@code true} stops listener from client node, otherwise from server.
     * @throws Exception If failed.
     */
    private void testMessageListenerReconnect(boolean stopFromClient) throws Exception {
        Ignite client = grid(serverCount());

        assertTrue(client.cluster().localNode().isClient());

        Ignite srv = clientRouter(client);

        TestTcpDiscoverySpi srvSpi = spi(srv);

        final String topic = "testTopic";

        MessageListener locLsnr = new MessageListener();

        UUID opId = client.message().remoteListen(topic, new RemoteMessageListener());

        client.message().localListen(topic, locLsnr);

        final CountDownLatch reconnectLatch = new CountDownLatch(1);

        client.events().localListen(new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) {
                    info("Reconnected: " + evt);

                    reconnectLatch.countDown();
                }

                return true;
            }
        }, EVT_CLIENT_NODE_RECONNECTED);

        srvSpi.failNode(client.cluster().localNode().id(), null);

        waitReconnectEvent(reconnectLatch);

        // After reconnect both the local listener (1) and the remote listeners (2) must fire.
        locLsnr.latch = new CountDownLatch(1);
        latch = new CountDownLatch(2);

        client.message().send(topic, "msg1");

        assertTrue(locLsnr.latch.await(5000, MILLISECONDS));
        assertTrue(latch.await(5000, MILLISECONDS));

        locLsnr.latch = new CountDownLatch(1);
        latch = new CountDownLatch(2);

        srv.message().send(topic, "msg2");

        assertTrue(locLsnr.latch.await(5000, MILLISECONDS));
        assertTrue(latch.await(5000, MILLISECONDS));

        Ignite stopFrom = (stopFromClient ? client : srv);

        log.info("Stop listen, should not get remote messages anymore [from=" + stopFrom.name() + ']');

        stopFrom.message().stopRemoteListen(opId);

        srv.message().send(topic, "msg3");

        // Local listener still fires; remote listeners must stay silent.
        locLsnr.latch = new CountDownLatch(1);
        latch = new CountDownLatch(1);

        assertTrue(locLsnr.latch.await(5000, MILLISECONDS));
        assertFalse(latch.await(3000, MILLISECONDS));

        log.info("New nodes should not register stopped listeners.");

        startGrid(serverCount() + 1);

        srv.message().send(topic, "msg4");

        locLsnr.latch = new CountDownLatch(1);
        latch = new CountDownLatch(1);

        assertTrue(locLsnr.latch.await(5000, MILLISECONDS));
        assertFalse(latch.await(3000, MILLISECONDS));

        stopGrid(serverCount() + 1);
    }

    /**
     * @throws Exception If failed.
     */
    public void testCacheContinuousQueryReconnect() throws Exception {
        Ignite client = grid(serverCount());

        assertTrue(client.cluster().localNode().isClient());

        IgniteCache<Object, Object> clientCache = client.getOrCreateCache(new CacheConfiguration<>());

        CacheEventListener lsnr = new CacheEventListener();

        ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();

        qry.setAutoUnsubscribe(true);

        qry.setLocalListener(lsnr);

        QueryCursor<?> cur = clientCache.query(qry);

        // Query must keep delivering events across several reconnect cycles.
        for (int i = 0; i < 5; i++) {
            log.info("Iteration: " + i);

            continuousQueryReconnect(client, clientCache, lsnr);
        }

        log.info("Close cursor, should not get cache events anymore.");

        cur.close();

        lsnr.latch = new CountDownLatch(1);

        clientCache.put(3, 3);

        assertFalse(lsnr.latch.await(3000, MILLISECONDS));
    }

    /**
     * @throws Exception If failed.
     */
    public void testCacheContinuousQueryReconnectNewServer() throws Exception {
        Ignite client = grid(serverCount());

        assertTrue(client.cluster().localNode().isClient());

        IgniteCache<Object, Object> clientCache = client.getOrCreateCache(new CacheConfiguration<>());

        CacheEventListener lsnr = new CacheEventListener();

        ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();

        qry.setAutoUnsubscribe(true);

        qry.setLocalListener(lsnr);

        QueryCursor<?> cur = clientCache.query(qry);

        continuousQueryReconnect(client, clientCache, lsnr);

        // Check new server registers listener for reconnected client.
        try (Ignite newSrv = startGrid(serverCount() + 1)) {
            awaitPartitionMapExchange();

            lsnr.latch = new CountDownLatch(10);

            IgniteCache<Object, Object> newSrvCache = newSrv.cache(null);

            // Updates on keys primary on the new server must still reach the query listener.
            for (Integer key : primaryKeys(newSrvCache, 10))
                newSrvCache.put(key, key);

            assertTrue(lsnr.latch.await(5000, MILLISECONDS));
        }

        cur.close();

        // Check new server does not register listener for closed query.
        try (Ignite newSrv = startGrid(serverCount() + 1)) {
            awaitPartitionMapExchange();

            lsnr.latch = new CountDownLatch(5);

            IgniteCache<Object, Object> newSrvCache = newSrv.cache(null);

            for (Integer key : primaryKeys(newSrvCache, 5))
                newSrvCache.put(key, key);

            assertFalse(lsnr.latch.await(3000, MILLISECONDS));
        }
    }

    /**
     * @param client Client.
     * @param clientCache Client cache.
     * @param lsnr Continuous query listener.
     * @throws Exception If failed.
     */
    private void continuousQueryReconnect(Ignite client,
        IgniteCache<Object, Object> clientCache,
        CacheEventListener lsnr)
        throws Exception {
        Ignite srv = clientRouter(client);

        TestTcpDiscoverySpi srvSpi = spi(srv);

        final CountDownLatch reconnectLatch = new CountDownLatch(1);

        IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) {
                    info("Reconnected: " + evt);

                    reconnectLatch.countDown();
                }

                return true;
            }
        };

        client.events().localListen(p, EVT_CLIENT_NODE_RECONNECTED);

        srvSpi.failNode(client.cluster().localNode().id(), null);

        waitReconnectEvent(reconnectLatch);

        // Unsubscribe the helper predicate so repeated invocations don't stack listeners.
        client.events().stopLocalListen(p);

        lsnr.latch = new CountDownLatch(1);

        clientCache.put(1, 1);

        assertTrue(lsnr.latch.await(5000, MILLISECONDS));

        lsnr.latch = new CountDownLatch(1);

        srv.cache(null).put(2, 2);

        assertTrue(lsnr.latch.await(5000, MILLISECONDS));
    }

    /**
     * Remote event listener deployed on the client; counts received events.
     */
    private static class EventListener implements P2<UUID, Event> {
        /** */
        private volatile CountDownLatch latch;

        /** */
        @IgniteInstanceResource
        private Ignite ignite;

        /** {@inheritDoc} */
        @Override public boolean apply(UUID uuid, Event evt) {
            assertTrue(ignite.cluster().localNode().isClient());

            ignite.log().info("Received event: " + evt);

            if (latch != null)
                latch.countDown();

            return true;
        }
    }

    /**
     * Local message listener on the client node.
     */
    private static class MessageListener implements P2<UUID, Object> {
        /** */
        private volatile CountDownLatch latch;

        /** */
        @IgniteInstanceResource
        private Ignite ignite;

        /** {@inheritDoc} */
        @Override public boolean apply(UUID uuid, Object msg) {
            assertTrue(ignite.cluster().localNode().isClient());

            ignite.log().info("Local listener received message: " + msg);

            if (latch != null)
                latch.countDown();

            return true;
        }
    }

    /**
     * Remote message listener; counts down the shared static {@code latch}.
     */
    private static class RemoteMessageListener implements P2<UUID, Object> {
        /** */
        @IgniteInstanceResource
        private Ignite ignite;

        /** {@inheritDoc} */
        @Override public boolean apply(UUID uuid, Object msg) {
            ignite.log().info("Remote listener received message: " + msg);

            if (latch != null)
                latch.countDown();

            return true;
        }
    }

    /**
     * Continuous query local listener; expects exactly one event per notification batch.
     */
    private static class CacheEventListener implements CacheEntryUpdatedListener<Object, Object> {
        /** */
        private volatile CountDownLatch latch;

        /** */
        @IgniteInstanceResource
        private Ignite ignite;

        /** {@inheritDoc} */
        @Override public void onUpdated(Iterable<CacheEntryEvent<?, ?>> evts) {
            int cnt = 0;

            for (CacheEntryEvent<?, ?> evt : evts) {
                ignite.log().info("Received cache event: " + evt);

                cnt++;
            }

            assertEquals(1, cnt);

            if (latch != null)
                latch.countDown();
        }
    }

    /**
     * No-op compute job used to trigger EVT_JOB_STARTED events.
     */
    static class DummyJob implements IgniteRunnable {
        /** */
        @IgniteInstanceResource
        private Ignite ignite;

        /** {@inheritDoc} */
        @Override public void run() {
            ignite.log().info("Job run.");
        }
    }
}
apache-2.0
AndreasAbdi/jackrabbit-oak
oak-remote/src/main/java/org/apache/jackrabbit/oak/remote/content/ContentRemoteRevision.java
1312
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jackrabbit.oak.remote.content; import org.apache.jackrabbit.oak.api.Root; import org.apache.jackrabbit.oak.remote.RemoteRevision; class ContentRemoteRevision implements RemoteRevision { private final String id; private final Root root; public ContentRemoteRevision(String id, Root root) { this.id = id; this.root = root; } @Override public String asString() { return id; } public Root getRoot() { return root; } }
apache-2.0
bowenli86/flink
flink-ml-parent/flink-ml-api/src/test/java/org/apache/flink/ml/api/core/PipelineTest.java
4759
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.ml.api.core;

import org.apache.flink.ml.api.misc.param.ParamInfo;
import org.apache.flink.ml.api.misc.param.ParamInfoFactory;
import org.apache.flink.ml.api.misc.param.Params;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

/**
 * Tests the behavior of {@link Pipeline}.
 *
 * <p>Mock stages are identified by one-letter descriptions; fitting a
 * MockEstimator "x" produces a MockModel "mx", so the expected stage
 * sequences below encode both ordering and fit semantics.
 */
public class PipelineTest {

	@Rule
	public ExpectedException thrown = ExpectedException.none();

	@Test
	public void testPipelineBehavior() {
		Pipeline pipeline = new Pipeline();
		pipeline.appendStage(new MockTransformer("a"));
		pipeline.appendStage(new MockEstimator("b"));
		pipeline.appendStage(new MockEstimator("c"));
		pipeline.appendStage(new MockTransformer("d"));
		assert describePipeline(pipeline).equals("a_b_c_d");

		// fit() replaces each Estimator with the Model it produces ("b" -> "mb").
		Pipeline pipelineModel = pipeline.fit(null, null);
		assert describePipeline(pipelineModel).equals("a_mb_mc_d");

		// transform() on a pipeline that still contains Estimators must fail.
		thrown.expect(RuntimeException.class);
		thrown.expectMessage("Pipeline contains Estimator, need to fit first.");
		pipeline.transform(null, null);
	}

	@Test
	public void testPipelineRestore() {
		Pipeline pipeline = new Pipeline();
		pipeline.appendStage(new MockTransformer("a"));
		pipeline.appendStage(new MockEstimator("b"));
		pipeline.appendStage(new MockEstimator("c"));
		pipeline.appendStage(new MockTransformer("d"));

		// JSON round-trip must preserve the stage sequence, both before and after fit().
		String pipelineJson = pipeline.toJson();
		Pipeline restoredPipeline = new Pipeline(pipelineJson);
		assert describePipeline(restoredPipeline).equals("a_b_c_d");

		Pipeline pipelineModel = pipeline.fit(null, null);
		String modelJson = pipelineModel.toJson();
		Pipeline restoredPipelineModel = new Pipeline(modelJson);
		assert describePipeline(restoredPipelineModel).equals("a_mb_mc_d");
	}

	// Joins the per-stage descriptions with '_' to get a comparable signature.
	private static String describePipeline(Pipeline p) {
		StringBuilder res = new StringBuilder();
		for (PipelineStage s : p.getStages()) {
			if (res.length() != 0) {
				res.append("_");
			}
			res.append(((SelfDescribe) s).describe());
		}
		return res.toString();
	}

	/**
	 * Interface to describe a class with a string, only for pipeline test.
	 */
	private interface SelfDescribe {
		ParamInfo<String> DESCRIPTION =
			ParamInfoFactory.createParamInfo("description", String.class).build();

		String describe();
	}

	/**
	 * Mock estimator for pipeline test.
	 */
	public static class MockEstimator implements Estimator<MockEstimator, MockModel>, SelfDescribe {
		private final Params params = new Params();

		public MockEstimator() {
		}

		MockEstimator(String description) {
			set(DESCRIPTION, description);
		}

		@Override
		public MockModel fit(TableEnvironment tEnv, Table input) {
			// The produced model is tagged "m" + own description so tests can verify fit() ran.
			return new MockModel("m" + describe());
		}

		@Override
		public Params getParams() {
			return params;
		}

		@Override
		public String describe() {
			return get(DESCRIPTION);
		}
	}

	/**
	 * Mock transformer for pipeline test.
	 */
	public static class MockTransformer implements Transformer<MockTransformer>, SelfDescribe {
		private final Params params = new Params();

		public MockTransformer() {
		}

		MockTransformer(String description) {
			set(DESCRIPTION, description);
		}

		@Override
		public Table transform(TableEnvironment tEnv, Table input) {
			return input;
		}

		@Override
		public Params getParams() {
			return params;
		}

		@Override
		public String describe() {
			return get(DESCRIPTION);
		}
	}

	/**
	 * Mock model for pipeline test.
	 */
	public static class MockModel implements Model<MockModel>, SelfDescribe {
		private final Params params = new Params();

		public MockModel() {
		}

		MockModel(String description) {
			set(DESCRIPTION, description);
		}

		@Override
		public Table transform(TableEnvironment tEnv, Table input) {
			return input;
		}

		@Override
		public Params getParams() {
			return params;
		}

		@Override
		public String describe() {
			return get(DESCRIPTION);
		}
	}
}
apache-2.0
qudansdl/osgi-in-action
shell/org.foo.shell/src/org/foo/shell/commands/ResolveCommand.java
900
package org.foo.shell.commands;

import java.io.PrintStream;
import java.util.*;

import org.foo.shell.BasicCommand;
import org.osgi.framework.Bundle;
import org.osgi.service.packageadmin.PackageAdmin;

/**
 * Shell command that asks the OSGi {@link PackageAdmin} service to resolve
 * bundles. With no argument every unresolved bundle is resolved; otherwise
 * only the bundles named by the whitespace-separated tokens in {@code args}
 * are resolved.
 */
public class ResolveCommand extends BasicCommand {

  public void exec(String args, PrintStream out, PrintStream err) throws Exception {
    if (args == null) {
      // No explicit targets: passing null asks PackageAdmin to resolve everything.
      getPackageAdminService().resolveBundles(null);
      return;
    }
    List<Bundle> targets = new ArrayList<Bundle>();
    for (StringTokenizer tok = new StringTokenizer(args); tok.hasMoreTokens();) {
      targets.add(getBundle(tok.nextToken()));
    }
    getPackageAdminService().resolveBundles(targets.toArray(new Bundle[targets.size()]));
  }

  // Looks up the PackageAdmin service through this command's bundle context.
  private PackageAdmin getPackageAdminService() {
    return (PackageAdmin) m_context.getService(m_context.getServiceReference(PackageAdmin.class.getName()));
  }
}
apache-2.0
akosyakov/intellij-community
python/python-rest/test/com/jetbrains/rest/RestParsingTest.java
919
package com.jetbrains.rest;

import com.intellij.testFramework.ParsingTestCase;
import com.jetbrains.python.PythonHelpersLocator;
import com.jetbrains.python.fixtures.PyTestCase;
import com.jetbrains.rest.parsing.RestParserDefinition;

/**
 * Parser tests for reStructuredText ({@code .rst}) files; each test method
 * parses a same-named fixture from the {@code testData/psi} directory and
 * compares the resulting PSI tree against the stored expected output.
 *
 * User : catherine
 */
public class RestParsingTest extends ParsingTestCase {
  public RestParsingTest() {
    // No fixture-name prefix, ".rst" file extension, REST parser definition.
    super("", "rst", new RestParserDefinition());
    PyTestCase.initPlatformPrefix();
  }

  public void testTitle() {
    doTest(true);
  }

  public void testInjection() {
    doTest(true);
  }

  public void testReference() {
    doTest(true);
  }

  public void testReferenceTarget() {
    doTest(true);
  }

  public void testSubstitution() {
    doTest(true);
  }

  // Fixtures live under the python-rest module of the community sources.
  protected String getTestDataPath() {
    return PythonHelpersLocator.getPythonCommunityPath() + "/python-rest/testData/psi";
  }

  // Only the main PSI root is compared; injected roots are ignored.
  protected boolean checkAllPsiRoots() {
    return false;
  }
}
apache-2.0
leafclick/intellij-community
java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/emptyStatement/beforeIfSwitchCondition2Java13Preview.java
316
// "Extract side effects as an 'if' statement" "true" class Z { void z() { i<caret>f (foo ? switch(0) { case 0: yield false; case 1: yield true; default: yield new Foo().getBar(); } : switch(0) { case 0: yield false; case 1: yield true; default: yield false;}) {} } }
apache-2.0
marinmitev/smarthome
bundles/core/org.eclipse.smarthome.core.extension.sample/src/main/java/org/eclipse/smarthome/core/extension/sample/internal/SampleExtensionService.java
3054
/**
 * Copyright (c) 2014-2015 openHAB UG (haftungsbeschraenkt) and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 */
package org.eclipse.smarthome.core.extension.sample.internal;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;

import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.StringUtils;
import org.eclipse.smarthome.core.extension.Extension;
import org.eclipse.smarthome.core.extension.ExtensionService;
import org.eclipse.smarthome.core.extension.ExtensionType;

/**
 * This is an implementation of an {@link ExtensionService} that can be used as a dummy service for testing the
 * functionality.
 * It is not meant to be used anywhere productively.
 *
 * @author Kai Kreuzer - Initial contribution and API
 *
 */
public class SampleExtensionService implements ExtensionService {

    List<ExtensionType> types = new ArrayList<>(3);
    Map<String, Extension> extensions = new HashMap<>(30);

    /**
     * Populates three extension types and ten randomly named extensions per type,
     * each with a random installed state.
     */
    protected void activate() {
        types.add(new ExtensionType("binding", "Bindings"));
        types.add(new ExtensionType("ui", "User Interfaces"));
        types.add(new ExtensionType("persistence", "Persistence Services"));

        for (ExtensionType type : types) {
            for (int i = 0; i < 10; i++) {
                String id = type.getId() + Integer.toString(i);
                boolean installed = Math.random() > 0.5;
                String label = RandomStringUtils.randomAlphabetic(5) + " " + StringUtils.capitalize(type.getId());
                String typeId = type.getId();
                String version = "1.0";
                Extension extension = new Extension(id, typeId, label, version, installed);
                extensions.put(extension.getId(), extension);
            }
        }
    }

    protected void deactivate() {
        types.clear();
        extensions.clear();
    }

    /**
     * Simulates a slow installation (up to 10 s) and then marks the extension installed.
     */
    @Override
    public void install(String id) {
        try {
            Thread.sleep((long) (Math.random() * 10000));
            Extension extension = getExtension(id, null);
            extension.setInstalled(true);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it, so callers
            // (e.g. a shutting-down executor) can observe the interruption.
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Simulates a slow uninstallation (up to 5 s) and then marks the extension uninstalled.
     */
    @Override
    public void uninstall(String id) {
        try {
            Thread.sleep((long) (Math.random() * 5000));
            Extension extension = getExtension(id, null);
            extension.setInstalled(false);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public List<Extension> getExtensions(Locale locale) {
        // Defensive snapshot; the locale is ignored by this sample implementation.
        return new ArrayList<>(extensions.values());
    }

    @Override
    public Extension getExtension(String id, Locale locale) {
        return extensions.get(id);
    }

    @Override
    public List<ExtensionType> getTypes(Locale locale) {
        return types;
    }
}
epl-1.0
dominicdesu/openhab2-addons
addons/binding/org.openhab.binding.opensprinkler/src/main/java/org/openhab/binding/opensprinkler/internal/api/OpenSprinklerHttpApiV100.java
8529
/** * Copyright (c) 2010-2017 by the respective copyright holders. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.openhab.binding.opensprinkler.internal.api; import static org.openhab.binding.opensprinkler.internal.api.OpenSprinklerApiConstants.*; import org.openhab.binding.opensprinkler.internal.api.exception.CommunicationApiException; import org.openhab.binding.opensprinkler.internal.api.exception.GeneralApiException; import org.openhab.binding.opensprinkler.internal.util.Http; import org.openhab.binding.opensprinkler.internal.util.Parse; /** * The {@link OpenSprinklerHttpApiV100} class is used for communicating with * the OpenSprinkler API for firmware versions less than 2.1.0 * * @author Chris Graham - Initial contribution */ public class OpenSprinklerHttpApiV100 implements OpenSprinklerApi { protected final String hostname; protected final int port; protected final String password; protected int firmwareVersion = -1; protected int numberOfStations = DEFAULT_STATION_COUNT; protected boolean connectionOpen = false; /** * Constructor for the OpenSprinkler API class to create a connection to the OpenSprinkler * device for control and obtaining status info. * * @param hostname Hostname or IP address as a String of the OpenSprinkler device. * @param port The port number the OpenSprinkler API is listening on. * @param password Admin password for the OpenSprinkler device. 
* @throws Exception */ public OpenSprinklerHttpApiV100(final String hostname, final int port, final String password) throws Exception { if (hostname == null) { throw new GeneralApiException("The given url is null."); } if (port < 1 || port > 65535) { throw new GeneralApiException("The given port is invalid."); } if (password == null) { throw new GeneralApiException("The given password is null."); } if (hostname.startsWith(HTTP_REQUEST_URL_PREFIX) || hostname.startsWith(HTTPS_REQUEST_URL_PREFIX)) { throw new GeneralApiException("The given hostname does not need to start with " + HTTP_REQUEST_URL_PREFIX + " or " + HTTP_REQUEST_URL_PREFIX); } this.hostname = hostname; this.port = port; this.password = password; } /** {@inheritDoc} */ @Override public boolean isConnected() { return connectionOpen; } /** {@inheritDoc} */ @Override public void openConnection() throws Exception { try { Http.sendHttpGet(getBaseUrl(), getRequestRequiredOptions() + "&" + CMD_ENABLE_MANUAL_MODE); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } this.firmwareVersion = getFirmwareVersion(); this.numberOfStations = getNumberOfStations(); connectionOpen = true; } /** {@inheritDoc} */ @Override public void closeConnection() throws Exception { connectionOpen = false; try { Http.sendHttpGet(getBaseUrl(), getRequestRequiredOptions() + "&" + CMD_DISABLE_MANUAL_MODE); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } } /** {@inheritDoc} */ @Override public void openStation(int station) throws Exception { if (station < 0 || station >= numberOfStations) { throw new GeneralApiException("This OpenSprinkler device only has " + this.numberOfStations + " but station " + station + " was requested to be opened."); } try { Http.sendHttpGet(getBaseUrl() + "sn" + station + "=1", null); } 
catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } } /** {@inheritDoc} */ @Override public void closeStation(int station) throws Exception { if (station < 0 || station >= numberOfStations) { throw new GeneralApiException("This OpenSprinkler device only has " + this.numberOfStations + " but station " + station + " was requested to be closed."); } try { Http.sendHttpGet(getBaseUrl() + "sn" + station + "=0", null); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } } /** {@inheritDoc} */ @Override public boolean isStationOpen(int station) throws Exception { String returnContent; if (station < 0 || station >= numberOfStations) { throw new GeneralApiException("This OpenSprinkler device only has " + this.numberOfStations + " but station " + station + " was requested for a status update."); } try { returnContent = Http.sendHttpGet(getBaseUrl() + "sn" + station, null); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } return returnContent != null && returnContent.equals("1"); } /** {@inheritDoc} */ @Override public boolean isRainDetected() throws Exception { String returnContent; int rainBit = -1; try { returnContent = Http.sendHttpGet(getBaseUrl() + CMD_STATUS_INFO, getRequestRequiredOptions()); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } try { rainBit = Parse.jsonInt(returnContent, JSON_OPTION_RAINSENSOR); } catch (Exception exp) { rainBit = -1; } if (rainBit == 1) { return true; } else if (rainBit == 0) { return false; } else { throw new GeneralApiException("Could not get the current state of the rain sensor."); } } /** 
{@inheritDoc} */ @Override public int getNumberOfStations() throws Exception { String returnContent; try { returnContent = Http.sendHttpGet(getBaseUrl() + CMD_STATION_INFO, getRequestRequiredOptions()); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } this.numberOfStations = Parse.jsonInt(returnContent, JSON_OPTION_STATION_COUNT); return this.numberOfStations; } /** {@inheritDoc} */ @Override public int getFirmwareVersion() throws Exception { String returnContent; try { returnContent = Http.sendHttpGet(getBaseUrl() + CMD_OPTIONS_INFO, null); } catch (Exception exp) { throw new CommunicationApiException( "There was a problem in the HTTP communication with the OpenSprinkler API: " + exp.getMessage()); } try { this.firmwareVersion = Parse.jsonInt(returnContent, JSON_OPTION_FIRMWARE_VERSION); } catch (Exception exp) { this.firmwareVersion = -1; } return this.firmwareVersion; } /** * Returns the hostname and port formatted URL as a String. * * @return String representation of the OpenSprinkler API URL. */ protected String getBaseUrl() { return HTTP_REQUEST_URL_PREFIX + hostname + ":" + port + "/"; } /** * Returns the required URL parameters required for every API call. * * @return String representation of the parameters needed during an API call. */ protected String getRequestRequiredOptions() { return CMD_PASSWORD + this.password; } }
epl-1.0
jeremiahyan/djinni
intellij-plugin/src/com/dropbox/djinni/ideaplugin/psi/impl/DjinniPsiImplUtil.java
10540
/* * Copyright 2015 Dropbox, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dropbox.djinni.ideaplugin.psi.impl; import com.dropbox.djinni.ideaplugin.DjinniIcons; import com.dropbox.djinni.ideaplugin.psi.*; import com.intellij.lang.ASTNode; import com.intellij.navigation.ItemPresentation; import com.intellij.openapi.util.TextRange; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; /** * Created by jaetzold on 7/23/15. 
*/
public class DjinniPsiImplUtil {

  /** Kind of a Djinni type definition: enum, record, or interface. */
  public enum DjinniType {
    ENUM,RECORD,INTERFACE
  }

  // --- DjinniTypeDefinition -------------------------------------------------

  public static String getTypeName(DjinniTypeDefinition typeDefinition) {
    return typeDefinition.getIdentifier().getText();
  }

  // Classifies the definition by which PSI variant is present; falls back to ENUM.
  @NotNull
  public static DjinniType getDjinniType(DjinniTypeDefinition typeDefinition) {
    if(typeDefinition.getRecordTypeVariant() != null) {
      return DjinniType.RECORD;
    } else if(typeDefinition.getInterfaceTypeVariant() != null) {
      return DjinniType.INTERFACE;
    } else {
      return DjinniType.ENUM;
    }
  }

  public static String getName(DjinniTypeDefinition typeDefinition) {
    return getTypeName(typeDefinition);
  }

  // Renames by building a fresh definition of the same kind and swapping the
  // identifier AST node in place.
  public static PsiElement setName(DjinniTypeDefinition typeDefinition, String newName) {
    ASTNode nameNode = typeDefinition.getIdentifier().getNode();
    if (nameNode != null) {
      DjinniTypeDefinition newTypeDefinition =
        DjinniElementFactory.createTypeDefinition(typeDefinition.getProject(), newName, typeDefinition.getDjinniType());
      ASTNode newIdentifierNode = newTypeDefinition.getIdentifier().getNode();
      typeDefinition.getNode().replaceChild(nameNode, newIdentifierNode);
    }
    return typeDefinition;
  }

  @Nullable
  public static PsiElement getNameIdentifier(DjinniTypeDefinition typeDefinition) {
    ASTNode nameNode = typeDefinition.getIdentifier().getNode();
    if (nameNode != null) {
      return nameNode.getPsi();
    } else {
      return null;
    }
  }

  // Presentation used by IDE views (structure view, find usages, etc.).
  public static ItemPresentation getPresentation(final DjinniTypeDefinition typeDefinition) {
    return new ItemPresentation() {
      @Nullable
      @Override
      public String getPresentableText() {
        return typeDefinition.getTypeName();
      }

      @Nullable
      @Override
      public String getLocationString() {
        PsiFile containingFile = typeDefinition.getContainingFile();
        return containingFile != null ? containingFile.getName() : null;
      }

      @Nullable
      @Override
      public Icon getIcon(boolean unused) {
        return DjinniIcons.FILE;
      }
    };
  }

  // --- DjinniTypeReference --------------------------------------------------

  public static String getName(DjinniTypeReference typeReference) {
    return typeReference.getText();
  }

  public static PsiElement setName(DjinniTypeReference typeReference, String newName) {
    PsiElement identifier = typeReference.getIdentifier();
    ASTNode node = identifier != null ? identifier.getNode() : null;
    if (node != null) {
      DjinniTypeReference newTypeReference = DjinniElementFactory.createTypeReference(typeReference.getProject(), newName);
      if (newTypeReference != null) {
        PsiElement newIdentifier = newTypeReference.getIdentifier();
        if (newIdentifier != null) {
          ASTNode newIdentifierNode = newIdentifier.getNode();
          typeReference.getNode().replaceChild(node, newIdentifierNode);
        }
      }
    }
    return typeReference;
  }

  @Nullable
  public static PsiElement getNameIdentifier(DjinniTypeReference typeReference) {
    PsiElement identifier = typeReference.getIdentifier();
    if (identifier != null) {
      ASTNode nameNode = identifier.getNode();
      if (nameNode != null) {
        return nameNode.getPsi();
      }
    }
    return null;
  }

  // --- DjinniConstReference -------------------------------------------------

  public static String getName(DjinniConstReference reference) {
    return reference.getText();
  }

  public static PsiElement setName(DjinniConstReference reference, String newName) {
    PsiElement identifier = reference.getIdentifier();
    ASTNode node = identifier.getNode();
    if (node != null) {
      DjinniConstReference newReference = DjinniElementFactory.createConstReference(reference.getProject(), newName);
      if (newReference != null) {
        PsiElement newIdentifier = newReference.getIdentifier();
        ASTNode newIdentifierNode = newIdentifier.getNode();
        reference.getNode().replaceChild(node, newIdentifierNode);
      }
    }
    return reference;
  }

  @Nullable
  public static PsiElement getNameIdentifier(DjinniConstReference reference) {
    PsiElement identifier = reference.getIdentifier();
    ASTNode nameNode = identifier.getNode();
    if (nameNode != null) {
      return nameNode.getPsi();
    } else {
      return null;
    }
  }

  // --- DjinniConstNamedValue ------------------------------------------------

  public static String getName(DjinniConstNamedValue namedValue) {
    return namedValue.getText();
  }

  public static PsiElement setName(DjinniConstNamedValue namedValue, String newName) {
    PsiElement identifier = namedValue.getIdentifier();
    ASTNode node = identifier.getNode();
    if (node != null) {
      DjinniConstNamedValue newValue = DjinniElementFactory.createConstNamedValue(namedValue.getProject(), newName);
      if (newValue != null) {
        PsiElement newIdentifier = newValue.getIdentifier();
        ASTNode newIdentifierNode = newIdentifier.getNode();
        namedValue.getNode().replaceChild(node, newIdentifierNode);
      }
    }
    return namedValue;
  }

  @Nullable
  public static PsiElement getNameIdentifier(DjinniConstNamedValue namedValue) {
    PsiElement identifier = namedValue.getIdentifier();
    ASTNode nameNode = identifier.getNode();
    if (nameNode != null) {
      return nameNode.getPsi();
    } else {
      return null;
    }
  }

  // --- DjinniEnumValue ------------------------------------------------------

  public static String getName(DjinniEnumValue enumValue) {
    return enumValue.getText();
  }

  public static PsiElement setName(DjinniEnumValue enumValue, String newName) {
    PsiElement identifier = enumValue.getIdentifier();
    ASTNode node = identifier.getNode();
    if (node != null) {
      DjinniEnumValue newValue = DjinniElementFactory.createEnumValue(enumValue.getProject(), newName);
      if (newValue != null) {
        PsiElement newIdentifier = newValue.getIdentifier();
        ASTNode newIdentifierNode = newIdentifier.getNode();
        enumValue.getNode().replaceChild(node, newIdentifierNode);
      }
    }
    return enumValue;
  }

  @Nullable
  public static PsiElement getNameIdentifier(DjinniEnumValue enumValue) {
    PsiElement identifier = enumValue.getIdentifier();
    ASTNode nameNode = identifier.getNode();
    if (nameNode != null) {
      return nameNode.getPsi();
    } else {
      return null;
    }
  }

  // --- DjinniImportStatement ------------------------------------------------

  // Returns the imported path with the surrounding quotes stripped, or "" when
  // the literal is too short to contain a quoted path.
  @NotNull
  public static String getName(DjinniImportStatement importStatement) {
    String stringLiteral = importStatement.getStringLiteral().getText();
    if (stringLiteral.length() > 2) {
      return stringLiteral.substring(1, stringLiteral.length() - 1);
    } else {
      return "";
    }
  }

  public static PsiElement setName(DjinniImportStatement importStatement, String newName) {
    ASTNode node = importStatement.getStringLiteral().getNode();
    if (node != null) {
      DjinniImportStatement newImportStatement = DjinniElementFactory.createImportStatement(importStatement.getProject(), newName);
      if (newImportStatement != null) {
        ASTNode newIdentifierNode = newImportStatement.getStringLiteral().getNode();
        importStatement.getNode().replaceChild(node, newIdentifierNode);
      }
    }
    return importStatement;
  }

  // Range of the quoted path relative to the statement start, excluding quotes.
  public static TextRange getRangeOfPath(DjinniImportStatement importStatement) {
    TextRange importRange = importStatement.getTextRange();
    TextRange pathRange = importStatement.getStringLiteral().getTextRange();
    assert pathRange.getLength() >= 2; // if it is not enclosed by quotes it's not a string literal
    return new TextRange((pathRange.getStartOffset() - importRange.getStartOffset()) + 1,
                         (pathRange.getEndOffset() - importRange.getStartOffset()) - 1);
  }

  @NotNull
  public static String getPath(DjinniImportStatement importStatement) {
    TextRange rangeOfPath = getRangeOfPath(importStatement);
    String text = importStatement.getText();
    return text.substring(rangeOfPath.getStartOffset(), rangeOfPath.getEndOffset());
  }

  // --- DjinniExternStatement ------------------------------------------------

  @NotNull
  public static String getName(DjinniExternStatement externStatement) {
    String stringLiteral = externStatement.getStringLiteral().getText();
    if (stringLiteral.length() > 2) {
      return stringLiteral.substring(1, stringLiteral.length() - 1);
    } else {
      return "";
    }
  }

  public static PsiElement setName(DjinniExternStatement externStatement, String newName) {
    ASTNode node = externStatement.getStringLiteral().getNode();
    if (node != null) {
      // NOTE(review): this builds a DjinniImportStatement to rename an extern
      // statement — looks like a copy/paste from the import variant above.
      // Confirm whether DjinniElementFactory offers a createExternStatement that
      // should be used here instead.
      DjinniImportStatement newImportStatement = DjinniElementFactory.createImportStatement(externStatement.getProject(), newName);
      if (newImportStatement != null) {
        ASTNode newIdentifierNode = newImportStatement.getStringLiteral().getNode();
        externStatement.getNode().replaceChild(node, newIdentifierNode);
      }
    }
    return externStatement;
  }

  // Range of the quoted path relative to the statement start, excluding quotes.
  public static TextRange getRangeOfPath(DjinniExternStatement externStatement) {
    TextRange importRange = externStatement.getTextRange();
    TextRange pathRange = externStatement.getStringLiteral().getTextRange();
    assert pathRange.getLength() >= 2; // if it is not enclosed by quotes it's not a string literal
    return new TextRange((pathRange.getStartOffset() - importRange.getStartOffset()) + 1,
                         (pathRange.getEndOffset() - importRange.getStartOffset()) - 1);
  }

  @NotNull
  public static String getPath(DjinniExternStatement externStatement) {
    TextRange rangeOfPath = getRangeOfPath(externStatement);
    String text = externStatement.getText();
    return text.substring(rangeOfPath.getStartOffset(), rangeOfPath.getEndOffset());
  }
}
apache-2.0
pedroigor/undertow
core/src/test/java/io/undertow/server/protocol/http2/HTTP2ViaUpgradeTestCase.java
16094
/* * JBoss, Home of Professional Open Source. * Copyright 2014 Red Hat, Inc., and individual contributors * as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.undertow.server.protocol.http2; import io.netty.bootstrap.Bootstrap; import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.ChannelPromise; import io.netty.channel.EventLoopGroup; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpClientUpgradeHandler; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpVersion; import io.netty.handler.codec.http2.DefaultHttp2Connection; import io.netty.handler.codec.http2.DefaultHttp2FrameReader; import io.netty.handler.codec.http2.DefaultHttp2FrameWriter; 
import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener;
import io.netty.handler.codec.http2.Http2ClientUpgradeCodec;
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2FrameLogger;
import io.netty.handler.codec.http2.Http2FrameReader;
import io.netty.handler.codec.http2.Http2FrameWriter;
import io.netty.handler.codec.http2.Http2InboundFrameLogger;
import io.netty.handler.codec.http2.Http2OutboundFrameLogger;
import io.netty.handler.codec.http2.Http2Settings;
import io.netty.handler.codec.http2.HttpConversionUtil;
import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler;
import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder;
import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder;
import io.netty.handler.logging.LogLevel;
import io.undertow.Handlers;
import io.undertow.Undertow;
import io.undertow.UndertowOptions;
import io.undertow.server.HttpHandler;
import io.undertow.server.HttpServerExchange;
import io.undertow.server.session.SessionCookieConfig;
import io.undertow.testutils.DefaultServer;
import io.undertow.testutils.HttpOneOnly;
import io.undertow.util.Headers;
import io.undertow.util.HttpString;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.xnio.Options;

import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

/**
 * Tests that an HTTP/1.1 connection can be upgraded to cleartext HTTP/2 (h2c)
 * against an Undertow server, using a Netty-based client to drive the upgrade
 * handshake and to send a request on the upgraded connection.
 *
 * @author Stuart Douglas
 */
@RunWith(DefaultServer.class)
@HttpOneOnly
public class HTTP2ViaUpgradeTestCase {

    static Undertow server;

    // Payload the server echoes back; set by the test before the request is sent.
    static volatile String message;

    // Response bodies captured by the client-side handler, consumed by the test thread.
    private static final LinkedBlockingDeque<String> messages = new LinkedBlockingDeque<>();

    @BeforeClass
    public static void setup() throws URISyntaxException {
        // NOTE(review): sessionConfig is never used below — looks like leftover setup code.
        final SessionCookieConfig sessionConfig = new SessionCookieConfig();
        int port = DefaultServer.getHostPort("default");
        server = Undertow.builder()
                // Listens on port + 1 so it does not clash with the default test listener.
                .addHttpListener(port + 1, DefaultServer.getHostAddress("default"))
                .setServerOption(UndertowOptions.ENABLE_HTTP2, true)
                .setSocketOption(Options.REUSE_ADDRESSES, true)
                .setHandler(Handlers.header(new Http2UpgradeHandler(new HttpHandler() {
                    @Override
                    public void handleRequest(HttpServerExchange exchange) throws Exception {
                        // Fail loudly if the upgrade did not actually produce an HTTP/2 connection.
                        if (!(exchange.getConnection() instanceof Http2ServerConnection)) {
                            throw new RuntimeException("Not HTTP2");
                        }
                        exchange.getResponseHeaders().add(new HttpString("X-Custom-Header"), "foo");
                        exchange.getResponseSender().send(message);
                    }
                }, "h2c", "h2c-17"), Headers.SEC_WEB_SOCKET_ACCEPT_STRING, "fake")) //work around Netty bug, it assumes that every upgrade request that does not have this header is an old style websocket upgrade
                .build();
        server.start();
    }

    @AfterClass
    public static void stop() {
        server.stop();
    }

    /**
     * Performs the h2c upgrade with a Netty client, sends a GET on stream 3 and
     * asserts the body echoed by the server matches {@link #message}.
     */
    @Test
    public void testHttp2WithNettyClient() throws Exception {
        message = "Hello World";
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        Http2ClientInitializer initializer = new Http2ClientInitializer(Integer.MAX_VALUE);

        try {
            // Configure the client.
            Bootstrap b = new Bootstrap();
            b.group(workerGroup);
            b.channel(NioSocketChannel.class);
            b.option(ChannelOption.SO_KEEPALIVE, true);
            final int port = DefaultServer.getHostPort("default") + 1;
            final String host = DefaultServer.getHostAddress("default");
            b.remoteAddress(host, port);
            b.handler(initializer);

            // Start the client.
            Channel channel = b.connect().syncUninterruptibly().channel();

            // Block until the server's first SETTINGS frame arrives, i.e. the
            // HTTP/2 connection preface has completed after the upgrade.
            Http2SettingsHandler http2SettingsHandler = initializer.settingsHandler();
            http2SettingsHandler.awaitSettings(5, TimeUnit.SECONDS);

            HttpResponseHandler responseHandler = initializer.responseHandler();
            // First client-initiated stream after an upgrade; stream 1 is taken
            // by the upgrade request itself.
            int streamId = 3;
            URI hostName = URI.create("http://" + host + ':' + port);
            System.err.println("Sending request(s)...");

            // Create a simple GET request.
            final ChannelPromise promise = channel.newPromise();
            responseHandler.put(streamId, promise);
            FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, hostName.toString());
            request.headers().add(HttpHeaderNames.HOST, hostName);
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
            channel.writeAndFlush(request);
            // Next client stream id would be 5; only one request is sent here.
            streamId += 2;
            promise.await(10, TimeUnit.SECONDS);
            Assert.assertEquals(message, messages.poll());
            System.out.println("Finished HTTP/2 request(s)");

            // Wait until the connection is closed.
            channel.close().syncUninterruptibly();
        } finally {
            workerGroup.shutdownGracefully();
        }
    }

    /**
     * Sets up the client pipeline: HTTP/1.1 codec plus the upgrade handler that
     * switches the connection to HTTP/2 once the server accepts the upgrade.
     */
    static class Http2ClientInitializer extends ChannelInitializer<SocketChannel> {

        private static final Http2FrameLogger logger = new Http2FrameLogger(LogLevel.INFO, Http2ClientInitializer.class);

        private final int maxContentLength;
        private HttpToHttp2ConnectionHandler connectionHandler;
        private HttpResponseHandler responseHandler;
        private Http2SettingsHandler settingsHandler;

        Http2ClientInitializer(int maxContentLength) {
            this.maxContentLength = maxContentLength;
        }

        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            // false => this endpoint acts as the HTTP/2 client.
            final Http2Connection connection = new DefaultHttp2Connection(false);
            connectionHandler = new HttpToHttp2ConnectionHandlerBuilder()
                    .connection(connection)
                    .frameListener(new DelegatingDecompressorFrameListener(connection,
                            new InboundHttp2ToHttpAdapterBuilder(connection)
                                    .maxContentLength(maxContentLength)
                                    .propagateSettings(true)
                                    .build()))
                    .build();
            responseHandler = new HttpResponseHandler();
            settingsHandler = new Http2SettingsHandler(ch.newPromise());
            configureClearText(ch);
        }

        public HttpResponseHandler responseHandler() {
            return responseHandler;
        }

        public Http2SettingsHandler settingsHandler() {
            return settingsHandler;
        }

        protected void configureEndOfPipeline(ChannelPipeline pipeline) {
            pipeline.addLast(settingsHandler, responseHandler);
        }

        /**
         * Configure the pipeline for a cleartext upgrade from HTTP to HTTP/2.
         */
        private void configureClearText(SocketChannel ch) {
            HttpClientCodec sourceCodec = new HttpClientCodec();
            Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(connectionHandler);
            HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, 65536);
            ch.pipeline().addLast(sourceCodec, upgradeHandler, new UpgradeRequestHandler(), new UserEventLogger());
        }

        /**
         * A handler that triggers the cleartext upgrade to HTTP/2 by sending an initial HTTP request.
         */
        private final class UpgradeRequestHandler extends ChannelInboundHandlerAdapter {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                DefaultFullHttpRequest upgradeRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/sdf");
                upgradeRequest.headers().add(Headers.HOST_STRING, "default");
                ctx.writeAndFlush(upgradeRequest);
                ctx.fireChannelActive();

                // Done with this handler, remove it from the pipeline.
                ctx.pipeline().remove(this);
                configureEndOfPipeline(ctx.pipeline());
            }
        }

        /**
         * Class that logs any User Events triggered on this channel.
         */
        private static class UserEventLogger extends ChannelInboundHandlerAdapter {
            @Override
            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                System.out.println("User Event Triggered: " + evt);
                ctx.fireUserEventTriggered(evt);
            }
        }

        // NOTE(review): frameReader()/frameWriter() are not referenced anywhere in
        // this class — presumably leftovers from the Netty example this is based on.
        private static Http2FrameReader frameReader() {
            return new Http2InboundFrameLogger(new DefaultHttp2FrameReader(), logger);
        }

        private static Http2FrameWriter frameWriter() {
            return new Http2OutboundFrameLogger(new DefaultHttp2FrameWriter(), logger);
        }
    }

    /**
     * Waits for the first (server) {@link Http2Settings} frame and completes a
     * promise so the test can tell when the HTTP/2 handshake has finished.
     */
    static class Http2SettingsHandler extends SimpleChannelInboundHandler<Http2Settings> {
        private ChannelPromise promise;

        /**
         * Create new instance
         *
         * @param promise Promise object used to notify when first settings are received
         */
        Http2SettingsHandler(ChannelPromise promise) {
            this.promise = promise;
        }

        /**
         * Wait for this handler to be added after the upgrade to HTTP/2, and for initial preface
         * handshake to complete.
         *
         * @param timeout Time to wait
         * @param unit {@link java.util.concurrent.TimeUnit} for {@code timeout}
         * @throws Exception if timeout or other failure occurs
         */
        public void awaitSettings(long timeout, TimeUnit unit) throws Exception {
            if (!promise.awaitUninterruptibly(timeout, unit)) {
                throw new IllegalStateException("Timed out waiting for settings");
            }
            if (!promise.isSuccess()) {
                throw new RuntimeException(promise.cause());
            }
        }

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, Http2Settings msg) throws Exception {
            promise.setSuccess();

            // Only care about the first settings message
            ctx.pipeline().remove(this);
        }
    }

    /**
     * Collects full HTTP responses, matches them to their stream id and captures
     * the (decompressed) body into {@link #messages}.
     */
    static class HttpResponseHandler extends SimpleChannelInboundHandler<FullHttpResponse> {

        private SortedMap<Integer, ChannelPromise> streamidPromiseMap;

        HttpResponseHandler() {
            streamidPromiseMap = new TreeMap<Integer, ChannelPromise>();
        }

        /**
         * Create an association between an anticipated response stream id and a {@link io.netty.channel.ChannelPromise}
         *
         * @param streamId The stream for which a response is expected
         * @param promise The promise object that will be used to wait/notify events
         * @return The previous object associated with {@code streamId}
         * @see HttpResponseHandler#awaitResponses(long, java.util.concurrent.TimeUnit)
         */
        public ChannelPromise put(int streamId, ChannelPromise promise) {
            return streamidPromiseMap.put(streamId, promise);
        }

        /**
         * Wait (sequentially) for a time duration for each anticipated response
         *
         * @param timeout Value of time to wait for each response
         * @param unit Units associated with {@code timeout}
         * @see HttpResponseHandler#put(int, io.netty.channel.ChannelPromise)
         */
        public void awaitResponses(long timeout, TimeUnit unit) {
            Iterator<Entry<Integer, ChannelPromise>> itr = streamidPromiseMap.entrySet().iterator();
            while (itr.hasNext()) {
                Entry<Integer, ChannelPromise> entry = itr.next();
                ChannelPromise promise = entry.getValue();
                if (!promise.awaitUninterruptibly(timeout, unit)) {
                    throw new IllegalStateException("Timed out waiting for response on stream id " + entry.getKey());
                }
                if (!promise.isSuccess()) {
                    throw new RuntimeException(promise.cause());
                }
                System.out.println("---Stream id: " + entry.getKey() + " received---");
                itr.remove();
            }
        }

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) throws Exception {
            // The adapter records the originating HTTP/2 stream id in a synthetic header.
            Integer streamId = msg.headers().getInt(HttpConversionUtil.ExtensionHeaderNames.STREAM_ID.text());
            if (streamId == null) {
                System.err.println("HttpResponseHandler unexpected message received: " + msg);
                return;
            }
            ChannelPromise promise = streamidPromiseMap.get(streamId);
            if (promise == null) {
                System.err.println("Message received for unknown stream id " + streamId);
            } else {
                // Do stuff with the message (for now just print it)
                ByteBuf content = msg.content();
                if (content.isReadable()) {
                    int contentLength = content.readableBytes();
                    byte[] arr = new byte[contentLength];
                    content.readBytes(arr);
                    messages.add(new String(arr, StandardCharsets.UTF_8));
                }
                promise.setSuccess();
            }
        }
    }
}
apache-2.0
soninaren/azure-mobile-services-test
sdk/Android/ZumoE2ETestApp/src/com/microsoft/windowsazure/mobileservices/zumoe2etestapp/push/GCMMessageHelper.java
5310
/*
Copyright (c) Microsoft Open Technologies, Inc.
All Rights Reserved
Apache 2.0 License

   Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
   specific language governing permissions and limitations under the License.

   See the Apache Version 2.0 License for specific language governing permissions and limitations under the License.
*/
package com.microsoft.windowsazure.mobileservices.zumoe2etestapp.push;

import java.util.Set;
import java.util.Map.Entry;

import android.content.Intent;

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.framework.TestCase;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.framework.TestExecutionCallback;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.framework.TestResult;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.framework.TestStatus;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.tests.EnhancedPushTests;
import com.microsoft.windowsazure.mobileservices.zumoe2etestapp.tests.PushTests;

/**
 * Factory of {@link GCMMessageCallback} instances used by the GCM push E2E
 * tests: callbacks for registration results, for expected push payloads, and
 * for the negative case where no push must arrive after unregistration.
 */
public class GCMMessageHelper {

    // Utility class with only static factories — prevent instantiation (Effective Java Item 4).
    private GCMMessageHelper() {
    }

    /**
     * Creates a {@link TestResult} bound to {@code test} with the given status.
     * Shared by every callback below to avoid repeating the three-line setup.
     */
    private static TestResult createResult(TestCase test, TestStatus status) {
        TestResult testResult = new TestResult();
        testResult.setTestCase(test);
        testResult.setStatus(status);
        return testResult;
    }

    /**
     * Builds a callback that completes {@code test} based on the outcome of a
     * GCM registration attempt.
     *
     * @param test     the test case being executed
     * @param callback notified with the final test result
     * @param clazz    the test-suite class whose static {@code registrationId}
     *                 field should receive the registration id on success
     *                 (either {@link EnhancedPushTests} or {@link PushTests})
     */
    public static GCMMessageCallback getRegistrationCallBack(final TestCase test, final TestExecutionCallback callback, final Class<?> clazz) {
        return new GCMMessageCallback() {
            @Override
            public void timeoutElapsed() {
                test.log("Error, registration message did not arrive on time");
                callback.onTestComplete(test, createResult(test, TestStatus.Failed));
            }

            @Override
            public void registrationMessageReceived(boolean isError, String value) {
                if (isError) {
                    // On error, "value" carries the error id rather than a registration id.
                    test.log("Received error during registration: errorId = " + value);
                    callback.onTestComplete(test, createResult(test, TestStatus.Failed));
                } else {
                    // Store the registration id on whichever test suite requested it.
                    if (clazz.getCanonicalName().equals(EnhancedPushTests.class.getCanonicalName())) {
                        EnhancedPushTests.registrationId = value;
                    } else if (clazz.getCanonicalName().equals(PushTests.class.getCanonicalName())) {
                        PushTests.registrationId = value;
                    }
                    test.log("Registration completed successfully. RegistrationId = " + value);
                    callback.onTestComplete(test, createResult(test, TestStatus.Passed));
                }
            }
        };
    }

    /**
     * Convenience overload: parses {@code expectedPayload} as a JSON object and
     * delegates to {@link #getPushCallback(TestCase, JsonObject, TestExecutionCallback)}.
     *
     * @throws com.google.gson.JsonSyntaxException if the payload is not valid JSON
     */
    public static GCMMessageCallback getPushCallback(TestCase test, String expectedPayload, TestExecutionCallback callback) {
        return getPushCallback(test, new JsonParser().parse(expectedPayload).getAsJsonObject(), callback);
    }

    /**
     * Builds a callback that passes {@code test} only if a push message arrives
     * in time and every key/value pair in {@code expectedPayload} matches the
     * corresponding intent extra.
     */
    public static GCMMessageCallback getPushCallback(final TestCase test, final JsonObject expectedPayload, final TestExecutionCallback callback) {
        return new GCMMessageCallback() {
            @Override
            public void timeoutElapsed() {
                test.log("Did not receive push message on time, test failed");
                callback.onTestComplete(test, createResult(test, TestStatus.Failed));
            }

            @Override
            public void pushMessageReceived(Intent intent) {
                test.log("Received push message: " + intent.toString());
                // Start optimistic; any mismatched key downgrades the result to Failed.
                TestResult testResult = createResult(test, TestStatus.Passed);
                Set<Entry<String, JsonElement>> payloadEntries = expectedPayload.entrySet();
                for (Entry<String, JsonElement> entry : payloadEntries) {
                    String key = entry.getKey();
                    String value = entry.getValue().getAsString();
                    String intentExtra = intent.getStringExtra(key);
                    if (value.equals(intentExtra)) {
                        test.log("Retrieved correct value for key " + key);
                    } else {
                        test.log("Error retrieving value for key " + key + ". Expected: " + value + "; actual: " + intentExtra);
                        testResult.setStatus(TestStatus.Failed);
                    }
                }
                callback.onTestComplete(test, testResult);
            }
        };
    }

    /**
     * Builds a callback with inverted expectations: the test passes when the
     * timeout elapses without a push (proving unregistration worked) and fails
     * if any push message is received.
     */
    public static GCMMessageCallback getNegativePushCallback(final TestCase test, final TestExecutionCallback callback) {
        return new GCMMessageCallback() {
            @Override
            public void timeoutElapsed() {
                test.log("Did not receive push message after timeout. Correctly unregistered. Test succeded");
                callback.onTestComplete(test, createResult(test, TestStatus.Passed));
            }

            @Override
            public void pushMessageReceived(Intent intent) {
                test.log("Received push message: " + intent.toString() + ". Incorrectly unregistered. Test failed.");
                callback.onTestComplete(test, createResult(test, TestStatus.Failed));
            }
        };
    }
}
apache-2.0
mduerig/jackrabbit-oak
oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/DocumentFactory.java
1387
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.oak.upgrade.cli.node;

import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder;

/**
 * Common base for factories that produce DocumentNodeStore-backed node stores,
 * holding the builder configuration shared by all document store variants.
 */
abstract class DocumentFactory implements NodeStoreFactory {

    /** Number of bytes in one megabyte. */
    private static final long MB = 1024 * 1024;

    /**
     * Applies the shared builder configuration: the memory cache size and,
     * unless disabled via the {@code mongomk.disableFastMigration} system
     * property, branch-less ("fast migration") commits.
     *
     * @param builder   the builder to configure
     * @param cacheSize memory cache size, in megabytes
     * @return the same builder instance, for chaining
     */
    static <T extends DocumentNodeStoreBuilder<?>> T baseConfiguration(T builder, int cacheSize) {
        builder.memoryCacheSize(cacheSize * MB);
        // Fast migration (branch-less commits) is on by default; a system
        // property provides the escape hatch.
        if (!Boolean.getBoolean("mongomk.disableFastMigration")) {
            builder.disableBranches();
        }
        return builder;
    }
}
apache-2.0
irudyak/ignite
modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/HadoopTxConfigCacheTest.java
1684
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.hadoop.impl; import org.apache.ignite.Ignite; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.IgniteTxConfigCacheSelfTest; import org.apache.ignite.internal.util.typedef.internal.CU; /** * Test checks whether hadoop system cache doesn't use user defined TX config. */ public class HadoopTxConfigCacheTest extends IgniteTxConfigCacheSelfTest { /** * Success if system caches weren't timed out. * * @throws Exception If failed. */ public void testSystemCacheTx() throws Exception { final Ignite ignite = grid(0); final IgniteInternalCache<Object, Object> hadoopCache = getSystemCache(ignite, CU.SYS_CACHE_HADOOP_MR); checkImplicitTxSuccess(hadoopCache); checkStartTxSuccess(hadoopCache); } }
apache-2.0
GlenRSmith/elasticsearch
x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java
3756
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.spatial.index.fielddata;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.ByteArrayStreamInput;

import java.io.IOException;

/**
 * A reusable Geometry doc value reader for a previous serialized {@link org.elasticsearch.geometry.Geometry} using
 * {@link GeometryDocValueWriter}.
 *
 *
 * -----------------------------------------
 * |   The binary format of the tree       |
 * -----------------------------------------
 * -----------------------------------------  --
 * |    centroid-x-coord (4 bytes)         |    |
 * -----------------------------------------    |
 * |    centroid-y-coord (4 bytes)         |    |
 * -----------------------------------------    |
 * |  DimensionalShapeType (1 byte)        |    | Centroid-related header
 * -----------------------------------------    |
 * |  Sum of weights (VLong 1-8 bytes)     |    |
 * -----------------------------------------  --
 * |         Extent (var-encoding)         |
 * -----------------------------------------
 * |         Triangle Tree                 |
 * -----------------------------------------
 * -----------------------------------------
 */
public class GeometryDocValueReader {
    // Re-pointable view over the raw doc-value bytes; reused across documents.
    private final ByteArrayStreamInput input;
    // Decoded lazily in getExtent() and cached for the current value.
    private final Extent extent;
    // Absolute position of the first byte after the extent (start of the triangle
    // tree); 0 means the extent has not been decoded yet for the current value.
    private int treeOffset;
    // Absolute offset of the start of the current value inside the backing array.
    private int docValueOffset;

    public GeometryDocValueReader() {
        this.extent = new Extent();
        this.input = new ByteArrayStreamInput();
    }

    /**
     * reset the geometry.
     */
    public void reset(BytesRef bytesRef) throws IOException {
        this.input.reset(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        docValueOffset = bytesRef.offset;
        // Force getExtent() to re-decode for the new value.
        treeOffset = 0;
    }

    /**
     * returns the {@link Extent} of this geometry. On first call after reset()
     * this decodes the extent from the stream and remembers where the triangle
     * tree starts; later calls just reposition the stream at the tree.
     */
    protected Extent getExtent() throws IOException {
        if (treeOffset == 0) {
            getSumCentroidWeight(); // skip CENTROID_HEADER + var-long sum-weight
            Extent.readFromCompressed(input, extent);
            treeOffset = input.getPosition();
        } else {
            input.setPosition(treeOffset);
        }
        return extent;
    }

    /**
     * returns the encoded X coordinate of the centroid.
     */
    protected int getCentroidX() throws IOException {
        // Fixed layout: 4-byte int at the start of the value (see class diagram).
        input.setPosition(docValueOffset + 0);
        return input.readInt();
    }

    /**
     * returns the encoded Y coordinate of the centroid.
     */
    protected int getCentroidY() throws IOException {
        // 4-byte int immediately after the X coordinate.
        input.setPosition(docValueOffset + 4);
        return input.readInt();
    }

    // Single byte at offset 8, directly after the two centroid coordinates.
    protected DimensionalShapeType getDimensionalShapeType() {
        input.setPosition(docValueOffset + 8);
        return DimensionalShapeType.readFrom(input);
    }

    // Var-long-encoded raw bits of the double sum-of-weights, at offset 9.
    protected double getSumCentroidWeight() throws IOException {
        input.setPosition(docValueOffset + 9);
        return Double.longBitsToDouble(input.readVLong());
    }

    /**
     * Visit the triangle tree with the provided visitor
     */
    public void visit(TriangleTreeReader.Visitor visitor) throws IOException {
        // getExtent() also positions the stream at the start of the triangle tree.
        Extent geometryExtent = getExtent();
        int thisMaxX = geometryExtent.maxX();
        int thisMinX = geometryExtent.minX();
        int thisMaxY = geometryExtent.maxY();
        int thisMinY = geometryExtent.minY();
        // Only descend into the tree if the visitor is interested in this bounding box.
        if (visitor.push(thisMinX, thisMinY, thisMaxX, thisMaxY)) {
            TriangleTreeReader.visit(input, visitor, thisMaxX, thisMaxY);
        }
    }
}
apache-2.0
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/allocator/SlotSharingSlotAllocator.java
11753
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.scheduler.adaptive.allocator;

import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.instance.SlotSharingGroupId;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotInfo;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlot;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
import org.apache.flink.runtime.util.ResourceCounter;
import org.apache.flink.util.Preconditions;

import javax.annotation.Nonnull;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/** {@link SlotAllocator} implementation that supports slot sharing. */
public class SlotSharingSlotAllocator implements SlotAllocator {

    // Functions abstracting the slot pool: reserve a slot, free a slot, and
    // query whether a slot is still available and free.
    private final ReserveSlotFunction reserveSlotFunction;
    private final FreeSlotFunction freeSlotFunction;
    private final IsSlotAvailableAndFreeFunction isSlotAvailableAndFreeFunction;

    private SlotSharingSlotAllocator(
            ReserveSlotFunction reserveSlot,
            FreeSlotFunction freeSlotFunction,
            IsSlotAvailableAndFreeFunction isSlotAvailableAndFreeFunction) {
        this.reserveSlotFunction = reserveSlot;
        this.freeSlotFunction = freeSlotFunction;
        this.isSlotAvailableAndFreeFunction = isSlotAvailableAndFreeFunction;
    }

    /** Static factory; the constructor is private. */
    public static SlotSharingSlotAllocator createSlotSharingSlotAllocator(
            ReserveSlotFunction reserveSlot,
            FreeSlotFunction freeSlotFunction,
            IsSlotAvailableAndFreeFunction isSlotAvailableAndFreeFunction) {
        return new SlotSharingSlotAllocator(
                reserveSlot, freeSlotFunction, isSlotAvailableAndFreeFunction);
    }

    @Override
    public ResourceCounter calculateRequiredSlots(
            Iterable<JobInformation.VertexInformation> vertices) {
        // One slot per parallel subtask of the widest vertex in each slot
        // sharing group; resource profile is left unspecified.
        int numTotalRequiredSlots = 0;
        for (Integer requiredSlots : getMaxParallelismForSlotSharingGroups(vertices).values()) {
            numTotalRequiredSlots += requiredSlots;
        }
        return ResourceCounter.withResource(ResourceProfile.UNKNOWN, numTotalRequiredSlots);
    }

    // Maps each slot sharing group to the maximum parallelism among its vertices.
    private static Map<SlotSharingGroupId, Integer> getMaxParallelismForSlotSharingGroups(
            Iterable<JobInformation.VertexInformation> vertices) {
        final Map<SlotSharingGroupId, Integer> maxParallelismForSlotSharingGroups = new HashMap<>();
        for (JobInformation.VertexInformation vertex : vertices) {
            maxParallelismForSlotSharingGroups.compute(
                    vertex.getSlotSharingGroup().getSlotSharingGroupId(),
                    (slotSharingGroupId, currentMaxParallelism) ->
                            currentMaxParallelism == null
                                    ? vertex.getParallelism()
                                    : Math.max(currentMaxParallelism, vertex.getParallelism()));
        }
        return maxParallelismForSlotSharingGroups;
    }

    /**
     * Splits the free slots evenly across the slot sharing groups, caps each
     * vertex's parallelism at its group's share, and pairs every resulting
     * execution slot sharing group with a concrete free slot.
     *
     * @return the chosen parallelism and slot assignments, or empty if there
     *     are fewer free slots than slot sharing groups
     */
    @Override
    public Optional<VertexParallelismWithSlotSharing> determineParallelism(
            JobInformation jobInformation, Collection<? extends SlotInfo> freeSlots) {
        // TODO: This can waste slots if the max parallelism for slot sharing groups is not equal
        final int slotsPerSlotSharingGroup =
                freeSlots.size() / jobInformation.getSlotSharingGroups().size();

        if (slotsPerSlotSharingGroup == 0) {
            // => less slots than slot-sharing groups
            return Optional.empty();
        }

        final Iterator<? extends SlotInfo> slotIterator = freeSlots.iterator();

        final Collection<ExecutionSlotSharingGroupAndSlot> assignments = new ArrayList<>();
        final Map<JobVertexID, Integer> allVertexParallelism = new HashMap<>();

        for (SlotSharingGroup slotSharingGroup : jobInformation.getSlotSharingGroups()) {
            final List<JobInformation.VertexInformation> containedJobVertices =
                    slotSharingGroup.getJobVertexIds().stream()
                            .map(jobInformation::getVertexInformation)
                            .collect(Collectors.toList());

            final Map<JobVertexID, Integer> vertexParallelism =
                    determineParallelism(containedJobVertices, slotsPerSlotSharingGroup);

            final Iterable<ExecutionSlotSharingGroup> sharedSlotToVertexAssignment =
                    createExecutionSlotSharingGroups(vertexParallelism);

            // Assign one free slot per execution slot sharing group. At most
            // slotsPerSlotSharingGroup groups exist per sharing group, so the
            // iterator cannot run dry here.
            for (ExecutionSlotSharingGroup executionSlotSharingGroup :
                    sharedSlotToVertexAssignment) {
                final SlotInfo slotInfo = slotIterator.next();

                assignments.add(
                        new ExecutionSlotSharingGroupAndSlot(executionSlotSharingGroup, slotInfo));
            }
            allVertexParallelism.putAll(vertexParallelism);
        }

        return Optional.of(new VertexParallelismWithSlotSharing(allVertexParallelism, assignments));
    }

    // Caps each vertex's configured parallelism at the number of slots granted
    // to its slot sharing group.
    private static Map<JobVertexID, Integer> determineParallelism(
            Collection<JobInformation.VertexInformation> containedJobVertices,
            int availableSlots) {
        final Map<JobVertexID, Integer> vertexParallelism = new HashMap<>();
        for (JobInformation.VertexInformation jobVertex : containedJobVertices) {
            final int parallelism = Math.min(jobVertex.getParallelism(), availableSlots);

            vertexParallelism.put(jobVertex.getJobVertexID(), parallelism);
        }

        return vertexParallelism;
    }

    // Groups subtasks by subtask index: subtask i of every vertex in the sharing
    // group ends up in execution slot sharing group i.
    private static Iterable<ExecutionSlotSharingGroup> createExecutionSlotSharingGroups(
            Map<JobVertexID, Integer> containedJobVertices) {
        final Map<Integer, Set<ExecutionVertexID>> sharedSlotToVertexAssignment = new HashMap<>();

        for (Map.Entry<JobVertexID, Integer> jobVertex : containedJobVertices.entrySet()) {
            for (int i = 0; i < jobVertex.getValue(); i++) {
                sharedSlotToVertexAssignment
                        .computeIfAbsent(i, ignored -> new HashSet<>())
                        .add(new ExecutionVertexID(jobVertex.getKey(), i));
            }
        }

        return sharedSlotToVertexAssignment.values().stream()
                .map(ExecutionSlotSharingGroup::new)
                .collect(Collectors.toList());
    }

    /**
     * Reserves the previously chosen slots, but only if every one of them is
     * still available and free; otherwise nothing is reserved and empty is
     * returned.
     *
     * @param vertexParallelism must be a {@link VertexParallelismWithSlotSharing}
     *     previously produced by {@link #determineParallelism}
     */
    @Override
    public Optional<ReservedSlots> tryReserveResources(VertexParallelism vertexParallelism) {
        Preconditions.checkArgument(
                vertexParallelism instanceof VertexParallelismWithSlotSharing,
                String.format(
                        "%s expects %s as argument.",
                        SlotSharingSlotAllocator.class.getSimpleName(),
                        VertexParallelismWithSlotSharing.class.getSimpleName()));

        final VertexParallelismWithSlotSharing vertexParallelismWithSlotSharing =
                (VertexParallelismWithSlotSharing) vertexParallelism;

        final Collection<AllocationID> expectedSlots =
                calculateExpectedSlots(vertexParallelismWithSlotSharing.getAssignments());

        if (areAllExpectedSlotsAvailableAndFree(expectedSlots)) {
            final Map<ExecutionVertexID, LogicalSlot> assignedSlots = new HashMap<>();

            for (ExecutionSlotSharingGroupAndSlot executionSlotSharingGroup :
                    vertexParallelismWithSlotSharing.getAssignments()) {
                // One physical (shared) slot per execution slot sharing group,
                // one logical slot per execution vertex inside it.
                final SharedSlot sharedSlot =
                        reserveSharedSlot(executionSlotSharingGroup.getSlotInfo());

                for (ExecutionVertexID executionVertexId :
                        executionSlotSharingGroup
                                .getExecutionSlotSharingGroup()
                                .getContainedExecutionVertices()) {
                    final LogicalSlot logicalSlot = sharedSlot.allocateLogicalSlot();
                    assignedSlots.put(executionVertexId, logicalSlot);
                }
            }

            return Optional.of(ReservedSlots.create(assignedSlots));
        } else {
            return Optional.empty();
        }
    }

    // Collects the allocation ids of all slots the assignment expects to use.
    @Nonnull
    private Collection<AllocationID> calculateExpectedSlots(
            Iterable<? extends ExecutionSlotSharingGroupAndSlot> assignments) {
        final Collection<AllocationID> requiredSlots = new ArrayList<>();

        for (ExecutionSlotSharingGroupAndSlot assignment : assignments) {
            requiredSlots.add(assignment.getSlotInfo().getAllocationId());
        }
        return requiredSlots;
    }

    private boolean areAllExpectedSlotsAvailableAndFree(
            Iterable<? extends AllocationID> requiredSlots) {
        for (AllocationID requiredSlot : requiredSlots) {
            if (!isSlotAvailableAndFreeFunction.isSlotAvailableAndFree(requiredSlot)) {
                return false;
            }
        }

        return true;
    }

    // Reserves the physical slot and wraps it into a SharedSlot that frees the
    // physical slot again when released.
    private SharedSlot reserveSharedSlot(SlotInfo slotInfo) {
        final PhysicalSlot physicalSlot =
                reserveSlotFunction.reserveSlot(
                        slotInfo.getAllocationId(), ResourceProfile.UNKNOWN);

        return new SharedSlot(
                new SlotRequestId(),
                physicalSlot,
                slotInfo.willBeOccupiedIndefinitely(),
                () ->
                        freeSlotFunction.freeSlot(
                                slotInfo.getAllocationId(), null, System.currentTimeMillis()));
    }

    /** Set of execution vertices that share one slot. */
    static class ExecutionSlotSharingGroup {
        private final Set<ExecutionVertexID> containedExecutionVertices;

        public ExecutionSlotSharingGroup(Set<ExecutionVertexID> containedExecutionVertices) {
            this.containedExecutionVertices = containedExecutionVertices;
        }

        public Collection<ExecutionVertexID> getContainedExecutionVertices() {
            return containedExecutionVertices;
        }
    }

    /** Pairing of an {@link ExecutionSlotSharingGroup} with its assigned slot. */
    static class ExecutionSlotSharingGroupAndSlot {
        private final ExecutionSlotSharingGroup executionSlotSharingGroup;
        private final SlotInfo slotInfo;

        public ExecutionSlotSharingGroupAndSlot(
                ExecutionSlotSharingGroup executionSlotSharingGroup, SlotInfo slotInfo) {
            this.executionSlotSharingGroup = executionSlotSharingGroup;
            this.slotInfo = slotInfo;
        }

        public ExecutionSlotSharingGroup getExecutionSlotSharingGroup() {
            return executionSlotSharingGroup;
        }

        public SlotInfo getSlotInfo() {
            return slotInfo;
        }
    }
}
apache-2.0
RainyWang103/elephant-bird
mahout/src/test/java/com/twitter/elephantbird/pig/mahout/TestSequentialAccessSparseVectorWritableConverter.java
2604
package com.twitter.elephantbird.pig.mahout;

import java.io.IOException;

import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.SequentialAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;
import org.junit.Test;

import com.twitter.elephantbird.pig.util.AbstractTestWritableConverter;

/**
 * Unit tests for {@link VectorWritableConverter} and {@link SequentialAccessSparseVector}.
 *
 * <p>The base class is constructed with a sequential-access sparse vector and the
 * {@code -sequential} converter argument; the individual tests re-read the same data with
 * different converter arguments ({@code -sparse}, {@code -dense}, {@code -cardinality N})
 * and check the resulting Pig tuple layout.
 *
 * @author Andy Schlaikjer
 */
public class TestSequentialAccessSparseVectorWritableConverter extends
    AbstractTestWritableConverter<VectorWritable, VectorWritableConverter> {
  // Dense source vector [1, 2, 3] and its sequential-access sparse copy.
  private static final Vector V1 = new DenseVector(new double[] { 1, 2, 3 });
  private static final Vector V2 = new SequentialAccessSparseVector(V1);
  private static final VectorWritable[] DATA = { new VectorWritable(V2) };
  // Sparse text form: (cardinality, {(index, value), ...}).
  private static final String[] EXPECTED = { "(3,{(0,1.0),(1,2.0),(2,3.0)})" };
  private static final String SCHEMA =
      "(cardinality: int, entries: {entry: (index: int, value: double)})";

  public TestSequentialAccessSparseVectorWritableConverter() {
    super(VectorWritable.class, VectorWritableConverter.class, "-- -sequential", DATA, EXPECTED,
        SCHEMA);
  }

  /** Reading back with -sparse yields the full sparse tuple including cardinality. */
  @Test
  public void testLoadValidSchema01() throws IOException {
    registerReadQuery("-- -sparse", null);
    validate(pigServer.openIterator("A"));
  }

  /** With an explicit matching cardinality, the cardinality field is dropped from the tuple. */
  @Test
  public void testLoadValidSchema02() throws IOException {
    registerReadQuery("-- -sparse -cardinality 3", null);
    validate(new String[] { "({(0,1.0),(1,2.0),(2,3.0)})" }, pigServer.openIterator("A"));
  }

  /** Conversion to dense form flattens the entries into one double per position. */
  @Test
  public void testLoadConversionSchema() throws IOException {
    registerReadQuery("-- -dense -cardinality 3", null);
    validate(new String[] { "(1.0,2.0,3.0)" }, pigServer.openIterator("A"));
  }

  /** A cardinality that contradicts the stored vector (2 vs. 3) must fail. */
  // NOTE(review): this call passes tempFilename as a leading argument, unlike the
  // two-argument registerReadQuery calls above — presumably an overload in the base
  // class; verify against AbstractTestWritableConverter.
  @Test(expected = Exception.class)
  public void testLoadInvalidSchema() throws IOException {
    registerReadQuery(tempFilename, "-- -sparse -cardinality 2", null);
    validate(pigServer.openIterator("A"));
  }

  /**
   * Round-trip: read dense, write sparse, read back, then normalize entry order
   * before validating against the expected sparse representation.
   */
  @Test
  public void testDenseToSparse() throws IOException {
    registerReadQuery("-- -dense -cardinality 3", null);
    registerWriteQuery(tempFilename + "-2", "-- -sparse");
    registerReadQuery(tempFilename + "-2");
    pigServer.registerQuery("A = FOREACH A GENERATE key, FLATTEN(value);");
    // Sparse entry order is not guaranteed after the round trip; sort by index.
    pigServer.registerQuery(String.format("A = FOREACH A {\n"
        + "entries_sorted = ORDER entries BY index ASC;\n"
        + "GENERATE key, TOTUPLE(cardinality, entries_sorted) AS value;\n" + "}"));
    validate(pigServer.openIterator("A"));
  }
}
apache-2.0
chenrui2014/XCL-Charts
XCL-Charts/src/org/xclcharts/renderer/axis/RoundAxisRender.java
14928
/**
 * Copyright 2014 XCL-Charts
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @Project XCL-Charts
 * @Description Android chart base library
 * @author XiongChuanLiang<br/>(xcl_168@aliyun.com)
 * @license http://www.apache.org/licenses/ Apache v2 License
 * @version v0.1
 */
package org.xclcharts.renderer.axis;

import java.util.ArrayList;
import java.util.List;

import org.xclcharts.common.DrawHelper;
import org.xclcharts.common.MathHelper;
import org.xclcharts.renderer.XEnum;

import android.graphics.Canvas;
import android.graphics.Paint.Align;
import android.graphics.PointF;
import android.util.Log;

/**
 * @ClassName RoundAxisRender
 * @Description Gauge/dial (round) axis renderer. Draws tick axes, ring axes,
 *              arc-line axes, fill axes, circle axes and center-line axes,
 *              dispatched by {@link #render(Canvas)} on the configured axis type.
 * @author XiongChuanLiang<br/>(xcl_168@aliyun.com)
 */
public class RoundAxisRender extends RoundAxis{

	private static final String TAG="RoundAxisRender";

	// Direction of the center-line axis drawn by renderLineAxis().
	private XEnum.Location mLocation = XEnum.Location.BOTTOM;

	public RoundAxisRender()
	{
	}

	/**
	 * Sets the per-partition percentages (fractions of the total angle) used by
	 * the ring axis.
	 *
	 * <p>FIX(review): the old implementation cleared the previously stored list
	 * before overwriting the reference — dead code that could mutate a list a
	 * caller still held. The list is now adopted directly.
	 *
	 * @param angle percentages, one per partition
	 */
	public void setAxisPercentage(List<Float> angle)
	{
		mPercentage = angle;
	}

	/**
	 * Sets the per-partition colors. See {@link #setAxisPercentage(List)} for the
	 * rationale of the simplified assignment.
	 *
	 * @param color colors, one per partition
	 */
	public void setAxisColor(List<Integer> color)
	{
		mColor = color;
	}

	/**
	 * Sets the axis labels. See {@link #setAxisPercentage(List)} for the
	 * rationale of the simplified assignment.
	 *
	 * @param labels label texts
	 */
	public void setAxisLabels(List<String> labels)
	{
		mLabels = labels;
	}

	/**
	 * Sets the direction of the center-line axis.
	 *
	 * @param location one of TOP/BOTTOM/LEFT/RIGHT
	 */
	public void setLineAxisLocation( XEnum.Location location)
	{
		mLocation = location;
	}

	/**
	 * Draws the tick marks and tick labels of a round tick axis.
	 *
	 * @param canvas canvas
	 * @param labels label collection
	 * @return true when rendering completed
	 */
	public boolean renderTicks(Canvas canvas,List<String> labels)
	{
		float cirX = mCirX;
		float cirY = mCirY;
		int count = labels.size();

		// For a full circle the first and last label coincide, so divide by count;
		// otherwise divide by count-1 so both end labels sit on the arc ends.
		float stepsAngle = 0;
		if(Float.compare(mTotalAngle, 360f) == 0)
		{
			stepsAngle = MathHelper.getInstance().div(mTotalAngle ,count ) ;
		}else{
			stepsAngle = MathHelper.getInstance().div(mTotalAngle ,count -1 ) ;
		}

		float innerRadius1 = mRadius ;
		float tickRadius = 0.0f,detailRadius = 0.0f;
		if( XEnum.RoundTickAxisType.INNER_TICKAXIS == mRoundTickAxisType)
		{
			tickRadius = mRadius * 0.95f;
			detailRadius = tickRadius;
			// Long (major) ticks reach further in when detail steps are enabled (inner).
			if(1 < mDetailModeSteps) tickRadius = tickRadius - mRadius * 0.05f;
		}else{
			tickRadius = mRadius + mRadius * 0.05f;
			detailRadius = tickRadius;
			if(1 < mDetailModeSteps) tickRadius = mRadius + mRadius * 0.08f;
		}

		int steps = mDetailModeSteps;
		float Angle = 0.0f;
		float tickMarkWidth = getTickMarksPaint().getStrokeWidth();
		float stopX = 0.0f,stopY = 0.0f;
		float labelX = 0.0f,labelY = 0.0f;
		float startX = 0.0f,startY = 0.0f;

		for(int i=0;i<count;i++)
		{
			// Compute the absolute angle from the initial angle to avoid
			// accumulating floating point error step by step.
			if(0 == i)
			{
				Angle = mInitAngle;
			}else{
				Angle = MathHelper.getInstance().add(mInitAngle, i * stepsAngle);
			}

			// Tick start point on the base radius.
			MathHelper.getInstance().calcArcEndPointXY(cirX, cirY, innerRadius1, Angle);
			startX = MathHelper.getInstance().getPosX();
			startY = MathHelper.getInstance().getPosY();

			stopX = stopY = 0.0f;
			labelX = labelY = 0.0f;

			// Label anchor (also the end point of a long tick).
			MathHelper.getInstance().calcArcEndPointXY(cirX, cirY,tickRadius, Angle);
			labelX = MathHelper.getInstance().getPosX();
			labelY = MathHelper.getInstance().getPosY();

			// Every mDetailModeSteps-th tick is a long (major) tick; the others
			// end at the shorter detail radius.
			if(steps == mDetailModeSteps )
			{
				stopX = labelX;
				stopY = labelY;
				steps = 0;
			}else{
				MathHelper.getInstance().calcArcEndPointXY(cirX, cirY, detailRadius , Angle);
				stopX = MathHelper.getInstance().getPosX();
				stopY = MathHelper.getInstance().getPosY();
				steps++;
			}

			if (isShowTickMarks())
			{
				// Optionally draw major ticks one pixel bolder.
				if(0 == steps && mLongTickfakeBold )
				{
					getTickMarksPaint().setStrokeWidth( tickMarkWidth + 1);
				}else{
					if(mLongTickfakeBold)getTickMarksPaint().setStrokeWidth(tickMarkWidth);
				}
				canvas.drawLine(startX, startY, stopX, stopY, getTickMarksPaint());
			}

			if (isShowAxisLabels())
			{
				// Callback hook so users can customize the label format.
				String label = getFormatterLabel(labels.get(i));
				PointF pLabel = getLabelXY(label,labelX,labelY,cirX,cirY,mTotalAngle,Angle);
				// Draw the (possibly rotated) label.
				DrawHelper.getInstance().drawRotateText(label,pLabel.x , pLabel.y,
						getTickLabelRotateAngle(), canvas, getTickLabelPaint());
			}
		} //end for
		return true;
	}

	/**
	 * Computes where a tick label should be anchored, nudging it inward or
	 * outward (depending on inner/outer tick axis type) so it does not overlap
	 * the tick marks, and adjusting text alignment on full circles.
	 */
	private PointF getLabelXY(String label,float defLabelX,float defLabelY,
								float cirX,float cirY,float totalAngle,float Angle)
	{
		PointF pLabel = new PointF(defLabelX,defLabelY);
		float labelWidth =DrawHelper.getInstance().getTextWidth(getTickLabelPaint(), label);
		float labelHeight = DrawHelper.getInstance().getPaintFontHeight(getTickLabelPaint());
		getTickLabelPaint().setTextAlign(Align.CENTER);

		if( XEnum.RoundTickAxisType.INNER_TICKAXIS == mRoundTickAxisType)
		{
			// Inner ticks: push labels toward the center.
			if(Float.compare(pLabel.y, cirY) == 0)
			{
				if(Float.compare(pLabel.x , cirX) == -1 )
				{
					pLabel.x += labelWidth/2 ;
				}else{
					pLabel.x -= labelWidth/2 ;
				}
			}else if(Float.compare(pLabel.x, cirX) == 0){
				if(Float.compare(pLabel.y , cirY) == -1 )
				{
					pLabel.y += labelHeight/2 ;
				}else{
					pLabel.y -= labelHeight/2 ;
				}
			}else if(Float.compare(totalAngle, Angle) == 0 ){
				pLabel.y += labelHeight ;
			}else if(Float.compare(pLabel.x, cirX) == 1 ){
				if(Float.compare(totalAngle, 360f) == 0)
				{
					getTickLabelPaint().setTextAlign(Align.RIGHT);
				}else{
					pLabel.x -= labelWidth/2 ;
				}
			}else if(Float.compare(pLabel.x, cirX) == -1 ){
				if(Float.compare(totalAngle, 360f) == 0)
				{
					getTickLabelPaint().setTextAlign(Align.LEFT);
				}else{
					pLabel.x += labelWidth/2 ;
				}
			}
		}else{
			// Outer ticks: push labels away from the center (mirror of the above).
			if(Float.compare(pLabel.y, cirY) == 0)
			{
				if(Float.compare(pLabel.x , cirX) == -1 )
				{
					pLabel.x -= labelWidth/2 ;
				}else{
					pLabel.x += labelWidth/2 ;
				}
			}else if(Float.compare(pLabel.x, cirX) == 0){
				if(Float.compare(pLabel.y , cirY) == -1 )
				{
					pLabel.y -= labelHeight/2 ;
				}else{
					pLabel.y += labelHeight/2 ;
				}
			}else if(Float.compare(totalAngle, Angle) == 0 ){
				pLabel.y -= labelHeight ;
			}else if(Float.compare(pLabel.x, cirX) == 1 ){
				if(Float.compare(totalAngle, 360f) == 0)
				{
					getTickLabelPaint().setTextAlign(Align.LEFT);
				}else{
					pLabel.x += labelWidth/2 ;
				}
			}else if(Float.compare(pLabel.x, cirX) == -1 ){
				if(Float.compare(totalAngle, 360f) == 0)
				{
					getTickLabelPaint().setTextAlign(Align.RIGHT);
				}else{
					pLabel.x -= labelWidth/2 ;
				}
			}
		}
		return pLabel;
	}

	/**
	 * Draws a filled (solid) round axis sector.
	 *
	 * @param canvas canvas
	 * @return true when rendering completed
	 * @throws Exception on drawing errors
	 */
	public boolean renderFillAxis(Canvas canvas) throws Exception
	{
		if(isShow() && isShowAxisLine())
		{
			// The first configured color, when present, becomes the fill color.
			if(null != mColor) getFillAxisPaint().setColor(mColor.get(0));
			DrawHelper.getInstance().drawPercent(canvas, this.getFillAxisPaint(),
					mCirX, mCirY, mRadius, mInitAngle, mTotalAngle, true);
		}
		return true;
	}

	/**
	 * Draws the tick axis: the optional arc outline plus ticks and labels.
	 *
	 * @param canvas canvas
	 * @return false when hidden or no labels are configured
	 * @throws Exception on drawing errors
	 */
	public boolean renderTickAxis(Canvas canvas) throws Exception
	{
		if(!isShow()) return false;
		if(null == mLabels) return false;

		if(isShowAxisLine())
		{
			DrawHelper.getInstance().drawPathArc(canvas, this.getAxisPaint(),
					this.mCirX,this.mCirY,this.mRadius,this.mInitAngle, this.mTotalAngle);
		}
		return renderTicks(canvas,this.mLabels);
	}

	/**
	 * Draws a bare arc-line axis (outline only).
	 *
	 * @param canvas canvas
	 * @return true when rendering completed
	 * @throws Exception on drawing errors
	 */
	public boolean renderArcLineAxis(Canvas canvas) throws Exception
	{
		if(isShow() && isShowAxisLine())
		{
			DrawHelper.getInstance().drawPathArc(canvas, this.getAxisPaint() ,mCirX, mCirY,
					mRadius,this.mInitAngle, this.mTotalAngle);
		}
		return true;
	}

	/**
	 * Draws a full circle axis.
	 *
	 * @param canvas canvas
	 * @return true when rendering completed
	 * @throws Exception on drawing errors
	 */
	public boolean renderCircleAxis(Canvas canvas) throws Exception
	{
		if(isShow() && isShowAxisLine())
		{
			if(null != mColor) getAxisPaint().setColor(mColor.get(0));
			canvas.drawCircle(mCirX, mCirY, mRadius, this.getAxisPaint());
		}
		return true;
	}

	/**
	 * Draws a ring axis made of colored partitions, each spanning
	 * percentage * totalAngle, plus an optional inner "hole" circle.
	 *
	 * @param canvas canvas
	 * @return false when no percentages are configured
	 * @throws Exception on drawing errors
	 */
	public boolean renderRingAxis(Canvas canvas) throws Exception
	{
		if(!isShow()|| !isShowAxisLine()) return true;
		if(null == mPercentage) return false;

		int angleCount = 0,colorCount = 0,labelsCount = 0;
		angleCount = this.mPercentage.size();
		if(null != mColor)colorCount = this.mColor.size();
		if(null != mLabels)labelsCount = this.mLabels.size();

		float offsetAngle = this.mInitAngle;
		int currentColor = -1;
		String currentLabel = "";
		float sweepAngle = 0.0f;

		for(int i=0;i<angleCount;i++)
		{
			// Color/label lists may be shorter than the percentage list; missing
			// entries fall back to the -1 / "" sentinels.
			if(null != mColor && colorCount > i) currentColor = mColor.get(i);
			if(null != mLabels && labelsCount > i)currentLabel = mLabels.get(i);

			sweepAngle = MathHelper.getInstance().mul( mTotalAngle , mPercentage.get(i));
			renderPartitions(canvas,offsetAngle,sweepAngle,currentColor,currentLabel) ;
			offsetAngle = MathHelper.getInstance().add(offsetAngle, sweepAngle);

			currentColor = -1;
			currentLabel = "";
		}

		// A positive inner-radius percentage punches a hole in the ring.
		if(Float.compare(getRingInnerRadiusPercentage() , 0.0f) != 0
				&& Float.compare(getRingInnerRadiusPercentage() , 0.0f) == 1)
		{
			canvas.drawCircle(this.mCirX, mCirY, getRingInnerRadius(), this.getFillAxisPaint());
		}
		return true;
	}

	/**
	 * Draws one colored partition of the ring axis, with an optional label at
	 * half radius in the middle of the sweep.
	 *
	 * @throws Exception on drawing errors
	 */
	private boolean renderPartitions(Canvas canvas,float startAngle,float sweepAngle,
									int color,String label) throws Exception
	{
		if(-1 != color) getAxisPaint().setColor(color);

		if(Float.compare(sweepAngle, 0.0f) < 0){
			Log.e(TAG,"负角度???!!!");
			return false;
		}else if(Float.compare(sweepAngle, 0.0f) == 0){
			Log.w(TAG,"零角度???!!!");
			return true;
		}

		DrawHelper.getInstance().drawPercent(canvas, this.getAxisPaint(),
				this.mCirX, this.mCirY,mRadius, startAngle, sweepAngle, true);

		// FIX(review): was `""!= label`, a reference comparison that only worked
		// for the interned "" sentinel; value comparison handles any empty string.
		if (isShowAxisLabels() && !"".equals(label))
		{
			// Label sits at half radius, halfway through the sweep.
			float Angle = MathHelper.getInstance().add(startAngle , sweepAngle / 2) ;
			MathHelper.getInstance().calcArcEndPointXY(this.mCirX, this.mCirY,
					mRadius * 0.5f,Angle );
			float labelX = MathHelper.getInstance().getPosX();
			float labelY = MathHelper.getInstance().getPosY();

			DrawHelper.getInstance().drawRotateText(getFormatterLabel(label) ,labelX , labelY,
					getTickLabelRotateAngle(), canvas, getTickLabelPaint());
		}
		return true;
	}

	/**
	 * Draws the center-line axis from the circle center toward the configured
	 * direction.
	 *
	 * @param canvas canvas
	 * @return false on an unknown location
	 * @throws Exception on drawing errors
	 */
	public boolean renderLineAxis(Canvas canvas) throws Exception
	{
		if(!isShow()|| !isShowAxisLine()) return true;

		switch(mLocation)
		{
		case TOP:
			canvas.drawLine(mCirX, mCirY, mCirX, mCirY - mRadius , this.getAxisPaint());
			break;
		case BOTTOM:
			canvas.drawLine(mCirX, mCirY, mCirX, mCirY + mRadius , this.getAxisPaint());
			break;
		case LEFT:
			canvas.drawLine(mCirX, mCirY, mCirX - mRadius , mCirY , this.getAxisPaint());
			break;
		case RIGHT:
			canvas.drawLine(mCirX, mCirY, mCirX + mRadius , mCirY , this.getAxisPaint());
			break;
		default:
			return false;
		}
		return true;
	}

	/**
	 * Sets the circle center.
	 *
	 * @param x x coordinate
	 * @param y y coordinate
	 */
	public void setCenterXY(float x,float y)
	{
		mCirX = x;
		mCirY = y;
	}

	/**
	 * Sets the plot-area radius.
	 *
	 * @param radius radius
	 */
	public void setOrgRadius(float radius)
	{
		mOrgRadius = radius;
	}

	/**
	 * Sets the total sweep angle and the initial (offset) angle.
	 *
	 * @param totalAngle total sweep angle
	 * @param initAngle  initial offset angle
	 */
	public void setAngleInfo(float totalAngle,float initAngle)
	{
		mTotalAngle = totalAngle;
		mInitAngle = initAngle;
	}

	/**
	 * Renders the axis, dispatching on the configured axis type.
	 *
	 * @param canvas canvas
	 * @return true when a known axis type was rendered successfully
	 * @throws Exception on drawing errors
	 */
	public boolean render(Canvas canvas) throws Exception
	{
		boolean ret = false;
		mRadius = getOuterRadius();

		// TICKAXIS,RINGAXIS,LENAXIS
		switch(getAxisType())
		{
			case TICKAXIS:
				ret = renderTickAxis(canvas);
				break;
			case RINGAXIS:
				ret = renderRingAxis(canvas);
				break;
			case ARCLINEAXIS:
				ret = renderArcLineAxis(canvas);
				break;
			case FILLAXIS:
				ret = renderFillAxis(canvas);
				break;
			case CIRCLEAXIS:
				ret = renderCircleAxis(canvas);
				break;
			case LINEAXIS:
				ret = renderLineAxis(canvas);
				break;
			default:
				break;
		}
		return ret;
	}
}
apache-2.0
prazanna/kite
kite-data/kite-data-core/src/test/java/org/kitesdk/data/spi/filesystem/TestFileSystemPartitionIterator.java
7337
/*
 * Copyright "2013" Cloudera.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kitesdk.data.spi.filesystem;

import com.google.common.base.Predicate;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.kitesdk.data.spi.Constraints;
import org.kitesdk.data.MiniDFSTest;
import org.kitesdk.data.PartitionStrategy;
import org.kitesdk.data.spi.StorageKey;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;

/**
 * Tests that FileSystemPartitionIterator yields exactly the partition keys that
 * satisfy a Constraints-derived key predicate, over a year=/month=/day= directory
 * layout. Parameterized to run against both the DFS and the local file system.
 */
@RunWith(Parameterized.class)
public class TestFileSystemPartitionIterator extends MiniDFSTest {

  public FileSystem fileSystem;
  public Path testDirectory;

  // All 24 expected keys: years {2012,2013} x months {9,10,11,12} x days {22,24,25},
  // in that nesting order. Index of (year y, month m, day d) is
  // ((yearIdx*4)+monthIdx)*3+dayIdx — used by the subList ranges in the tests below.
  public static List<StorageKey> keys;
  public static final PartitionStrategy strategy = new PartitionStrategy.Builder()
      .year("timestamp")
      .month("timestamp")
      .day("timestamp")
      .build();
  public static final Constraints emptyConstraints = new Constraints(
      SchemaBuilder.record("Event").fields()
          .requiredLong("timestamp")
          .endRecord(), strategy);
  private static final Schema schema = SchemaBuilder.record("Event").fields()
      .requiredLong("id")
      .requiredLong("timestamp")
      .endRecord();

  /** Builds the ordered list of all 24 keys the directory layout will contain. */
  @BeforeClass
  public static void createExpectedKeys() {
    keys = Lists.newArrayList();
    for (Object year : Arrays.asList(2012, 2013)) {
      for (Object month : Arrays.asList(9, 10, 11, 12)) {
        for (Object day : Arrays.asList(22, 24, 25)) {
          StorageKey k = new StorageKey.Builder(strategy)
              .add("year", year).add("month", month).add("day", day).build();
          keys.add(k);
        }
      }
    }
  }

  /** Runs every test against both the mini-DFS and the local file system. */
  @Parameterized.Parameters
  public static Collection<Object[]> data() throws IOException {
    MiniDFSTest.setupFS();
    Object[][] data = new Object[][] {
        { getDFS() },
        { getFS() } };
    return Arrays.asList(data);
  }

  public TestFileSystemPartitionIterator(FileSystem fileSystem) {
    this.fileSystem = fileSystem;
  }

  /** Creates the year=/month=/day= directory tree matching the expected keys. */
  @Before
  public void createDirectoryLayout() throws Exception {
    testDirectory = fileSystem.makeQualified(
        new Path(Files.createTempDir().getAbsolutePath()));

    for (String year : Arrays.asList("year=2012", "year=2013")) {
      final Path yearPath = new Path(testDirectory, year);
      for (String month : Arrays.asList(
          "month=09", "month=10", "month=11", "month=12")) {
        final Path monthPath = new Path(yearPath, month);
        for (String day : Arrays.asList("day=22", "day=24", "day=25")) {
          final Path dayPath = new Path(monthPath, day);
          fileSystem.mkdirs(dayPath);
        }
      }
    }
  }

  @After
  public void cleanDirectoryLayout() throws Exception {
    fileSystem.delete(testDirectory, true);
  }

  /** With no constraints, iteration must yield every key. */
  @Test
  public void testUnbounded() throws Exception {
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.toKeyPredicate());

    assertIterableEquals(keys, partitions);
  }

  // Boundary timestamps (UTC) used by the range tests.
  public static final long oct_25_2012 = new DateTime(2012, 10, 25, 0, 0,
      DateTimeZone.UTC).getMillis();
  public static final long oct_24_2013 = new DateTime(2013, 10, 24, 0, 0,
      DateTimeZone.UTC).getMillis();
  public static final long oct_25_2013 = new DateTime(2013, 10, 25, 0, 0,
      DateTimeZone.UTC).getMillis();
  // Last millisecond of Oct 24 2013.
  public static final long oct_24_2013_end = oct_25_2013 - 1;

  /** from Oct 24 2013 (key index 16) — inclusive lower bound, to the end. */
  @Test
  public void testFrom() throws Exception {
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.from("timestamp", oct_24_2013).toKeyPredicate());

    assertIterableEquals(keys.subList(16, 24), partitions);
  }

  /** strictly after the end of Oct 24 2013 — starts at key index 17. */
  @Test
  public void testAfter() throws Exception {
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.fromAfter("timestamp", oct_24_2013_end).toKeyPredicate());

    assertIterableEquals(keys.subList(17, 24), partitions);
  }

  /** up to and including Oct 25 2012 (key index 5) — first 6 keys. */
  @Test
  public void testTo() throws Exception {
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.to("timestamp", oct_25_2012).toKeyPredicate());

    assertIterableEquals(keys.subList(0, 6), partitions);
  }

  /** strictly before Oct 25 2012 — first 5 keys. */
  @Test
  public void testBefore() throws Exception {
    Iterable <StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.toBefore("timestamp", oct_25_2012).toKeyPredicate());

    assertIterableEquals(keys.subList(0, 5), partitions);
  }

  /** exact-match constraint selects the single Oct 24 2013 partition. */
  @Test
  public void testWith() throws Exception {
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema,
        emptyConstraints.with("timestamp", oct_24_2013).toKeyPredicate());

    assertIterableEquals(keys.subList(16, 17), partitions);
  }

  /** a [start of day, end of day] range also selects exactly one partition. */
  @Test
  public void testDayRange() throws Exception {
    Predicate<StorageKey> predicate = emptyConstraints
        .from("timestamp", oct_24_2013)
        .to("timestamp", oct_24_2013_end)
        .toKeyPredicate();
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema, predicate);

    assertIterableEquals(keys.subList(16, 17), partitions);
  }

  /** an inclusive range spanning a year selects keys 5..16. */
  @Test
  public void testLargerRange() throws Exception {
    Predicate<StorageKey> predicate = emptyConstraints
        .from("timestamp", oct_25_2012)
        .to("timestamp", oct_24_2013)
        .toKeyPredicate();
    Iterable<StorageKey> partitions = new FileSystemPartitionIterator(
        fileSystem, testDirectory, strategy, schema, predicate);

    assertIterableEquals(keys.subList(5, 17), partitions);
  }

  /**
   * Set-based equality check that consumes the iterable as it compares, because
   * the iterator reuses its StorageKey instance between steps.
   */
  public static <T> void assertIterableEquals(
      Iterable<T> expected, Iterable<T> actualIterable) {
    Set<T> expectedSet = Sets.newHashSet(expected);
    for (T actual : actualIterable) {
      // need to check as iteration happens because the StorageKey is reused
      Assert.assertTrue("Unexpected record: " + actual,
          expectedSet.remove(actual));
    }
    Assert.assertEquals("Not all expected records were present: " + expectedSet,
        0, expectedSet.size());
  }
}
apache-2.0
OnePaaS/jbpm
jbpm-flow/src/main/java/org/jbpm/process/core/ContextContainer.java
1035
/**
 * Copyright 2010 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.jbpm.process.core;

import java.util.List;

/**
 * A container that holds {@link Context} instances grouped by context type,
 * with an optional default context per type.
 *
 * @author <a href="mailto:kris_verlaenen@hotmail.com">Kris Verlaenen</a>
 */
public interface ContextContainer {

    /**
     * Returns the contexts registered under the given context type.
     * NOTE(review): whether this returns null or an empty list for an unknown
     * type is implementation-defined and not visible here — verify in callers.
     */
    List<Context> getContexts(String contextType);

    /** Registers the given context with this container. */
    void addContext(Context context);

    /** Returns the context of the given type with the given id. */
    Context getContext(String contextType, long id);

    /** Marks the given context as the default for its context type. */
    void setDefaultContext(Context context);

    /** Returns the default context for the given context type. */
    Context getDefaultContext(String contextType);

}
apache-2.0
chaonextdoor/android
appMobiLib/src/com/phonegap/HttpHandler.java
1903
/*
 * PhoneGap is available under *either* the terms of the modified BSD license *or* the
 * MIT License (2008). See http://opensource.org/licenses/alphabetical for full text.
 *
 * Copyright (c) 2005-2010, Nitobi Software Inc.
 * Copyright (c) 2010, IBM Corporation
 */
package com.phonegap;

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;

public class HttpHandler {

    /**
     * Downloads the resource at {@code url} and writes the response body to
     * {@code /sdcard/<file>}.
     *
     * <p>FIX(review): the old writeToDisk swallowed all exceptions internally,
     * so this method reported success even when the write failed, and it leaked
     * both the input stream and the output stream on error paths.
     *
     * @param url  the URL to fetch
     * @param file file name, relative to /sdcard/, to write the body to
     * @return true if the download and write both completed, false otherwise
     */
    protected Boolean get(String url, String file) {
        HttpEntity entity = getHttpEntity(url);
        if (entity == null) {
            // The HTTP request itself failed; nothing to write.
            return false;
        }
        try {
            writeToDisk(entity, file);
        } catch (Exception e) {
            e.printStackTrace();
            return false;
        } finally {
            // Always release the entity's resources, even after a failed write.
            try {
                entity.consumeContent();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        return true;
    }

    /**
     * Gets the HTTP entity at a given url.
     *
     * @param url the URL to fetch
     * @return the response entity, or null if the request failed
     */
    private HttpEntity getHttpEntity(String url) {
        HttpEntity entity = null;
        try {
            DefaultHttpClient httpclient = new DefaultHttpClient();
            HttpGet httpget = new HttpGet(url);
            HttpResponse response = httpclient.execute(httpget);
            entity = response.getEntity();
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
        return entity;
    }

    /**
     * Writes an HTTP entity's content to the specified filename on the sdcard.
     * Failures now propagate to the caller instead of being swallowed, and both
     * streams are closed on every path.
     *
     * @param entity the entity whose content is written
     * @param file   file name relative to /sdcard/
     * @throws IOException if reading the entity or writing the file fails
     */
    private void writeToDisk(HttpEntity entity, String file) throws IOException {
        // NOTE(review): hard-coded external-storage root, kept for compatibility
        // with existing callers.
        String filePath = "/sdcard/" + file;
        InputStream in = entity.getContent();
        try {
            FileOutputStream out = new FileOutputStream(filePath);
            try {
                byte[] buff = new byte[1024];
                int numread;
                while ((numread = in.read(buff)) > 0) {
                    out.write(buff, 0, numread);
                }
                out.flush();
            } finally {
                out.close();
            }
        } finally {
            in.close();
        }
    }
}
mit